xref: /openbsd/sys/dev/pci/if_iwx.c (revision 55cc5ba3)
1 /*	$OpenBSD: if_iwx.c,v 1.49 2021/01/17 14:24:00 jcs Exp $	*/
2 
3 /*
4  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5  *   Author: Stefan Sperling <stsp@openbsd.org>
6  * Copyright (c) 2014 Fixup Software Ltd.
7  * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*-
23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24  * which were used as the reference documentation for this implementation.
25  *
26  ******************************************************************************
27  *
28  * This file is provided under a dual BSD/GPLv2 license.  When using or
29  * redistributing this file, you may do so under either license.
30  *
31  * GPL LICENSE SUMMARY
32  *
33  * Copyright(c) 2017 Intel Deutschland GmbH
34  * Copyright(c) 2018 - 2019 Intel Corporation
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * BSD LICENSE
46  *
47  * Copyright(c) 2017 Intel Deutschland GmbH
48  * Copyright(c) 2018 - 2019 Intel Corporation
49  * All rights reserved.
50  *
51  * Redistribution and use in source and binary forms, with or without
52  * modification, are permitted provided that the following conditions
53  * are met:
54  *
55  *  * Redistributions of source code must retain the above copyright
56  *    notice, this list of conditions and the following disclaimer.
57  *  * Redistributions in binary form must reproduce the above copyright
58  *    notice, this list of conditions and the following disclaimer in
59  *    the documentation and/or other materials provided with the
60  *    distribution.
61  *  * Neither the name Intel Corporation nor the names of its
62  *    contributors may be used to endorse or promote products derived
63  *    from this software without specific prior written permission.
64  *
65  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
66  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
67  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
68  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
69  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
70  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
71  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
72  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
73  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
74  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
75  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
76  *
77  *****************************************************************************
78  */
79 
80 /*-
81  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
82  *
83  * Permission to use, copy, modify, and distribute this software for any
84  * purpose with or without fee is hereby granted, provided that the above
85  * copyright notice and this permission notice appear in all copies.
86  *
87  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
88  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
89  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
90  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
91  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
92  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
93  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
94  */
95 
96 #include "bpfilter.h"
97 
98 #include <sys/param.h>
99 #include <sys/conf.h>
100 #include <sys/kernel.h>
101 #include <sys/malloc.h>
102 #include <sys/mbuf.h>
103 #include <sys/mutex.h>
104 #include <sys/proc.h>
105 #include <sys/rwlock.h>
106 #include <sys/socket.h>
107 #include <sys/sockio.h>
108 #include <sys/systm.h>
109 #include <sys/endian.h>
110 
111 #include <sys/refcnt.h>
112 #include <sys/task.h>
113 #include <machine/bus.h>
114 #include <machine/intr.h>
115 
116 #include <dev/pci/pcireg.h>
117 #include <dev/pci/pcivar.h>
118 #include <dev/pci/pcidevs.h>
119 
120 #if NBPFILTER > 0
121 #include <net/bpf.h>
122 #endif
123 #include <net/if.h>
124 #include <net/if_dl.h>
125 #include <net/if_media.h>
126 
127 #include <netinet/in.h>
128 #include <netinet/if_ether.h>
129 
130 #include <net80211/ieee80211_var.h>
131 #include <net80211/ieee80211_radiotap.h>
132 
133 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
134 
135 #define IC2IFP(_ic_) (&(_ic_)->ic_if)
136 
137 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
138 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
139 
140 #ifdef IWX_DEBUG
141 #define DPRINTF(x)	do { if (iwx_debug > 0) printf x; } while (0)
142 #define DPRINTFN(n, x)	do { if (iwx_debug >= (n)) printf x; } while (0)
143 int iwx_debug = 1;
144 #else
145 #define DPRINTF(x)	do { ; } while (0)
146 #define DPRINTFN(n, x)	do { ; } while (0)
147 #endif
148 
149 #include <dev/pci/if_iwxreg.h>
150 #include <dev/pci/if_iwxvar.h>
151 
/*
 * Channel numbers supported by 8000-series (and later non-UHB) NVM layouts,
 * indexed by NVM channel slot.  Split into the 2.4 GHz and 5 GHz bands.
 */
const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
160 
/*
 * Channel numbers for ultra-high-band (UHB) capable devices: same 2.4/5 GHz
 * list as above, extended with the 6-7 GHz band channels.
 */
static const uint8_t iwx_nvm_channels_uhb[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181,
	/* 6-7 GHz */
	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};
174 
175 #define IWX_NUM_2GHZ_CHANNELS	14
176 
/*
 * Rate table mapping a rate value (in units of 500 kbit/s) to the legacy
 * PLCP signal value and the corresponding HT PLCP value.  Entries whose
 * legacy or HT column does not apply use the respective *_INV/INVM PLCP
 * marker.  Indexed via the IWX_RIDX_* macros and iwx_mcs2ridx[] below.
 */
const struct iwx_rate {
	uint16_t rate;	/* rate in 500 kbit/s units (2 == 1 Mbit/s) */
	uint8_t plcp;	/* legacy (CCK/OFDM) PLCP value */
	uint8_t ht_plcp;	/* HT MCS PLCP value */
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
205 #define IWX_RIDX_CCK	0
206 #define IWX_RIDX_OFDM	4
207 #define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
208 #define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
209 #define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
210 #define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
211 
212 /* Convert an MCS index into an iwx_rates[] index. */
/*
 * Convert an MCS index (0-15) into an iwx_rates[] index.
 * Indexed by MCS number; yields the row of iwx_rates[] carrying that MCS.
 */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};
231 
232 uint8_t	iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
233 int	iwx_is_mimo_ht_plcp(uint8_t);
234 int	iwx_is_mimo_mcs(int);
235 int	iwx_store_cscheme(struct iwx_softc *, uint8_t *, size_t);
236 int	iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
237 int	iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
238 int	iwx_apply_debug_destination(struct iwx_softc *);
239 int	iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
240 void	iwx_ctxt_info_free_fw_img(struct iwx_softc *);
241 void	iwx_ctxt_info_free_paging(struct iwx_softc *);
242 int	iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
243 	    struct iwx_context_info_dram *);
244 int	iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
245 	    uint8_t *, size_t);
246 int	iwx_set_default_calib(struct iwx_softc *, const void *);
247 void	iwx_fw_info_free(struct iwx_fw_info *);
248 int	iwx_read_firmware(struct iwx_softc *);
249 uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
250 void	iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
251 int	iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
252 int	iwx_write_mem(struct iwx_softc *, uint32_t, const void *, int);
253 int	iwx_write_mem32(struct iwx_softc *, uint32_t, uint32_t);
254 int	iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
255 int	iwx_nic_lock(struct iwx_softc *);
256 void	iwx_nic_assert_locked(struct iwx_softc *);
257 void	iwx_nic_unlock(struct iwx_softc *);
258 void	iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
259 	    uint32_t);
260 void	iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
261 void	iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
262 int	iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *, bus_size_t,
263 	    bus_size_t);
264 void	iwx_dma_contig_free(struct iwx_dma_info *);
265 int	iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
266 void	iwx_disable_rx_dma(struct iwx_softc *);
267 void	iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
268 void	iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
269 int	iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
270 void	iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
271 void	iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
272 void	iwx_enable_rfkill_int(struct iwx_softc *);
273 int	iwx_check_rfkill(struct iwx_softc *);
274 void	iwx_enable_interrupts(struct iwx_softc *);
275 void	iwx_enable_fwload_interrupt(struct iwx_softc *);
276 void	iwx_restore_interrupts(struct iwx_softc *);
277 void	iwx_disable_interrupts(struct iwx_softc *);
278 void	iwx_ict_reset(struct iwx_softc *);
279 int	iwx_set_hw_ready(struct iwx_softc *);
280 int	iwx_prepare_card_hw(struct iwx_softc *);
281 void	iwx_force_power_gating(struct iwx_softc *);
282 void	iwx_apm_config(struct iwx_softc *);
283 int	iwx_apm_init(struct iwx_softc *);
284 void	iwx_apm_stop(struct iwx_softc *);
285 int	iwx_allow_mcast(struct iwx_softc *);
286 void	iwx_init_msix_hw(struct iwx_softc *);
287 void	iwx_conf_msix_hw(struct iwx_softc *, int);
288 int	iwx_start_hw(struct iwx_softc *);
289 void	iwx_stop_device(struct iwx_softc *);
290 void	iwx_nic_config(struct iwx_softc *);
291 int	iwx_nic_rx_init(struct iwx_softc *);
292 int	iwx_nic_init(struct iwx_softc *);
293 int	iwx_enable_txq(struct iwx_softc *, int, int, int, int);
294 void	iwx_post_alive(struct iwx_softc *);
295 void	iwx_protect_session(struct iwx_softc *, struct iwx_node *, uint32_t,
296 	    uint32_t);
297 void	iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
298 void	iwx_init_channel_map(struct iwx_softc *, uint16_t *, uint32_t *, int);
299 void	iwx_setup_ht_rates(struct iwx_softc *);
300 int	iwx_mimo_enabled(struct iwx_softc *);
301 void	iwx_htprot_task(void *);
302 void	iwx_update_htprot(struct ieee80211com *, struct ieee80211_node *);
303 int	iwx_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
304 	    uint8_t);
305 void	iwx_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
306 	    uint8_t);
307 void	iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
308 	    uint16_t, uint16_t, int);
309 #ifdef notyet
310 int	iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
311 	    uint8_t);
312 void	iwx_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
313 	    uint8_t);
314 #endif
315 void	iwx_ba_task(void *);
316 
317 int	iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
318 int	iwx_is_valid_mac_addr(const uint8_t *);
319 int	iwx_nvm_get(struct iwx_softc *);
320 int	iwx_load_firmware(struct iwx_softc *);
321 int	iwx_start_fw(struct iwx_softc *);
322 int	iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
323 int	iwx_send_phy_cfg_cmd(struct iwx_softc *);
324 int	iwx_load_ucode_wait_alive(struct iwx_softc *);
325 int	iwx_send_dqa_cmd(struct iwx_softc *);
326 int	iwx_run_init_mvm_ucode(struct iwx_softc *, int);
327 int	iwx_config_ltr(struct iwx_softc *);
328 void	iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int);
329 int	iwx_rx_addbuf(struct iwx_softc *, int, int);
330 int	iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
331 void	iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
332 	    struct iwx_rx_data *);
333 int	iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
334 int	iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
335 	    struct ieee80211_node *);
336 void	iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t, int, int,
337 	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
338 void	iwx_rx_tx_cmd_single(struct iwx_softc *, struct iwx_rx_packet *,
339 	    struct iwx_node *);
340 void	iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
341 	    struct iwx_rx_data *);
342 void	iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
343 	    struct iwx_rx_data *);
344 int	iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
345 int	iwx_phy_ctxt_cmd_uhb(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
346 	    uint8_t, uint32_t, uint32_t);
347 int	iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
348 	    uint8_t, uint32_t, uint32_t);
349 int	iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
350 int	iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
351 	    const void *);
352 int	iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
353 	    uint32_t *);
354 int	iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
355 	    const void *, uint32_t *);
356 void	iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
357 void	iwx_cmd_done(struct iwx_softc *, int, int, int);
358 const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *, struct iwx_node *,
359 	    struct ieee80211_frame *, struct iwx_tx_cmd_gen2 *);
360 void	iwx_tx_update_byte_tbl(struct iwx_tx_ring *, int, uint16_t, uint16_t);
361 int	iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *, int);
362 int	iwx_flush_tx_path(struct iwx_softc *);
363 int	iwx_beacon_filter_send_cmd(struct iwx_softc *,
364 	    struct iwx_beacon_filter_cmd *);
365 int	iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *, int);
366 void	iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
367 	    struct iwx_mac_power_cmd *);
368 int	iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
369 int	iwx_power_update_device(struct iwx_softc *);
370 int	iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
371 int	iwx_disable_beacon_filter(struct iwx_softc *);
372 int	iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
373 int	iwx_add_aux_sta(struct iwx_softc *);
374 int	iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
375 int	iwx_fill_probe_req(struct iwx_softc *, struct iwx_scan_probe_req *);
376 int	iwx_config_umac_scan(struct iwx_softc *);
377 int	iwx_umac_scan(struct iwx_softc *, int);
378 void	iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
379 uint8_t	iwx_ridx2rate(struct ieee80211_rateset *, int);
380 int	iwx_rval2ridx(int);
381 void	iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *, int *);
382 void	iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
383 	    struct iwx_mac_ctx_cmd *, uint32_t);
384 void	iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
385 	    struct iwx_mac_data_sta *, int);
386 int	iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *, uint32_t, int);
387 int	iwx_clear_statistics(struct iwx_softc *);
388 int	iwx_update_quotas(struct iwx_softc *, struct iwx_node *, int);
389 void	iwx_add_task(struct iwx_softc *, struct taskq *, struct task *);
390 void	iwx_del_task(struct iwx_softc *, struct taskq *, struct task *);
391 int	iwx_scan(struct iwx_softc *);
392 int	iwx_bgscan(struct ieee80211com *);
393 int	iwx_umac_scan_abort(struct iwx_softc *);
394 int	iwx_scan_abort(struct iwx_softc *);
395 int	iwx_rs_rval2idx(uint8_t);
396 uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *, int);
397 int	iwx_rs_init(struct iwx_softc *, struct iwx_node *);
398 int	iwx_enable_data_tx_queues(struct iwx_softc *);
399 int	iwx_auth(struct iwx_softc *);
400 int	iwx_deauth(struct iwx_softc *);
401 int	iwx_assoc(struct iwx_softc *);
402 int	iwx_disassoc(struct iwx_softc *);
403 int	iwx_run(struct iwx_softc *);
404 int	iwx_run_stop(struct iwx_softc *);
405 struct ieee80211_node *iwx_node_alloc(struct ieee80211com *);
406 int	iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
407 	    struct ieee80211_key *);
408 void	iwx_delete_key(struct ieee80211com *,
409 	    struct ieee80211_node *, struct ieee80211_key *);
410 int	iwx_media_change(struct ifnet *);
411 void	iwx_newstate_task(void *);
412 int	iwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
413 void	iwx_endscan(struct iwx_softc *);
414 void	iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
415 	    struct ieee80211_node *);
416 int	iwx_sf_config(struct iwx_softc *, int);
417 int	iwx_send_bt_init_conf(struct iwx_softc *);
418 int	iwx_send_soc_conf(struct iwx_softc *);
419 int	iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
420 int	iwx_send_temp_report_ths_cmd(struct iwx_softc *);
421 int	iwx_init_hw(struct iwx_softc *);
422 int	iwx_init(struct ifnet *);
423 void	iwx_start(struct ifnet *);
424 void	iwx_stop(struct ifnet *);
425 void	iwx_watchdog(struct ifnet *);
426 int	iwx_ioctl(struct ifnet *, u_long, caddr_t);
427 const char *iwx_desc_lookup(uint32_t);
428 void	iwx_nic_error(struct iwx_softc *);
429 void	iwx_nic_umac_error(struct iwx_softc *);
430 int	iwx_rx_pkt_valid(struct iwx_rx_packet *);
431 void	iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
432 	    struct mbuf_list *);
433 void	iwx_notif_intr(struct iwx_softc *);
434 int	iwx_intr(void *);
435 int	iwx_intr_msix(void *);
436 int	iwx_match(struct device *, void *, void *);
437 int	iwx_preinit(struct iwx_softc *);
438 void	iwx_attach_hook(struct device *);
439 void	iwx_attach(struct device *, struct device *, void *);
440 void	iwx_init_task(void *);
441 int	iwx_activate(struct device *, int);
442 int	iwx_resume(struct iwx_softc *);
443 
444 #if NBPFILTER > 0
445 void	iwx_radiotap_attach(struct iwx_softc *);
446 #endif
447 
448 uint8_t
449 iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
450 {
451 	const struct iwx_fw_cmd_version *entry;
452 	int i;
453 
454 	for (i = 0; i < sc->n_cmd_versions; i++) {
455 		entry = &sc->cmd_versions[i];
456 		if (entry->group == grp && entry->cmd == cmd)
457 			return entry->cmd_ver;
458 	}
459 
460 	return IWX_FW_CMD_VER_UNKNOWN;
461 }
462 
463 int
464 iwx_is_mimo_ht_plcp(uint8_t ht_plcp)
465 {
466 	return (ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP &&
467 	    (ht_plcp & IWX_RATE_HT_MCS_NSS_MSK));
468 }
469 
470 int
471 iwx_is_mimo_mcs(int mcs)
472 {
473 	int ridx = iwx_mcs2ridx[mcs];
474 	return iwx_is_mimo_ht_plcp(iwx_rates[ridx].ht_plcp);
475 
476 }
477 
478 int
479 iwx_store_cscheme(struct iwx_softc *sc, uint8_t *data, size_t dlen)
480 {
481 	struct iwx_fw_cscheme_list *l = (void *)data;
482 
483 	if (dlen < sizeof(*l) ||
484 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
485 		return EINVAL;
486 
487 	/* we don't actually store anything for now, always use s/w crypto */
488 
489 	return 0;
490 }
491 
492 int
493 iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
494     const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
495 {
496 	int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 0);
497 	if (err) {
498 		printf("%s: could not allocate context info DMA memory\n",
499 		    DEVNAME(sc));
500 		return err;
501 	}
502 
503 	memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
504 
505 	return 0;
506 }
507 
508 void iwx_ctxt_info_free_paging(struct iwx_softc *sc)
509 {
510 	struct iwx_self_init_dram *dram = &sc->init_dram;
511 	int i;
512 
513 	if (!dram->paging)
514 		return;
515 
516 	/* free paging*/
517 	for (i = 0; i < dram->paging_cnt; i++)
518 		iwx_dma_contig_free(dram->paging);
519 
520 	free(dram->paging, M_DEVBUF, dram->paging_cnt * sizeof(*dram->paging));
521 	dram->paging_cnt = 0;
522 	dram->paging = NULL;
523 }
524 
525 int
526 iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
527 {
528 	int i = 0;
529 
530 	while (start < fws->fw_count &&
531 	       fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
532 	       fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
533 		start++;
534 		i++;
535 	}
536 
537 	return i;
538 }
539 
/*
 * Allocate DMA memory for all LMAC, UMAC and paging sections of the
 * firmware image and record their physical addresses in the context info
 * structure passed to the device.
 *
 * The firmware image lays sections out as:
 *   [lmac sections] <separator> [umac sections] <separator> [paging sections]
 * so the UMAC and paging offsets into fws->fw_sect[] are adjusted by 1 and
 * 2 respectively to skip the separator entries.
 *
 * On error this returns without freeing partial allocations; the caller
 * (iwx_ctxt_info_init) is responsible for cleaning up via
 * iwx_ctxt_info_free_fw_img()/iwx_ctxt_info_free_paging().
 * Returns 0 on success, ENOMEM or a DMA allocation error otherwise.
 */
int
iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
    struct iwx_context_info_dram *ctxt_dram)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i, ret, fw_cnt = 0;

	/* A leftover paging array would be leaked; must be freed by now. */
	KASSERT(dram->paging == NULL);

	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
	/* add 1 due to separator */
	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
	/* add 2 due to separators */
	dram->paging_cnt = iwx_get_num_sections(fws,
	    dram->lmac_cnt + dram->umac_cnt + 2);

	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
	    sizeof(*dram->fw), M_DEVBUF,  M_ZERO | M_NOWAIT);
	if (!dram->fw) {
		printf("%s: could not allocate memory for firmware sections\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->paging) {
		printf("%s: could not allocate memory for firmware paging\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	/* initialize lmac sections */
	for (i = 0; i < dram->lmac_cnt; i++) {
		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
						   &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		/* Device expects little-endian physical addresses. */
		ctxt_dram->lmac_img[i] =
			htole64(dram->fw[fw_cnt].paddr);
		DPRINTF(("%s: firmware LMAC section %d at 0x%llx size %lld\n", __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size));
		fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < dram->umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
			htole64(dram->fw[fw_cnt].paddr);
		DPRINTF(("%s: firmware UMAC section %d at 0x%llx size %lld\n", __func__, i,
			(unsigned long long)dram->fw[fw_cnt].paddr,
			(unsigned long long)dram->fw[fw_cnt].size));
		fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
	 * stored separately.
	 * This is since the timing of its release is different -
	 * while fw memory can be released on alive, the paging memory can be
	 * freed only when the device goes down.
	 * Given that, the logic here in accessing the fw image is a bit
	 * different - fw_cnt isn't changing so loop counter is added to it.
	 */
	for (i = 0; i < dram->paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = fw_cnt + i + 2;

		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_idx], &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
		DPRINTF(("%s: firmware paging section %d at 0x%llx size %lld\n", __func__, i,
		    (unsigned long long)dram->paging[i].paddr,
		    (unsigned long long)dram->paging[i].size));
	}

	return 0;
}
628 
629 int
630 iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
631     uint8_t min_power)
632 {
633 	struct iwx_dma_info *fw_mon = &sc->fw_mon;
634 	uint32_t size = 0;
635 	uint8_t power;
636 	int err;
637 
638 	if (fw_mon->size)
639 		return 0;
640 
641 	for (power = max_power; power >= min_power; power--) {
642 		size = (1 << power);
643 
644 		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
645 		if (err)
646 			continue;
647 
648 		DPRINTF(("%s: allocated 0x%08x bytes for firmware monitor.\n",
649 			 DEVNAME(sc), size));
650 		break;
651 	}
652 
653 	if (err) {
654 		fw_mon->size = 0;
655 		return err;
656 	}
657 
658 	if (power != max_power)
659 		DPRINTF(("%s: Sorry - debug buffer is only %luK while you requested %luK\n",
660 			DEVNAME(sc), (unsigned long)(1 << (power - 10)),
661 			(unsigned long)(1 << (max_power - 10))));
662 
663 	return 0;
664 }
665 
666 int
667 iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
668 {
669 	if (!max_power) {
670 		/* default max_power is maximum */
671 		max_power = 26;
672 	} else {
673 		max_power += 11;
674 	}
675 
676 	if (max_power > 26) {
677 		 DPRINTF(("%s: External buffer size for monitor is too big %d, "
678 		     "check the FW TLV\n", DEVNAME(sc), max_power));
679 		return 0;
680 	}
681 
682 	if (sc->fw_mon.size)
683 		return 0;
684 
685 	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
686 }
687 
688 int
689 iwx_apply_debug_destination(struct iwx_softc *sc)
690 {
691 	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
692 	int i, err;
693 	uint8_t mon_mode, size_power, base_shift, end_shift;
694 	uint32_t base_reg, end_reg;
695 
696 	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
697 	mon_mode = dest_v1->monitor_mode;
698 	size_power = dest_v1->size_power;
699 	base_reg = le32toh(dest_v1->base_reg);
700 	end_reg = le32toh(dest_v1->end_reg);
701 	base_shift = dest_v1->base_shift;
702 	end_shift = dest_v1->end_shift;
703 
704 	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));
705 
706 	if (mon_mode == EXTERNAL_MODE) {
707 		err = iwx_alloc_fw_monitor(sc, size_power);
708 		if (err)
709 			return err;
710 	}
711 
712 	if (!iwx_nic_lock(sc))
713 		return EBUSY;
714 
715 	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
716 		uint32_t addr, val;
717 		uint8_t op;
718 
719 		addr = le32toh(dest_v1->reg_ops[i].addr);
720 		val = le32toh(dest_v1->reg_ops[i].val);
721 		op = dest_v1->reg_ops[i].op;
722 
723 		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
724 		switch (op) {
725 		case CSR_ASSIGN:
726 			IWX_WRITE(sc, addr, val);
727 			break;
728 		case CSR_SETBIT:
729 			IWX_SETBITS(sc, addr, (1 << val));
730 			break;
731 		case CSR_CLEARBIT:
732 			IWX_CLRBITS(sc, addr, (1 << val));
733 			break;
734 		case PRPH_ASSIGN:
735 			iwx_write_prph(sc, addr, val);
736 			break;
737 		case PRPH_SETBIT:
738 			iwx_set_bits_prph(sc, addr, (1 << val));
739 			break;
740 		case PRPH_CLEARBIT:
741 			iwx_clear_bits_prph(sc, addr, (1 << val));
742 			break;
743 		case PRPH_BLOCKBIT:
744 			if (iwx_read_prph(sc, addr) & (1 << val))
745 				goto monitor;
746 			break;
747 		default:
748 			DPRINTF(("%s: FW debug - unknown OP %d\n",
749 			    DEVNAME(sc), op));
750 			break;
751 		}
752 	}
753 
754 monitor:
755 	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
756 		iwx_write_prph(sc, le32toh(base_reg),
757 		    sc->fw_mon.paddr >> base_shift);
758 		iwx_write_prph(sc, end_reg,
759 		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
760 		    >> end_shift);
761 	}
762 
763 	iwx_nic_unlock(sc);
764 	return 0;
765 }
766 
/*
 * Build the context information structure the device fetches at boot:
 * version header, RX buffer configuration, default RX queue descriptor
 * rings, the TX command queue, the DMA addresses of all firmware sections,
 * and (if present) the debug destination.  Finally hand the structure's
 * address to the device and kick firmware self-load.
 *
 * Returns 0 on success; on error the firmware section DMA memory is
 * released before returning.
 */
int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0, rb_size;
	uint64_t paddr;
	int err;

	ctxt_info = sc->ctxt_info_dma.vaddr;

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
		htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	/* Newer devices use 2K receive buffers, older ones 4K. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22560)
		rb_size = IWX_CTXT_INFO_RB_SIZE_2K;
	else
		rb_size = IWX_CTXT_INFO_RB_SIZE_4K;

	/* The RB circular-buffer size field is only 4 bits wide. */
	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF);
	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
			(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
			 IWX_CTXT_INFO_RB_CB_SIZE_POS) |
			(rb_size << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
		htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
		IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/* Configure debug, if exists */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
		err = iwx_apply_debug_destination(sc);
		if (err) {
			iwx_ctxt_info_free_fw_img(sc);
			return err;
		}
	}

	/*
	 * Write the context info DMA base address. The device expects a
	 * 64-bit address but a simple bus_space_write_8 to this register
	 * won't work on some devices, such as the AX201.
	 */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);

	/* kick FW self load */
	if (!iwx_nic_lock(sc))
		return EBUSY;
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}
843 
844 void
845 iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
846 {
847 	struct iwx_self_init_dram *dram = &sc->init_dram;
848 	int i;
849 
850 	if (!dram->fw)
851 		return;
852 
853 	for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
854 		iwx_dma_contig_free(&dram->fw[i]);
855 
856 	free(dram->fw, M_DEVBUF,
857 	    (dram->lmac_cnt + dram->umac_cnt) * sizeof(*dram->fw));
858 	dram->lmac_cnt = 0;
859 	dram->umac_cnt = 0;
860 	dram->fw = NULL;
861 }
862 
863 int
864 iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
865     uint8_t *data, size_t dlen)
866 {
867 	struct iwx_fw_sects *fws;
868 	struct iwx_fw_onesect *fwone;
869 
870 	if (type >= IWX_UCODE_TYPE_MAX)
871 		return EINVAL;
872 	if (dlen < sizeof(uint32_t))
873 		return EINVAL;
874 
875 	fws = &sc->sc_fw.fw_sects[type];
876 	DPRINTF(("%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count));
877 	if (fws->fw_count >= IWX_UCODE_SECT_MAX)
878 		return EINVAL;
879 
880 	fwone = &fws->fw_sect[fws->fw_count];
881 
882 	/* first 32bit are device load offset */
883 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
884 
885 	/* rest is data */
886 	fwone->fws_data = data + sizeof(uint32_t);
887 	fwone->fws_len = dlen - sizeof(uint32_t);
888 
889 	fws->fw_count++;
890 	fws->fw_totlen += fwone->fws_len;
891 
892 	return 0;
893 }
894 
895 #define IWX_DEFAULT_SCAN_CHANNELS	40
896 /* Newer firmware might support more channels. Raise this value if needed. */
897 #define IWX_MAX_SCAN_CHANNELS		52 /* as of 8265-34 firmware image */
898 
/* On-image layout of an IWX_UCODE_TLV_DEF_CALIB entry. */
struct iwx_tlv_calib_data {
	uint32_t ucode_type;		/* little-endian; see IWX_UCODE_TYPE_* */
	struct iwx_tlv_calib_ctrl calib;	/* default calibration triggers */
} __packed;
903 
904 int
905 iwx_set_default_calib(struct iwx_softc *sc, const void *data)
906 {
907 	const struct iwx_tlv_calib_data *def_calib = data;
908 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
909 
910 	if (ucode_type >= IWX_UCODE_TYPE_MAX)
911 		return EINVAL;
912 
913 	sc->sc_default_calib[ucode_type].flow_trigger =
914 	    def_calib->calib.flow_trigger;
915 	sc->sc_default_calib[ucode_type].event_trigger =
916 	    def_calib->calib.event_trigger;
917 
918 	return 0;
919 }
920 
921 void
922 iwx_fw_info_free(struct iwx_fw_info *fw)
923 {
924 	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
925 	fw->fw_rawdata = NULL;
926 	fw->fw_rawsize = 0;
927 	/* don't touch fw->fw_status */
928 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
929 }
930 
931 #define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
932 
933 int
934 iwx_read_firmware(struct iwx_softc *sc)
935 {
936 	struct iwx_fw_info *fw = &sc->sc_fw;
937 	struct iwx_tlv_ucode_header *uhdr;
938 	struct iwx_ucode_tlv tlv;
939 	uint32_t tlv_type;
940 	uint8_t *data;
941 	int err;
942 	size_t len;
943 
944 	if (fw->fw_status == IWX_FW_STATUS_DONE)
945 		return 0;
946 
947 	while (fw->fw_status == IWX_FW_STATUS_INPROGRESS)
948 		tsleep_nsec(&sc->sc_fw, 0, "iwxfwp", INFSLP);
949 	fw->fw_status = IWX_FW_STATUS_INPROGRESS;
950 
951 	if (fw->fw_rawdata != NULL)
952 		iwx_fw_info_free(fw);
953 
954 	err = loadfirmware(sc->sc_fwname,
955 	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
956 	if (err) {
957 		printf("%s: could not read firmware %s (error %d)\n",
958 		    DEVNAME(sc), sc->sc_fwname, err);
959 		goto out;
960 	}
961 
962 	sc->sc_capaflags = 0;
963 	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
964 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
965 
966 	uhdr = (void *)fw->fw_rawdata;
967 	if (*(uint32_t *)fw->fw_rawdata != 0
968 	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
969 		printf("%s: invalid firmware %s\n",
970 		    DEVNAME(sc), sc->sc_fwname);
971 		err = EINVAL;
972 		goto out;
973 	}
974 
975 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
976 	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
977 	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
978 	    IWX_UCODE_API(le32toh(uhdr->ver)));
979 	data = uhdr->data;
980 	len = fw->fw_rawsize - sizeof(*uhdr);
981 
982 	while (len >= sizeof(tlv)) {
983 		size_t tlv_len;
984 		void *tlv_data;
985 
986 		memcpy(&tlv, data, sizeof(tlv));
987 		tlv_len = le32toh(tlv.length);
988 		tlv_type = le32toh(tlv.type);
989 
990 		len -= sizeof(tlv);
991 		data += sizeof(tlv);
992 		tlv_data = data;
993 
994 		if (len < tlv_len) {
995 			printf("%s: firmware too short: %zu bytes\n",
996 			    DEVNAME(sc), len);
997 			err = EINVAL;
998 			goto parse_out;
999 		}
1000 
1001 		switch (tlv_type) {
1002 		case IWX_UCODE_TLV_PROBE_MAX_LEN:
1003 			if (tlv_len < sizeof(uint32_t)) {
1004 				err = EINVAL;
1005 				goto parse_out;
1006 			}
1007 			sc->sc_capa_max_probe_len
1008 			    = le32toh(*(uint32_t *)tlv_data);
1009 			if (sc->sc_capa_max_probe_len >
1010 			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
1011 				err = EINVAL;
1012 				goto parse_out;
1013 			}
1014 			break;
1015 		case IWX_UCODE_TLV_PAN:
1016 			if (tlv_len) {
1017 				err = EINVAL;
1018 				goto parse_out;
1019 			}
1020 			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
1021 			break;
1022 		case IWX_UCODE_TLV_FLAGS:
1023 			if (tlv_len < sizeof(uint32_t)) {
1024 				err = EINVAL;
1025 				goto parse_out;
1026 			}
1027 			/*
1028 			 * Apparently there can be many flags, but Linux driver
1029 			 * parses only the first one, and so do we.
1030 			 *
1031 			 * XXX: why does this override IWX_UCODE_TLV_PAN?
1032 			 * Intentional or a bug?  Observations from
1033 			 * current firmware file:
1034 			 *  1) TLV_PAN is parsed first
1035 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
1036 			 * ==> this resets TLV_PAN to itself... hnnnk
1037 			 */
1038 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
1039 			break;
1040 		case IWX_UCODE_TLV_CSCHEME:
1041 			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
1042 			if (err)
1043 				goto parse_out;
1044 			break;
1045 		case IWX_UCODE_TLV_NUM_OF_CPU: {
1046 			uint32_t num_cpu;
1047 			if (tlv_len != sizeof(uint32_t)) {
1048 				err = EINVAL;
1049 				goto parse_out;
1050 			}
1051 			num_cpu = le32toh(*(uint32_t *)tlv_data);
1052 			if (num_cpu < 1 || num_cpu > 2) {
1053 				err = EINVAL;
1054 				goto parse_out;
1055 			}
1056 			break;
1057 		}
1058 		case IWX_UCODE_TLV_SEC_RT:
1059 			err = iwx_firmware_store_section(sc,
1060 			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
1061 			if (err)
1062 				goto parse_out;
1063 			break;
1064 		case IWX_UCODE_TLV_SEC_INIT:
1065 			err = iwx_firmware_store_section(sc,
1066 			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
1067 			if (err)
1068 				goto parse_out;
1069 			break;
1070 		case IWX_UCODE_TLV_SEC_WOWLAN:
1071 			err = iwx_firmware_store_section(sc,
1072 			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
1073 			if (err)
1074 				goto parse_out;
1075 			break;
1076 		case IWX_UCODE_TLV_DEF_CALIB:
1077 			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
1078 				err = EINVAL;
1079 				goto parse_out;
1080 			}
1081 			err = iwx_set_default_calib(sc, tlv_data);
1082 			if (err)
1083 				goto parse_out;
1084 			break;
1085 		case IWX_UCODE_TLV_PHY_SKU:
1086 			if (tlv_len != sizeof(uint32_t)) {
1087 				err = EINVAL;
1088 				goto parse_out;
1089 			}
1090 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
1091 			break;
1092 
1093 		case IWX_UCODE_TLV_API_CHANGES_SET: {
1094 			struct iwx_ucode_api *api;
1095 			int idx, i;
1096 			if (tlv_len != sizeof(*api)) {
1097 				err = EINVAL;
1098 				goto parse_out;
1099 			}
1100 			api = (struct iwx_ucode_api *)tlv_data;
1101 			idx = le32toh(api->api_index);
1102 			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
1103 				err = EINVAL;
1104 				goto parse_out;
1105 			}
1106 			for (i = 0; i < 32; i++) {
1107 				if ((le32toh(api->api_flags) & (1 << i)) == 0)
1108 					continue;
1109 				setbit(sc->sc_ucode_api, i + (32 * idx));
1110 			}
1111 			break;
1112 		}
1113 
1114 		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
1115 			struct iwx_ucode_capa *capa;
1116 			int idx, i;
1117 			if (tlv_len != sizeof(*capa)) {
1118 				err = EINVAL;
1119 				goto parse_out;
1120 			}
1121 			capa = (struct iwx_ucode_capa *)tlv_data;
1122 			idx = le32toh(capa->api_index);
1123 			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
1124 				goto parse_out;
1125 			}
1126 			for (i = 0; i < 32; i++) {
1127 				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
1128 					continue;
1129 				setbit(sc->sc_enabled_capa, i + (32 * idx));
1130 			}
1131 			break;
1132 		}
1133 
1134 		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
1135 		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
1136 			/* ignore, not used by current driver */
1137 			break;
1138 
1139 		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
1140 			err = iwx_firmware_store_section(sc,
1141 			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
1142 			    tlv_len);
1143 			if (err)
1144 				goto parse_out;
1145 			break;
1146 
1147 		case IWX_UCODE_TLV_PAGING:
1148 			if (tlv_len != sizeof(uint32_t)) {
1149 				err = EINVAL;
1150 				goto parse_out;
1151 			}
1152 			break;
1153 
1154 		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
1155 			if (tlv_len != sizeof(uint32_t)) {
1156 				err = EINVAL;
1157 				goto parse_out;
1158 			}
1159 			sc->sc_capa_n_scan_channels =
1160 			  le32toh(*(uint32_t *)tlv_data);
1161 			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
1162 				err = ERANGE;
1163 				goto parse_out;
1164 			}
1165 			break;
1166 
1167 		case IWX_UCODE_TLV_FW_VERSION:
1168 			if (tlv_len != sizeof(uint32_t) * 3) {
1169 				err = EINVAL;
1170 				goto parse_out;
1171 			}
1172 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
1173 			    "%u.%u.%u",
1174 			    le32toh(((uint32_t *)tlv_data)[0]),
1175 			    le32toh(((uint32_t *)tlv_data)[1]),
1176 			    le32toh(((uint32_t *)tlv_data)[2]));
1177 			break;
1178 
1179 		case IWX_UCODE_TLV_FW_DBG_DEST: {
1180 			struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
1181 
1182 			fw->dbg_dest_ver = (uint8_t *)tlv_data;
1183 			if (*fw->dbg_dest_ver != 0) {
1184 				err = EINVAL;
1185 				goto parse_out;
1186 			}
1187 
1188 			if (fw->dbg_dest_tlv_init)
1189 				break;
1190 			fw->dbg_dest_tlv_init = true;
1191 
1192 			dest_v1 = (void *)tlv_data;
1193 			fw->dbg_dest_tlv_v1 = dest_v1;
1194 			fw->n_dest_reg = tlv_len -
1195 			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
1196 			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
1197 			DPRINTF(("%s: found debug dest; n_dest_reg=%d\n", __func__, fw->n_dest_reg));
1198 			break;
1199 		}
1200 
1201 		case IWX_UCODE_TLV_FW_DBG_CONF: {
1202 			struct iwx_fw_dbg_conf_tlv *conf = (void *)tlv_data;
1203 
1204 			if (!fw->dbg_dest_tlv_init ||
1205 			    conf->id >= nitems(fw->dbg_conf_tlv) ||
1206 			    fw->dbg_conf_tlv[conf->id] != NULL)
1207 				break;
1208 
1209 			DPRINTF(("Found debug configuration: %d\n", conf->id));
1210 			fw->dbg_conf_tlv[conf->id] = conf;
1211 			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
1212 			break;
1213 		}
1214 
1215 		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
1216 			struct iwx_umac_debug_addrs *dbg_ptrs =
1217 				(void *)tlv_data;
1218 
1219 			if (tlv_len != sizeof(*dbg_ptrs)) {
1220 				err = EINVAL;
1221 				goto parse_out;
1222 			}
1223 			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1224 				break;
1225 			sc->sc_uc.uc_umac_error_event_table =
1226 				le32toh(dbg_ptrs->error_info_addr) &
1227 				~IWX_FW_ADDR_CACHE_CONTROL;
1228 			sc->sc_uc.error_event_table_tlv_status |=
1229 				IWX_ERROR_EVENT_TABLE_UMAC;
1230 			break;
1231 		}
1232 
1233 		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
1234 			struct iwx_lmac_debug_addrs *dbg_ptrs =
1235 				(void *)tlv_data;
1236 
1237 			if (tlv_len != sizeof(*dbg_ptrs)) {
1238 				err = EINVAL;
1239 				goto parse_out;
1240 			}
1241 			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1242 				break;
1243 			sc->sc_uc.uc_lmac_error_event_table[0] =
1244 				le32toh(dbg_ptrs->error_event_table_ptr) &
1245 				~IWX_FW_ADDR_CACHE_CONTROL;
1246 			sc->sc_uc.error_event_table_tlv_status |=
1247 				IWX_ERROR_EVENT_TABLE_LMAC1;
1248 			break;
1249 		}
1250 
1251 		case IWX_UCODE_TLV_FW_MEM_SEG:
1252 			break;
1253 
1254 		case IWX_UCODE_TLV_CMD_VERSIONS:
1255 			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
1256 				tlv_len /= sizeof(struct iwx_fw_cmd_version);
1257 				tlv_len *= sizeof(struct iwx_fw_cmd_version);
1258 			}
1259 			if (sc->n_cmd_versions != 0) {
1260 				err = EINVAL;
1261 				goto parse_out;
1262 			}
1263 			if (tlv_len > sizeof(sc->cmd_versions)) {
1264 				err = EINVAL;
1265 				goto parse_out;
1266 			}
1267 			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
1268 			sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
1269 			break;
1270 
1271 		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
1272 			break;
1273 
1274 		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
1275 			break;
1276 
1277 		/* undocumented TLVs found in iwx-cc-a0-46 image */
1278 		case 58:
1279 		case 0x1000003:
1280 		case 0x1000004:
1281 			break;
1282 
1283 		/* undocumented TLVs found in iwx-cc-a0-48 image */
1284 		case 0x1000000:
1285 		case 0x1000002:
1286 			break;
1287 
1288 		default:
1289 			err = EINVAL;
1290 			goto parse_out;
1291 		}
1292 
1293 		len -= roundup(tlv_len, 4);
1294 		data += roundup(tlv_len, 4);
1295 	}
1296 
1297 	KASSERT(err == 0);
1298 
1299  parse_out:
1300 	if (err) {
1301 		printf("%s: firmware parse error %d, "
1302 		    "section type %d\n", DEVNAME(sc), err, tlv_type);
1303 	}
1304 
1305  out:
1306 	if (err) {
1307 		fw->fw_status = IWX_FW_STATUS_NONE;
1308 		if (fw->fw_rawdata != NULL)
1309 			iwx_fw_info_free(fw);
1310 	} else
1311 		fw->fw_status = IWX_FW_STATUS_DONE;
1312 	wakeup(&sc->sc_fw);
1313 
1314 	return err;
1315 }
1316 
1317 uint32_t
1318 iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
1319 {
1320 	iwx_nic_assert_locked(sc);
1321 	IWX_WRITE(sc,
1322 	    IWX_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
1323 	IWX_BARRIER_READ_WRITE(sc);
1324 	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
1325 }
1326 
1327 void
1328 iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1329 {
1330 	iwx_nic_assert_locked(sc);
1331 	IWX_WRITE(sc,
1332 	    IWX_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
1333 	IWX_BARRIER_WRITE(sc);
1334 	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
1335 }
1336 
1337 void
1338 iwx_write_prph64(struct iwx_softc *sc, uint64_t addr, uint64_t val)
1339 {
1340 	iwx_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
1341 	iwx_write_prph(sc, (uint32_t)addr + 4, val >> 32);
1342 }
1343 
1344 int
1345 iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1346 {
1347 	int offs, err = 0;
1348 	uint32_t *vals = buf;
1349 
1350 	if (iwx_nic_lock(sc)) {
1351 		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1352 		for (offs = 0; offs < dwords; offs++)
1353 			vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1354 		iwx_nic_unlock(sc);
1355 	} else {
1356 		err = EBUSY;
1357 	}
1358 	return err;
1359 }
1360 
1361 int
1362 iwx_write_mem(struct iwx_softc *sc, uint32_t addr, const void *buf, int dwords)
1363 {
1364 	int offs;
1365 	const uint32_t *vals = buf;
1366 
1367 	if (iwx_nic_lock(sc)) {
1368 		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WADDR, addr);
1369 		/* WADDR auto-increments */
1370 		for (offs = 0; offs < dwords; offs++) {
1371 			uint32_t val = vals ? vals[offs] : 0;
1372 			IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WDAT, val);
1373 		}
1374 		iwx_nic_unlock(sc);
1375 	} else {
1376 		return EBUSY;
1377 	}
1378 	return 0;
1379 }
1380 
1381 int
1382 iwx_write_mem32(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1383 {
1384 	return iwx_write_mem(sc, addr, &val, 1);
1385 }
1386 
1387 int
1388 iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
1389     int timo)
1390 {
1391 	for (;;) {
1392 		if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
1393 			return 1;
1394 		}
1395 		if (timo < 10) {
1396 			return 0;
1397 		}
1398 		timo -= 10;
1399 		DELAY(10);
1400 	}
1401 }
1402 
1403 int
1404 iwx_nic_lock(struct iwx_softc *sc)
1405 {
1406 	if (sc->sc_nic_locks > 0) {
1407 		iwx_nic_assert_locked(sc);
1408 		sc->sc_nic_locks++;
1409 		return 1; /* already locked */
1410 	}
1411 
1412 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1413 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1414 
1415 	DELAY(2);
1416 
1417 	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
1418 	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1419 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1420 	     | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1421 		sc->sc_nic_locks++;
1422 		return 1;
1423 	}
1424 
1425 	printf("%s: acquiring device failed\n", DEVNAME(sc));
1426 	return 0;
1427 }
1428 
1429 void
1430 iwx_nic_assert_locked(struct iwx_softc *sc)
1431 {
1432 	uint32_t reg = IWX_READ(sc, IWX_CSR_GP_CNTRL);
1433 	if ((reg & IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) == 0)
1434 		panic("%s: mac clock not ready", DEVNAME(sc));
1435 	if (reg & IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)
1436 		panic("%s: mac gone to sleep", DEVNAME(sc));
1437 	if (sc->sc_nic_locks <= 0)
1438 		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1439 }
1440 
1441 void
1442 iwx_nic_unlock(struct iwx_softc *sc)
1443 {
1444 	if (sc->sc_nic_locks > 0) {
1445 		if (--sc->sc_nic_locks == 0)
1446 			IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1447 			    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1448 	} else
1449 		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1450 }
1451 
1452 void
1453 iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1454     uint32_t mask)
1455 {
1456 	uint32_t val;
1457 
1458 	/* XXX: no error path? */
1459 	if (iwx_nic_lock(sc)) {
1460 		val = iwx_read_prph(sc, reg) & mask;
1461 		val |= bits;
1462 		iwx_write_prph(sc, reg, val);
1463 		iwx_nic_unlock(sc);
1464 	}
1465 }
1466 
/* OR 'bits' into a periphery register, preserving all other bits. */
void
iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
{
	iwx_set_bits_mask_prph(sc, reg, bits, ~0);
}
1472 
/* Clear 'bits' in a periphery register, preserving all other bits. */
void
iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
{
	iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
}
1478 
1479 int
1480 iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
1481     bus_size_t size, bus_size_t alignment)
1482 {
1483 	int nsegs, err;
1484 	caddr_t va;
1485 
1486 	dma->tag = tag;
1487 	dma->size = size;
1488 
1489 	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1490 	    &dma->map);
1491 	if (err)
1492 		goto fail;
1493 
1494 	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1495 	    BUS_DMA_NOWAIT);
1496 	if (err)
1497 		goto fail;
1498 
1499 	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1500 	    BUS_DMA_NOWAIT);
1501 	if (err)
1502 		goto fail;
1503 	dma->vaddr = va;
1504 
1505 	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1506 	    BUS_DMA_NOWAIT);
1507 	if (err)
1508 		goto fail;
1509 
1510 	memset(dma->vaddr, 0, size);
1511 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1512 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1513 
1514 	return 0;
1515 
1516 fail:	iwx_dma_contig_free(dma);
1517 	return err;
1518 }
1519 
1520 void
1521 iwx_dma_contig_free(struct iwx_dma_info *dma)
1522 {
1523 	if (dma->map != NULL) {
1524 		if (dma->vaddr != NULL) {
1525 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1526 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1527 			bus_dmamap_unload(dma->tag, dma->map);
1528 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1529 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1530 			dma->vaddr = NULL;
1531 		}
1532 		bus_dmamap_destroy(dma->tag, dma->map);
1533 		dma->map = NULL;
1534 	}
1535 }
1536 
/*
 * Allocate DMA descriptors, the status area, and per-slot mbuf maps
 * for the RX ring, and fill every slot with a receive buffer.
 * Returns 0 on success; on error all partial state is freed.
 */
int
iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWX_RX_MQ_RING_COUNT * sizeof(uint64_t);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
	if (err) {
		printf("%s: could not allocate RX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->desc = ring->free_desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		printf("%s: could not allocate RX status DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Allocate the "used" descriptor ring (256-byte aligned). */
	size = IWX_RX_MQ_RING_COUNT * sizeof(uint32_t);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
	    size, 256);
	if (err) {
		printf("%s: could not allocate RX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}

	/* Create a DMA map for each slot and attach an mbuf right away. */
	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
		struct iwx_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWX_RBUF_SIZE, 1,
		    IWX_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			printf("%s: could not create RX buf DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}

		err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwx_free_rx_ring(sc, ring);
	return err;
}
1596 
1597 void
1598 iwx_disable_rx_dma(struct iwx_softc *sc)
1599 {
1600 	int ntries;
1601 
1602 	if (iwx_nic_lock(sc)) {
1603 		iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
1604 		for (ntries = 0; ntries < 1000; ntries++) {
1605 			if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
1606 			    IWX_RXF_DMA_IDLE)
1607 				break;
1608 			DELAY(10);
1609 		}
1610 		iwx_nic_unlock(sc);
1611 	}
1612 }
1613 
/*
 * Reset RX ring software state and clear the RX status area shared
 * with the device.
 */
void
iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
{
	ring->cur = 0;
	/*
	 * NOTE(review): the PREWRITE sync precedes the CPU memset and the
	 * POSTWRITE sync follows it, which is the reverse of the usual
	 * bus_dmamap_sync(9) write pattern (CPU write, then PREWRITE
	 * before device access) -- confirm intent.
	 */
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);

}
1625 
1626 void
1627 iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1628 {
1629 	int i;
1630 
1631 	iwx_dma_contig_free(&ring->free_desc_dma);
1632 	iwx_dma_contig_free(&ring->stat_dma);
1633 	iwx_dma_contig_free(&ring->used_desc_dma);
1634 
1635 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1636 		struct iwx_rx_data *data = &ring->data[i];
1637 
1638 		if (data->m != NULL) {
1639 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1640 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1641 			bus_dmamap_unload(sc->sc_dmat, data->map);
1642 			m_freem(data->m);
1643 			data->m = NULL;
1644 		}
1645 		if (data->map != NULL)
1646 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1647 	}
1648 }
1649 
/*
 * Allocate DMA resources for one TX ring: descriptors always; the
 * byte count table, command buffers, and per-slot DMA maps only for
 * rings that are actually used (qid <= IWX_DQA_MIN_MGMT_QUEUE).
 * On error all partial state is freed via iwx_free_tx_ring().
 */
int
iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;
	ring->tail = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		printf("%s: could not allocate TX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * There is no need to allocate DMA buffers for unused rings.
	 * The hardware supports up to 31 Tx rings which is more
	 * than we currently need.
	 *
	 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
	 * The command is queue 0 (sc->txq[0]), and 4 mgmt/data frame queues
	 * are sc->tqx[ac + IWX_DQA_AUX_QUEUE + 1], i.e. sc->txq[2:5],
	 * in order to provide one queue per EDCA category.
	 *
	 * Tx aggregation will require additional queues (one queue per TID
	 * for which aggregation is enabled) but we do not implement this yet.
	 */
	if (qid > IWX_DQA_MIN_MGMT_QUEUE)
		return 0;

	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl,
	    sizeof(struct iwx_agn_scd_bc_tbl), 0);
	if (err) {
		printf("%s: could not allocate byte count table DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}

	/* One device command buffer per ring slot. */
	size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
	    IWX_FIRST_TB_SIZE_ALIGN);
	if (err) {
		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
		struct iwx_tx_data *data = &ring->data[i];
		size_t mapsize;

		/* Remember where this slot's command lives in DMA space. */
		data->cmd_paddr = paddr;
		paddr += sizeof(struct iwx_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWX_DQA_CMD_QUEUE)
			mapsize = (sizeof(struct iwx_cmd_header) +
			    IWX_MAX_CMD_PAYLOAD_SIZE);
		else
			mapsize = MCLBYTES;
		err = bus_dmamap_create(sc->sc_dmat, mapsize,
		    IWX_TFH_NUM_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (err) {
			printf("%s: could not create TX buf DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}
	}
	/* We must have advanced by exactly one command per slot. */
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwx_free_tx_ring(sc, ring);
	return err;
}
1734 
1735 void
1736 iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
1737 {
1738 	int i;
1739 
1740 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1741 		struct iwx_tx_data *data = &ring->data[i];
1742 
1743 		if (data->m != NULL) {
1744 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1745 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1746 			bus_dmamap_unload(sc->sc_dmat, data->map);
1747 			m_freem(data->m);
1748 			data->m = NULL;
1749 		}
1750 	}
1751 
1752 	/* Clear byte count table. */
1753 	memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
1754 
1755 	/* Clear TX descriptors. */
1756 	memset(ring->desc, 0, ring->desc_dma.size);
1757 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1758 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1759 	sc->qfullmsk &= ~(1 << ring->qid);
1760 	ring->queued = 0;
1761 	ring->cur = 0;
1762 	ring->tail = 0;
1763 }
1764 
1765 void
1766 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
1767 {
1768 	int i;
1769 
1770 	iwx_dma_contig_free(&ring->desc_dma);
1771 	iwx_dma_contig_free(&ring->cmd_dma);
1772 	iwx_dma_contig_free(&ring->bc_tbl);
1773 
1774 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
1775 		struct iwx_tx_data *data = &ring->data[i];
1776 
1777 		if (data->m != NULL) {
1778 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1779 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1780 			bus_dmamap_unload(sc->sc_dmat, data->map);
1781 			m_freem(data->m);
1782 			data->m = NULL;
1783 		}
1784 		if (data->map != NULL)
1785 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1786 	}
1787 }
1788 
1789 void
1790 iwx_enable_rfkill_int(struct iwx_softc *sc)
1791 {
1792 	if (!sc->sc_msix) {
1793 		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
1794 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1795 	} else {
1796 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1797 		    sc->sc_fh_init_mask);
1798 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1799 		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
1800 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1801 	}
1802 
1803 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1804 	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1805 }
1806 
1807 int
1808 iwx_check_rfkill(struct iwx_softc *sc)
1809 {
1810 	uint32_t v;
1811 	int s;
1812 	int rv;
1813 
1814 	s = splnet();
1815 
1816 	/*
1817 	 * "documentation" is not really helpful here:
1818 	 *  27:	HW_RF_KILL_SW
1819 	 *	Indicates state of (platform's) hardware RF-Kill switch
1820 	 *
1821 	 * But apparently when it's off, it's on ...
1822 	 */
1823 	v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
1824 	rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1825 	if (rv) {
1826 		sc->sc_flags |= IWX_FLAG_RFKILL;
1827 	} else {
1828 		sc->sc_flags &= ~IWX_FLAG_RFKILL;
1829 	}
1830 
1831 	splx(s);
1832 	return rv;
1833 }
1834 
1835 void
1836 iwx_enable_interrupts(struct iwx_softc *sc)
1837 {
1838 	if (!sc->sc_msix) {
1839 		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
1840 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1841 	} else {
1842 		/*
1843 		 * fh/hw_mask keeps all the unmasked causes.
1844 		 * Unlike msi, in msix cause is enabled when it is unset.
1845 		 */
1846 		sc->sc_hw_mask = sc->sc_hw_init_mask;
1847 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1848 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1849 		    ~sc->sc_fh_mask);
1850 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1851 		    ~sc->sc_hw_mask);
1852 	}
1853 }
1854 
1855 void
1856 iwx_enable_fwload_interrupt(struct iwx_softc *sc)
1857 {
1858 	if (!sc->sc_msix) {
1859 		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
1860 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
1861 	} else {
1862 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1863 		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
1864 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
1865 		/*
1866 		 * Leave all the FH causes enabled to get the ALIVE
1867 		 * notification.
1868 		 */
1869 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1870 		    ~sc->sc_fh_init_mask);
1871 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1872 	}
1873 }
1874 
/* Re-arm the interrupt mask previously saved in sc_intmask (MSI mode). */
void
iwx_restore_interrupts(struct iwx_softc *sc)
{
	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
}
1880 
1881 void
1882 iwx_disable_interrupts(struct iwx_softc *sc)
1883 {
1884 	int s = splnet();
1885 
1886 	if (!sc->sc_msix) {
1887 		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
1888 
1889 		/* acknowledge all interrupts */
1890 		IWX_WRITE(sc, IWX_CSR_INT, ~0);
1891 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
1892 	} else {
1893 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
1894 		    sc->sc_fh_init_mask);
1895 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
1896 		    sc->sc_hw_init_mask);
1897 	}
1898 
1899 	splx(s);
1900 }
1901 
/*
 * Reset the interrupt cause table (ICT) and switch the driver over to
 * ICT interrupt mode. Interrupts are disabled while the table is
 * cleared and re-enabled afterwards.
 */
void
iwx_ict_reset(struct iwx_softc *sc)
{
	iwx_disable_interrupts(sc);

	/* Start over with an empty table. */
	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
	    IWX_CSR_DRAM_INT_TBL_ENABLE
	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWX_FLAG_USE_ICT;

	/* Ack all pending causes, then unmask. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);
	iwx_enable_interrupts(sc);
}
1923 
#define IWX_HW_READY_TIMEOUT 50
/*
 * Assert the driver's "NIC ready" bit and poll for the device to
 * acknowledge it.  Returns non-zero if the device became ready.
 */
int
iwx_set_hw_ready(struct iwx_softc *sc)
{
	int ready;

	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_HW_READY_TIMEOUT);
	if (ready)
		/* Tell the device that the OS/driver is up. */
		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);

	return ready;
}
#undef IWX_HW_READY_TIMEOUT
1944 
/*
 * Bring the device out of its low-power "prepared" state so the
 * driver can take ownership.  Returns 0 when the device is ready,
 * ETIMEDOUT otherwise.
 */
int
iwx_prepare_card_hw(struct iwx_softc *sc)
{
	int t = 0;

	if (iwx_set_hw_ready(sc))
		return 0;

	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(1000);


	/* If HW is not ready, prepare the conditions to check again */
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);

	/* Poll for readiness for up to 150ms. */
	do {
		if (iwx_set_hw_ready(sc))
			return 0;
		DELAY(200);
		t += 200;
	} while (t < 150000);

	return ETIMEDOUT;
}
1971 
/*
 * Force the device through a power-gating cycle by toggling the HPM
 * force-active bit around enabling power/sleep gating.  Called for
 * integrated devices during iwx_start_hw().
 */
void
iwx_force_power_gating(struct iwx_softc *sc)
{
	iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	DELAY(20);
	iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
	DELAY(20);
	iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
}
1985 
/*
 * Configure PCIe power management: always disable ASPM L0s, and
 * record whether device power-save (L1) and LTR may be used.
 */
void
iwx_apm_config(struct iwx_softc *sc)
{
	pcireg_t lctl, cap;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);

	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCI_PCIE_LCSR);
	/* Power management is only supported when ASPM L0s is off. */
	sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCI_PCIE_DCSR2);
	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
	    DEVNAME(sc),
	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
	    sc->sc_ltr_enabled ? "En" : "Dis"));
}
2009 
2010 /*
2011  * Start up NIC's basic functionality after it has been reset
2012  * e.g. after platform boot or shutdown.
2013  * NOTE:  This does not load uCode nor start the embedded processor
2014  */
int
iwx_apm_init(struct iwx_softc *sc)
{
	int err = 0;

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwx_apm_config(sc);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwx_write_prph()
	 * and accesses to uCode SRAM.
	 */
	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
		printf("%s: timeout waiting for clock stabilization\n",
		    DEVNAME(sc));
		err = ETIMEDOUT;
		goto out;
	}
 out:
	/* Returns 0 on success, ETIMEDOUT if the MAC clock never stabilized. */
	if (err)
		printf("%s: apm init error %d\n", DEVNAME(sc), err);
	return err;
}
2063 
/*
 * Stop the device's basic power/clock machinery: halt busmaster DMA
 * and drop the adapter back into its uninitialized (D0U*) state.
 * Counterpart of iwx_apm_init().
 */
void
iwx_apm_stop(struct iwx_softc *sc)
{
	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
	DELAY(1000);
	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(5000);

	/* stop device's busmaster DMA activity */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		printf("%s: timeout waiting for master\n", DEVNAME(sc));

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
2092 
/*
 * Program the MSI-X IVAR tables and capture the device's post-reset
 * default interrupt masks so they can be restored later.
 */
void
iwx_init_msix_hw(struct iwx_softc *sc)
{
	iwx_conf_msix_hw(sc, 0);

	if (!sc->sc_msix)
		return;

	/* Snapshot the masks as left by iwx_conf_msix_hw(). */
	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
	sc->sc_fh_mask = sc->sc_fh_init_mask;
	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
	sc->sc_hw_mask = sc->sc_hw_init_mask;
}
2106 
/*
 * Configure the device's interrupt delivery mode and, in MSI-X mode,
 * map all RX queues and non-RX causes onto a single vector (vector 0).
 * 'stopped' is non-zero when the device has been stopped, in which case
 * the UREG_CHICK register must not be touched.
 */
void
iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
{
	int vector = 0;

	if (!sc->sc_msix) {
		/* Newer chips default to MSIX. */
		if (!stopped && iwx_nic_lock(sc)) {
			iwx_write_prph(sc, IWX_UREG_CHICK,
			    IWX_UREG_CHICK_MSI_ENABLE);
			iwx_nic_unlock(sc);
		}
		return;
	}

	if (!stopped && iwx_nic_lock(sc)) {
		iwx_write_prph(sc, IWX_UREG_CHICK, IWX_UREG_CHICK_MSIX_ENABLE);
		iwx_nic_unlock(sc);
	}

	/* Disable all interrupts */
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);

	/* Map fallback-queue (command/mgmt) to a single vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	/* Map RSS queue (data) to the same vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);

	/* Enable the RX queues cause interrupts */
	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);

	/* Map non-RX causes to the same vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_IML),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);

	/* Enable non-RX causes interrupts */
	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
	    IWX_MSIX_FH_INT_CAUSES_S2D |
	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
	    IWX_MSIX_HW_INT_CAUSES_REG_IML |
	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
}
2193 
2194 int
2195 iwx_start_hw(struct iwx_softc *sc)
2196 {
2197 	int err;
2198 	int t = 0;
2199 
2200 	err = iwx_prepare_card_hw(sc);
2201 	if (err)
2202 		return err;
2203 
2204 	/* Reset the entire device */
2205 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2206 	DELAY(5000);
2207 
2208 	if (sc->sc_integrated) {
2209 		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2210 		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2211 		DELAY(20);
2212 		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2213 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2214 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2215 			printf("%s: timeout waiting for clock stabilization\n",
2216 			    DEVNAME(sc));
2217 			return ETIMEDOUT;
2218 		}
2219 
2220 		iwx_force_power_gating(sc);
2221 
2222 		/* Reset the entire device */
2223 		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2224 		DELAY(5000);
2225 	}
2226 
2227 	err = iwx_apm_init(sc);
2228 	if (err)
2229 		return err;
2230 
2231 	iwx_init_msix_hw(sc);
2232 
2233 	while (t < 150000 && !iwx_set_hw_ready(sc)) {
2234 		DELAY(200);
2235 		t += 200;
2236 		if (iwx_set_hw_ready(sc)) {
2237 			break;
2238 		}
2239 	}
2240 	if (t >= 150000)
2241 		return ETIMEDOUT;
2242 
2243 	iwx_enable_rfkill_int(sc);
2244 	iwx_check_rfkill(sc);
2245 
2246 	return 0;
2247 }
2248 
/*
 * Stop the device: quiesce interrupts and DMA, reset rings and the
 * on-board processor, and leave the hardware in low power state while
 * keeping the RF-kill interrupt armed.
 */
void
iwx_stop_device(struct iwx_softc *sc)
{
	int qid;

	iwx_disable_interrupts(sc);
	sc->sc_flags &= ~IWX_FLAG_USE_ICT;

	iwx_disable_rx_dma(sc);
	iwx_reset_rx_ring(sc, &sc->rxq);
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwx_reset_tx_ring(sc, &sc->txq[qid]);

	/* Make sure (redundant) we've released our request to stay awake */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (sc->sc_nic_locks > 0)
		printf("%s: %d active NIC locks forcefully cleared\n",
		    DEVNAME(sc), sc->sc_nic_locks);
	sc->sc_nic_locks = 0;

	/* Stop the device, and put it in low power state */
	iwx_apm_stop(sc);

	/* Reset the on-board processor. */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwx_conf_msix_hw(sc, 1);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again.
	 */
	iwx_disable_interrupts(sc);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	iwx_prepare_card_hw(sc);

	iwx_ctxt_info_free_paging(sc);
}
2300 
/*
 * Program the hardware interface register with the MAC revision and
 * the radio type/step/dash taken from the firmware's PHY configuration.
 */
void
iwx_nic_config(struct iwx_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t mask, val, reg_val = 0;

	/* Decode radio configuration from the firmware PHY config word. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
	    IWX_FW_PHY_CFG_RADIO_DASH_POS;

	/* MAC revision (step/dash) from the hardware revision register. */
	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;

	/* Read-modify-write so unrelated bits are preserved. */
	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
	val &= ~mask;
	val |= reg_val;
	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
}
2337 
/*
 * Minimal RX init: set the interrupt coalescing timer.
 * Always returns 0.
 */
int
iwx_nic_rx_init(struct iwx_softc *sc)
{
	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);

	/*
	 * We don't configure the RFH; the firmware will do that.
	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
	 */
	return 0;
}
2349 
2350 int
2351 iwx_nic_init(struct iwx_softc *sc)
2352 {
2353 	int err;
2354 
2355 	iwx_apm_init(sc);
2356 	iwx_nic_config(sc);
2357 
2358 	err = iwx_nic_rx_init(sc);
2359 	if (err)
2360 		return err;
2361 
2362 	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2363 
2364 	return 0;
2365 }
2366 
/*
 * Map ieee80211_edca_ac categories to firmware Tx FIFO.
 * Indexed by EDCA access category; order matches the enum
 * (best effort, background, video, voice).
 */
const uint8_t iwx_ac_to_tx_fifo[] = {
	IWX_GEN2_EDCA_TX_FIFO_BE,
	IWX_GEN2_EDCA_TX_FIFO_BK,
	IWX_GEN2_EDCA_TX_FIFO_VI,
	IWX_GEN2_EDCA_TX_FIFO_VO,
};
2374 
/*
 * Ask the firmware to enable Tx queue 'qid' for station 'sta_id' and
 * TID 'tid' with 'num_slots' descriptors, and verify that the firmware
 * assigned the queue ID and write pointer we expect.
 * Returns 0 on success or an errno.
 */
int
iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
    int num_slots)
{
	struct iwx_tx_queue_cfg_cmd cmd;
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_host_cmd hcmd = {
		.id = IWX_SCD_QUEUE_CFG,
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, fwqid;
	uint32_t wr_idx;
	size_t resp_len;

	/* Start from a clean ring; firmware expects an empty queue. */
	iwx_reset_tx_ring(sc, ring);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta_id;
	cmd.tid = tid;
	cmd.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
	cmd.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
	/* DMA addresses of the byte-count table and TFD ring. */
	cmd.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
	cmd.tfdq_addr = htole64(ring->desc_dma.paddr);

	hcmd.data[0] = &cmd;
	hcmd.len[0] = sizeof(cmd);

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		DPRINTF(("SCD_QUEUE_CFG command failed\n"));
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		DPRINTF(("SCD_QUEUE_CFG returned %zu bytes, expected %zu bytes\n", resp_len, sizeof(*resp)));
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;
	fwqid = le16toh(resp->queue_number);
	wr_idx = le16toh(resp->write_pointer);

	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
	if (fwqid != qid) {
		DPRINTF(("requested qid %d but %d was assigned\n", qid, fwqid));
		err = EIO;
		goto out;
	}

	/* Firmware's write pointer must match our (reset) ring position. */
	if (wr_idx != ring->cur) {
		DPRINTF(("fw write index is %d but ring is %d\n", wr_idx, ring->cur));
		err = EIO;
		goto out;
	}
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
2443 
/*
 * Called once the firmware has sent its ALIVE notification;
 * re-arm ICT interrupt delivery.
 */
void
iwx_post_alive(struct iwx_softc *sc)
{
	iwx_ict_reset(sc);
}
2449 
2450 /*
2451  * For the high priority TE use a time event type that has similar priority to
2452  * the FW's action scan priority.
2453  */
2454 #define IWX_ROC_TE_TYPE_NORMAL IWX_TE_P2P_DEVICE_DISCOVERABLE
2455 #define IWX_ROC_TE_TYPE_MGMT_TX IWX_TE_P2P_CLIENT_ASSOC
2456 
/*
 * Send a TIME_EVENT command to the firmware and, on success, record
 * the unique ID the firmware assigned so the event can be removed
 * later (see iwx_unprotect_session()).  Returns 0 or an errno.
 */
int
iwx_send_time_event_cmd(struct iwx_softc *sc,
    const struct iwx_time_event_cmd *cmd)
{
	struct iwx_rx_packet *pkt;
	struct iwx_time_event_resp *resp;
	struct iwx_host_cmd hcmd = {
		.id = IWX_TIME_EVENT_CMD,
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	uint32_t resp_len;
	int err;

	hcmd.data[0] = cmd;
	hcmd.len[0] = sizeof(*cmd);
	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;
	/* status 0 means the firmware accepted the time event. */
	if (le32toh(resp->status) == 0)
		sc->sc_time_event_uid = le32toh(resp->unique_id);
	else
		err = EIO;
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
2498 
/*
 * Schedule a firmware time event that protects the association
 * handshake with the AP for 'duration', starting within 'max_delay'
 * (both in TU).  No-op if a time event is already active.
 */
void
iwx_protect_session(struct iwx_softc *sc, struct iwx_node *in,
    uint32_t duration, uint32_t max_delay)
{
	struct iwx_time_event_cmd time_cmd;

	/* Do nothing if a time event is already scheduled. */
	if (sc->sc_flags & IWX_FLAG_TE_ACTIVE)
		return;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWX_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWX_TE_BSS_STA_AGGRESSIVE_ASSOC);

	/* apply_time of 0: firmware starts the event immediately. */
	time_cmd.apply_time = htole32(0);

	time_cmd.max_frags = IWX_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy
	    = htole16(IWX_TE_V2_NOTIF_HOST_EVENT_START |
	        IWX_TE_V2_NOTIF_HOST_EVENT_END |
		IWX_T2_V2_START_IMMEDIATELY);

	if (iwx_send_time_event_cmd(sc, &time_cmd) == 0)
		sc->sc_flags |= IWX_FLAG_TE_ACTIVE;

	DELAY(100);
}
2534 
/*
 * Remove the session-protection time event created by
 * iwx_protect_session().  No-op if no time event is active.
 */
void
iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
{
	struct iwx_time_event_cmd time_cmd;

	/* Do nothing if the time event has already ended. */
	if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
		return;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWX_FW_CTXT_ACTION_REMOVE);
	time_cmd.id_and_color =
	    htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	/* ID assigned by firmware when the event was added. */
	time_cmd.id = htole32(sc->sc_time_event_uid);

	if (iwx_send_time_event_cmd(sc, &time_cmd) == 0)
		sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;

	DELAY(100);
}
2556 
2557 /*
2558  * NVM read access and content parsing.  We do not support
2559  * external NVM or writing NVM.
2560  */
2561 
2562 uint8_t
2563 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
2564 {
2565 	uint8_t tx_ant;
2566 
2567 	tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
2568 	    >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
2569 
2570 	if (sc->sc_nvm.valid_tx_ant)
2571 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2572 
2573 	return tx_ant;
2574 }
2575 
2576 uint8_t
2577 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
2578 {
2579 	uint8_t rx_ant;
2580 
2581 	rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
2582 	    >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
2583 
2584 	if (sc->sc_nvm.valid_rx_ant)
2585 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2586 
2587 	return rx_ant;
2588 }
2589 
/*
 * Populate ic_channels from the NVM channel profile.  Exactly one of
 * channel_profile_v3 (16-bit flags) or channel_profile_v4 (32-bit
 * flags) is non-NULL, depending on the firmware API version.
 */
void
iwx_init_channel_map(struct iwx_softc *sc, uint16_t *channel_profile_v3,
    uint32_t *channel_profile_v4, int nchan_profile)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	struct ieee80211_channel *channel;
	uint32_t ch_flags;
	int is_5ghz;
	int flags, hw_value;
	int nchan;
	const uint8_t *nvm_channels;

	/* Ultra-high-band devices have a larger channel table. */
	if (sc->sc_uhb_supported) {
		nchan = nitems(iwx_nvm_channels_uhb);
		nvm_channels = iwx_nvm_channels_uhb;
	} else {
		nchan = nitems(iwx_nvm_channels_8000);
		nvm_channels = iwx_nvm_channels_8000;
	}

	for (ch_idx = 0; ch_idx < nchan && ch_idx < nchan_profile; ch_idx++) {
		if (channel_profile_v4)
			ch_flags = le32_to_cpup(channel_profile_v4 + ch_idx);
		else
			ch_flags = le16_to_cpup(channel_profile_v3 + ch_idx);

		/* Drop 5GHz channels if the SKU does not enable that band. */
		is_5ghz = ch_idx >= IWX_NUM_2GHZ_CHANNELS;
		if (is_5ghz && !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWX_NVM_CHANNEL_VALID;

		hw_value = nvm_channels[ch_idx];
		channel = &ic->ic_channels[hw_value];

		/* Invalid channels are cleared so net80211 ignores them. */
		if (!(ch_flags & IWX_NVM_CHANNEL_VALID)) {
			channel->ic_freq = 0;
			channel->ic_flags = 0;
			continue;
		}

		if (!is_5ghz) {
			flags = IEEE80211_CHAN_2GHZ;
			channel->ic_flags
			    = IEEE80211_CHAN_CCK
			    | IEEE80211_CHAN_OFDM
			    | IEEE80211_CHAN_DYN
			    | IEEE80211_CHAN_2GHZ;
		} else {
			flags = IEEE80211_CHAN_5GHZ;
			channel->ic_flags =
			    IEEE80211_CHAN_A;
		}
		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);

		/* Channels where active scanning is forbidden. */
		if (!(ch_flags & IWX_NVM_CHANNEL_ACTIVE))
			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;

		if (data->sku_cap_11n_enable)
			channel->ic_flags |= IEEE80211_CHAN_HT;
	}
}
2652 
2653 int
2654 iwx_mimo_enabled(struct iwx_softc *sc)
2655 {
2656 	struct ieee80211com *ic = &sc->sc_ic;
2657 
2658 	return !sc->sc_nvm.sku_cap_mimo_disable &&
2659 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
2660 }
2661 
/*
 * Advertise the supported HT MCS set to net80211: MCS 0-7 always,
 * and MCS 8-15 when two RX chains are usable.
 */
void
iwx_setup_ht_rates(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rx_ant;

	/* TX is supported with the same MCS as RX. */
	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;

	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */

	if (!iwx_mimo_enabled(sc))
		return;

	/* Two-stream rates require at least two valid RX antennas. */
	rx_ant = iwx_fw_valid_rx_ant(sc);
	if ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)
		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
}
2682 
2683 #define IWX_MAX_RX_BA_SESSIONS 16
2684 
/*
 * Start or stop an RX block-ack session for (station, tid) by sending
 * an ADD_STA modify command, and report the outcome to net80211.
 * 'ssn' and 'winsize' are only meaningful when 'start' is non-zero.
 */
void
iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, uint16_t winsize, int start)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_add_sta_cmd cmd;
	struct iwx_node *in = (void *)ni;
	int err, s;
	uint32_t status;

	/* Refuse new sessions beyond the firmware's limit. */
	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
		ieee80211_addba_req_refuse(ic, ni, tid);
		return;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.sta_id = IWX_STATION_ID;
	cmd.mac_id_n_color
	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	cmd.add_modify = IWX_STA_MODE_MODIFY;

	if (start) {
		cmd.add_immediate_ba_tid = (uint8_t)tid;
		cmd.add_immediate_ba_ssn = htole16(ssn);
		cmd.rx_ba_window = htole16(winsize);
	} else {
		cmd.remove_immediate_ba_tid = (uint8_t)tid;
	}
	cmd.modify_mask = start ? IWX_STA_MODIFY_ADD_BA_TID :
	    IWX_STA_MODIFY_REMOVE_BA_TID;

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
	    &status);

	/* Update session count and notify net80211 at splnet. */
	s = splnet();
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) == IWX_ADD_STA_SUCCESS) {
		if (start) {
			sc->sc_rx_ba_sessions++;
			ieee80211_addba_req_accept(ic, ni, tid);
		} else if (sc->sc_rx_ba_sessions > 0)
			sc->sc_rx_ba_sessions--;
	} else if (start)
		ieee80211_addba_req_refuse(ic, ni, tid);

	splx(s);
}
2733 
/*
 * Task context for updating HT protection settings after beacon
 * changes; sends a MAC context modify command to the firmware.
 */
void
iwx_htprot_task(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ic->ic_bss;
	int err, s = splnet();

	/* Bail out if the driver is being shut down. */
	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;
	}

	/* This call updates HT protection based on in->in_ni.ni_htop1. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
	if (err)
		printf("%s: could not change HT protection: error %d\n",
		    DEVNAME(sc), err);

	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
2757 
2758 /*
2759  * This function is called by upper layer when HT protection settings in
2760  * beacons have changed.
2761  */
void
iwx_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	struct iwx_softc *sc = ic->ic_softc;

	/* assumes that ni == ic->ic_bss; the work happens in task context */
	iwx_add_task(sc, systq, &sc->htprot_task);
}
2770 
/*
 * Task context for starting/stopping the RX block-ack session whose
 * parameters were stashed in the softc by iwx_ampdu_rx_start/stop().
 */
void
iwx_ba_task(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int s = splnet();

	/* Bail out if the driver is being shut down. */
	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;
	}

	if (sc->ba_start)
		iwx_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn,
		    sc->ba_winsize, 1);
	else
		iwx_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0, 0);

	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
2794 
2795 /*
2796  * This function is called by upper layer when an ADDBA request is received
2797  * from another STA and before the ADDBA response is sent.
2798  */
2799 int
2800 iwx_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
2801     uint8_t tid)
2802 {
2803 	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
2804 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
2805 
2806 	if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS)
2807 		return ENOSPC;
2808 
2809 	sc->ba_start = 1;
2810 	sc->ba_tid = tid;
2811 	sc->ba_ssn = htole16(ba->ba_winstart);
2812 	sc->ba_winsize = htole16(ba->ba_winsize);
2813 	iwx_add_task(sc, systq, &sc->ba_task);
2814 
2815 	return EBUSY;
2816 }
2817 
2818 /*
2819  * This function is called by upper layer on teardown of an HT-immediate
2820  * Block Ack agreement (eg. upon receipt of a DELBA frame).
2821  */
void
iwx_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct iwx_softc *sc = IC2IFP(ic)->if_softc;

	/* Stash parameters and tear the session down in task context. */
	sc->ba_start = 0;
	sc->ba_tid = tid;
	iwx_add_task(sc, systq, &sc->ba_task);
}
2832 
2833 /* Read the mac address from WFMP registers. */
int
iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
{
	const uint8_t *hw_addr;
	uint32_t mac_addr0, mac_addr1;

	if (!iwx_nic_lock(sc))
		return EBUSY;

	/* Force little-endian so the byte-reversal below is fixed. */
	mac_addr0 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_0));
	mac_addr1 = htole32(iwx_read_prph(sc, IWX_WFMP_MAC_ADDR_1));

	/* The registers hold the address bytes in reversed order. */
	hw_addr = (const uint8_t *)&mac_addr0;
	data->hw_addr[0] = hw_addr[3];
	data->hw_addr[1] = hw_addr[2];
	data->hw_addr[2] = hw_addr[1];
	data->hw_addr[3] = hw_addr[0];

	hw_addr = (const uint8_t *)&mac_addr1;
	data->hw_addr[4] = hw_addr[1];
	data->hw_addr[5] = hw_addr[0];

	iwx_nic_unlock(sc);
	return 0;
}
2859 
/*
 * Return non-zero if 'addr' is usable as a unicast MAC address:
 * not Intel's reserved placeholder, not broadcast, not all-zero,
 * and not a multicast address.
 */
int
iwx_is_valid_mac_addr(const uint8_t *addr)
{
	/* Placeholder address found in some uninitialized devices. */
	static const uint8_t reserved_mac[] = {
		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
	};

	return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
	    memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
	    memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
	    !ETHER_IS_MULTICAST(addr));
}
2872 
/*
 * Fetch NVM content from the firmware via NVM_GET_INFO and fill in
 * sc_nvm: MAC address, SKU capabilities, antenna masks and the channel
 * map.  Returns 0 or an errno.
 */
int
iwx_nvm_get(struct iwx_softc *sc)
{
	struct iwx_nvm_get_info cmd = {};
	struct iwx_nvm_data *nvm = &sc->sc_nvm;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
		.data = { &cmd, },
		.len = { sizeof(cmd) },
		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_GET_INFO)
	};
	int err;
	uint32_t mac_flags;
	/*
	 * All the values in iwx_nvm_get_info_rsp v4 are the same as
	 * in v3, except for the channel profile part of the
	 * regulatory.  So we can just access the new struct, with the
	 * exception of the latter.
	 */
	struct iwx_nvm_get_info_rsp *rsp;
	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);

	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
		err = EIO;
		goto out;
	}

	memset(nvm, 0, sizeof(*nvm));

	/* MAC address comes from WFMP registers, not from this response. */
	iwx_set_mac_addr_from_csr(sc, nvm);
	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
		printf("%s: no valid mac address was found\n", DEVNAME(sc));
		err = EINVAL;
		goto out;
	}

	rsp = (void *)hcmd.resp_pkt->data;

	/* Initialize general data */
	nvm->nvm_version = le16toh(rsp->general.nvm_version);
	nvm->n_hw_addrs = rsp->general.n_hw_addrs;

	/* Initialize MAC sku data */
	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
	nvm->sku_cap_11ac_enable =
		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
	nvm->sku_cap_11n_enable =
		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
	nvm->sku_cap_11ax_enable =
		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
	nvm->sku_cap_band_24GHz_enable =
		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
	nvm->sku_cap_band_52GHz_enable =
		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
	nvm->sku_cap_mimo_disable =
		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);

	/* Initialize PHY sku data */
	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);

	/* LAR (location-aware regulatory) needs both NVM and fw support. */
	if (le32toh(rsp->regulatory.lar_enabled) &&
	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		nvm->lar_enabled = 1;
	}

	/* v4 responses carry 32-bit channel flags, v3 only 16-bit. */
	if (v4) {
		iwx_init_channel_map(sc, NULL,
		    rsp->regulatory.channel_profile, IWX_NUM_CHANNELS);
	} else {
		rsp_v3 = (void *)rsp;
		iwx_init_channel_map(sc, rsp_v3->regulatory.channel_profile,
		    NULL, IWX_NUM_CHANNELS_V1);
	}
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
2959 
/*
 * Upload firmware via the context info mechanism and wait up to ~1s
 * (10 x 100ms) for the firmware's ALIVE interrupt (sc_uc.uc_intr).
 * Returns 0 on success or an errno.
 */
int
iwx_load_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_sects *fws;
	int err, w;

	sc->sc_uc.uc_intr = 0;

	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
	err = iwx_ctxt_info_init(sc, fws);
	if (err) {
		printf("%s: could not init context info\n", DEVNAME(sc));
		return err;
	}

	/* wait for the firmware to load */
	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
		err = tsleep_nsec(&sc->sc_uc, 0, "iwxuc", MSEC_TO_NSEC(100));
	}
	if (err || !sc->sc_uc.uc_ok)
		printf("%s: could not load firmware\n", DEVNAME(sc));

	/* The firmware image DMA copy is no longer needed once loaded. */
	iwx_ctxt_info_free_fw_img(sc);

	if (!sc->sc_uc.uc_ok)
		return EINVAL;

	return err;
}
2989 
/*
 * Prepare the NIC and kick off firmware load: ack pending interrupts,
 * clear rfkill handshake bits, init the NIC, then enable the firmware
 * load interrupt and load the image.
 */
int
iwx_start_fw(struct iwx_softc *sc)
{
	int err;

	/* Acknowledge any pending interrupts. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	iwx_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again) before enabling the firmware load interrupt below */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	err = iwx_nic_init(sc);
	if (err) {
		printf("%s: unable to init nic\n", DEVNAME(sc));
		return err;
	}

	iwx_enable_fwload_interrupt(sc);

	return iwx_load_firmware(sc);
}
3017 
3018 int
3019 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
3020 {
3021 	struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
3022 		.valid = htole32(valid_tx_ant),
3023 	};
3024 
3025 	return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
3026 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3027 }
3028 
3029 int
3030 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
3031 {
3032 	struct iwx_phy_cfg_cmd phy_cfg_cmd;
3033 
3034 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3035 	phy_cfg_cmd.calib_control.event_trigger =
3036 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
3037 	phy_cfg_cmd.calib_control.flow_trigger =
3038 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
3039 
3040 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
3041 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3042 }
3043 
3044 int
3045 iwx_send_dqa_cmd(struct iwx_softc *sc)
3046 {
3047 	struct iwx_dqa_enable_cmd dqa_cmd = {
3048 		.cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
3049 	};
3050 	uint32_t cmd_id;
3051 
3052 	cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
3053 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
3054 }
3055 
/*
 * Read the firmware image from disk (if not done yet), start it on the
 * device, and perform post-alive setup. Returns 0 on success.
 */
int
iwx_load_ucode_wait_alive(struct iwx_softc *sc)
{
	int err;

	if ((err = iwx_read_firmware(sc)) != 0)
		return err;

	if ((err = iwx_start_fw(sc)) != 0)
		return err;

	iwx_post_alive(sc);

	return 0;
}
3073 
/*
 * Bring the firmware up and complete the NVM access handshake.
 * If 'readnvm' is set, also read the NVM and (if needed) adopt the
 * NVM-provided MAC address. Returns 0 on success, an errno otherwise.
 */
int
iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
{
	const int wait_flags = IWX_INIT_COMPLETE;
	struct iwx_nvm_access_complete_cmd nvm_complete = {};
	struct iwx_init_extended_cfg_cmd init_cfg = {
		.init_flags = htole32(IWX_INIT_NVM),
	};
	int err;

	/* With rfkill asserted we can still read NVM, but go no further. */
	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
		printf("%s: radio is disabled by hardware switch\n",
		    DEVNAME(sc));
		return EPERM;
	}

	sc->sc_init_complete = 0;
	err = iwx_load_ucode_wait_alive(sc);
	if (err) {
		printf("%s: failed to load init firmware\n", DEVNAME(sc));
		return err;
	}

	/*
	 * Send init config command to mark that we are sending NVM
	 * access commands
	 */
	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
	if (err)
		return err;

	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
	if (err)
		return err;

	/* Wait for the init complete notification from the firmware. */
	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
		    SEC_TO_NSEC(2));
		if (err)
			return err;
	}

	if (readnvm) {
		err = iwx_nvm_get(sc);
		if (err) {
			printf("%s: failed to read nvm\n", DEVNAME(sc));
			return err;
		}
		/* Only overwrite ic_myaddr if it has not been set yet. */
		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
			    sc->sc_nvm.hw_addr);

	}
	return 0;
}
3132 
3133 int
3134 iwx_config_ltr(struct iwx_softc *sc)
3135 {
3136 	struct iwx_ltr_config_cmd cmd = {
3137 		.flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
3138 	};
3139 
3140 	if (!sc->sc_ltr_enabled)
3141 		return 0;
3142 
3143 	return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3144 }
3145 
/*
 * Write the DMA address of the Rx buffer at 'idx' into the free
 * descriptor table so the hardware can fill it.
 */
void
iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx)
{
	struct iwx_rx_data *data = &ring->data[idx];

	/* Descriptor format: buffer DMA address with the index in low bits. */
	((uint64_t *)ring->desc)[idx] =
	    htole64(data->map->dm_segs[0].ds_addr | (idx & 0x0fff));
	bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
	    idx * sizeof(uint64_t), sizeof(uint64_t),
	    BUS_DMASYNC_PREWRITE);
}
3157 
/*
 * Allocate an mbuf cluster of at least 'size' bytes, map it for DMA,
 * and install it in Rx ring slot 'idx'. Returns 0 on success or an
 * errno; panics if a previously-loaded slot cannot be re-loaded.
 */
int
iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
{
	struct iwx_rx_ring *ring = &sc->rxq;
	struct iwx_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	/* Use a regular cluster when it fits, a large one otherwise. */
	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MCLGETL(m, M_DONTWAIT, IWX_RBUF_SIZE);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/*
	 * If the slot was occupied we are replacing its buffer; failing to
	 * load a new one would leave the hardware with a stale descriptor,
	 * hence the panic below.
	 */
	if (data->m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		if (fatal)
			panic("%s: could not load RX mbuf", DEVNAME(sc));
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	iwx_update_rx_desc(sc, ring, idx);

	return 0;
}
3204 
3205 int
3206 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
3207     struct iwx_rx_mpdu_desc *desc)
3208 {
3209 	int energy_a, energy_b;
3210 
3211 	energy_a = desc->v1.energy_a;
3212 	energy_b = desc->v1.energy_b;
3213 	energy_a = energy_a ? -energy_a : -256;
3214 	energy_b = energy_b ? -energy_b : -256;
3215 	return MAX(energy_a, energy_b);
3216 }
3217 
/*
 * Handle an RX_PHY notification: cache the PHY info so it can be
 * paired with a subsequent MPDU notification.
 */
void
iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;

	/* Make the payload (which follows the packet header) CPU-visible. */
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
3229 
3230 /*
3231  * Retrieve the average noise (in dBm) among receivers.
3232  */
3233 int
3234 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
3235 {
3236 	int i, total, nbant, noise;
3237 
3238 	total = nbant = noise = 0;
3239 	for (i = 0; i < 3; i++) {
3240 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
3241 		if (noise) {
3242 			total += noise;
3243 			nbant++;
3244 		}
3245 	}
3246 
3247 	/* There should be at least one antenna but check anyway. */
3248 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3249 }
3250 
/*
 * Validate the CCMP header of a hardware-decrypted frame and check the
 * packet number against the last-seen value to detect replays.
 * Returns 0 if the frame is acceptable, 1 if it must be dropped.
 */
int
iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_key *k = &ni->ni_pairwise_key;
	struct ieee80211_frame *wh;
	uint64_t pn, *prsc;
	uint8_t *ivp;
	uint8_t tid;
	int hdrlen, hasqos;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_get_hdrlen(wh);
	ivp = (uint8_t *)wh + hdrlen;

	/* Check that ExtIV bit is set. */
	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
		return 1;

	/* Replay counters are kept per TID; non-QoS frames use TID 0. */
	hasqos = ieee80211_has_qos(wh);
	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
	prsc = &k->k_rsc[tid];

	/* Extract the 48-bit PN from the CCMP header. */
	pn = (uint64_t)ivp[0]       |
	     (uint64_t)ivp[1] <<  8 |
	     (uint64_t)ivp[4] << 16 |
	     (uint64_t)ivp[5] << 24 |
	     (uint64_t)ivp[6] << 32 |
	     (uint64_t)ivp[7] << 40;
	if (pn <= *prsc) {
		ic->ic_stats.is_ccmp_replays++;
		return 1;
	}
	/* Last seen packet number is updated in ieee80211_inputm(). */

	/*
	 * Some firmware versions strip the MIC, and some don't. It is not
	 * clear which of the capability flags could tell us what to expect.
	 * For now, keep things simple and just leave the MIC in place if
	 * it is present.
	 *
	 * The IV will be stripped by ieee80211_inputm().
	 */
	return 0;
}
3297 
3298 void
3299 iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
3300     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
3301     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
3302     struct mbuf_list *ml)
3303 {
3304 	struct ieee80211com *ic = &sc->sc_ic;
3305 	struct ieee80211_frame *wh;
3306 	struct ieee80211_node *ni;
3307 	struct ieee80211_channel *bss_chan;
3308 	uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
3309 	struct ifnet *ifp = IC2IFP(ic);
3310 
3311 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
3312 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
3313 
3314 	wh = mtod(m, struct ieee80211_frame *);
3315 	ni = ieee80211_find_rxnode(ic, wh);
3316 	if (ni == ic->ic_bss) {
3317 		/*
3318 		 * We may switch ic_bss's channel during scans.
3319 		 * Record the current channel so we can restore it later.
3320 		 */
3321 		bss_chan = ni->ni_chan;
3322 		IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
3323 	}
3324 	ni->ni_chan = &ic->ic_channels[chanidx];
3325 
3326 	/* Handle hardware decryption. */
3327 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
3328 	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
3329 	    (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
3330 	    ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
3331 		if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
3332 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
3333 			ic->ic_stats.is_ccmp_dec_errs++;
3334 			ifp->if_ierrors++;
3335 			m_freem(m);
3336 			ieee80211_release_node(ic, ni);
3337 			return;
3338 		}
3339 		/* Check whether decryption was successful or not. */
3340 		if ((rx_pkt_status &
3341 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
3342 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
3343 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
3344 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
3345 			ic->ic_stats.is_ccmp_dec_errs++;
3346 			ifp->if_ierrors++;
3347 			m_freem(m);
3348 			ieee80211_release_node(ic, ni);
3349 			return;
3350 		}
3351 		if (iwx_ccmp_decap(sc, m, ni) != 0) {
3352 			ifp->if_ierrors++;
3353 			m_freem(m);
3354 			ieee80211_release_node(ic, ni);
3355 			return;
3356 		}
3357 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
3358 	}
3359 
3360 #if NBPFILTER > 0
3361 	if (sc->sc_drvbpf != NULL) {
3362 		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
3363 		uint16_t chan_flags;
3364 
3365 		tap->wr_flags = 0;
3366 		if (is_shortpre)
3367 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3368 		tap->wr_chan_freq =
3369 		    htole16(ic->ic_channels[chanidx].ic_freq);
3370 		chan_flags = ic->ic_channels[chanidx].ic_flags;
3371 		if (ic->ic_curmode != IEEE80211_MODE_11N)
3372 			chan_flags &= ~IEEE80211_CHAN_HT;
3373 		tap->wr_chan_flags = htole16(chan_flags);
3374 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
3375 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3376 		tap->wr_tsft = device_timestamp;
3377 		if (rate_n_flags & IWX_RATE_MCS_HT_MSK) {
3378 			uint8_t mcs = (rate_n_flags &
3379 			    (IWX_RATE_HT_MCS_RATE_CODE_MSK |
3380 			    IWX_RATE_HT_MCS_NSS_MSK));
3381 			tap->wr_rate = (0x80 | mcs);
3382 		} else {
3383 			uint8_t rate = (rate_n_flags &
3384 			    IWX_RATE_LEGACY_RATE_MSK);
3385 			switch (rate) {
3386 			/* CCK rates. */
3387 			case  10: tap->wr_rate =   2; break;
3388 			case  20: tap->wr_rate =   4; break;
3389 			case  55: tap->wr_rate =  11; break;
3390 			case 110: tap->wr_rate =  22; break;
3391 			/* OFDM rates. */
3392 			case 0xd: tap->wr_rate =  12; break;
3393 			case 0xf: tap->wr_rate =  18; break;
3394 			case 0x5: tap->wr_rate =  24; break;
3395 			case 0x7: tap->wr_rate =  36; break;
3396 			case 0x9: tap->wr_rate =  48; break;
3397 			case 0xb: tap->wr_rate =  72; break;
3398 			case 0x1: tap->wr_rate =  96; break;
3399 			case 0x3: tap->wr_rate = 108; break;
3400 			/* Unknown rate: should not happen. */
3401 			default:  tap->wr_rate =   0;
3402 			}
3403 		}
3404 
3405 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
3406 		    m, BPF_DIRECTION_IN);
3407 	}
3408 #endif
3409 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
3410 	/*
3411 	 * ieee80211_inputm() might have changed our BSS.
3412 	 * Restore ic_bss's channel if we are still in the same BSS.
3413 	 */
3414 	if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
3415 		ni->ni_chan = bss_chan;
3416 	ieee80211_release_node(ic, ni);
3417 }
3418 
/*
 * Process one received MPDU from a multi-queue Rx notification:
 * validate status and length, undo hardware header padding, gather
 * PHY metadata, and pass the frame on to iwx_rx_frame().
 */
void
iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
    size_t maxlen, struct mbuf_list *ml)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_rxinfo rxi;
	struct iwx_rx_mpdu_desc *desc;
	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
	int rssi;
	uint8_t chanidx;
	uint16_t phy_info;

	desc = (struct iwx_rx_mpdu_desc *)pktdata;

	/* Drop frames which failed the CRC or overrun checks. */
	if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
	    !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
		m_freem(m);
		return; /* drop */
	}

	len = le16toh(desc->mpdu_len);
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* Allow control frames in monitor mode. */
		if (len < sizeof(struct ieee80211_frame_cts)) {
			ic->ic_stats.is_rx_tooshort++;
			IC2IFP(ic)->if_ierrors++;
			m_freem(m);
			return;
		}
	} else if (len < sizeof(struct ieee80211_frame)) {
		ic->ic_stats.is_rx_tooshort++;
		IC2IFP(ic)->if_ierrors++;
		m_freem(m);
		return;
	}
	/* Frame must fit within the Rx buffer after the descriptor. */
	if (len > maxlen - sizeof(*desc)) {
		IC2IFP(ic)->if_ierrors++;
		m_freem(m);
		return;
	}

	/* The 802.11 frame follows the MPDU descriptor. */
	m->m_data = pktdata + sizeof(*desc);
	m->m_pkthdr.len = m->m_len = len;

	/* Account for padding following the frame header. */
	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
		if (type == IEEE80211_FC0_TYPE_CTL) {
			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
			case IEEE80211_FC0_SUBTYPE_CTS:
				hdrlen = sizeof(struct ieee80211_frame_cts);
				break;
			case IEEE80211_FC0_SUBTYPE_ACK:
				hdrlen = sizeof(struct ieee80211_frame_ack);
				break;
			default:
				hdrlen = sizeof(struct ieee80211_frame_min);
				break;
			}
		} else
			hdrlen = ieee80211_get_hdrlen(wh);

		if ((le16toh(desc->status) &
		    IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
			/* Padding is inserted after the IV. */
			hdrlen += IEEE80211_CCMP_HDRLEN;
		}

		/* Shift the header up by 2 bytes to remove the padding. */
		memmove(m->m_data + 2, m->m_data, hdrlen);
		m_adj(m, 2);
	}

	phy_info = le16toh(desc->phy_info);
	rate_n_flags = le32toh(desc->v1.rate_n_flags);
	chanidx = desc->v1.channel;
	device_timestamp = desc->v1.gp2_on_air_rise;

	rssi = iwx_rxmq_get_signal_strength(sc, desc);
	rssi = (0 - IWX_MIN_DBM) + rssi;	/* normalize */
	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */

	memset(&rxi, 0, sizeof(rxi));
	rxi.rxi_rssi = rssi;
	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);

	iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
	    rate_n_flags, device_timestamp, &rxi, ml);
}
3510 
3511 void
3512 iwx_rx_tx_cmd_single(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3513     struct iwx_node *in)
3514 {
3515 	struct ieee80211com *ic = &sc->sc_ic;
3516 	struct ifnet *ifp = IC2IFP(ic);
3517 	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
3518 	int status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
3519 	int txfail;
3520 
3521 	KASSERT(tx_resp->frame_count == 1);
3522 
3523 	txfail = (status != IWX_TX_STATUS_SUCCESS &&
3524 	    status != IWX_TX_STATUS_DIRECT_DONE);
3525 
3526 	if (txfail)
3527 		ifp->if_oerrors++;
3528 }
3529 
/*
 * Release all resources held by a completed Tx slot: unmap and free
 * the mbuf and drop the node reference taken at transmit time.
 */
void
iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_data *txd)
{
	struct ieee80211com *ic = &sc->sc_ic;

	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);
	txd->m = NULL;

	KASSERT(txd->in);
	ieee80211_release_node(ic, &txd->in->in_ni);
	txd->in = NULL;
}
3545 
3546 void
3547 iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3548     struct iwx_rx_data *data)
3549 {
3550 	struct ieee80211com *ic = &sc->sc_ic;
3551 	struct ifnet *ifp = IC2IFP(ic);
3552 	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
3553 	int idx = cmd_hdr->idx;
3554 	int qid = cmd_hdr->qid;
3555 	struct iwx_tx_ring *ring = &sc->txq[qid];
3556 	struct iwx_tx_data *txd;
3557 
3558 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
3559 	    BUS_DMASYNC_POSTREAD);
3560 
3561 	sc->sc_tx_timer = 0;
3562 
3563 	txd = &ring->data[idx];
3564 	if (txd->m == NULL)
3565 		return;
3566 
3567 	iwx_rx_tx_cmd_single(sc, pkt, txd->in);
3568 	iwx_txd_done(sc, txd);
3569 	iwx_tx_update_byte_tbl(ring, idx, 0, 0);
3570 
3571 	/*
3572 	 * XXX Sometimes we miss Tx completion interrupts.
3573 	 * We cannot check Tx success/failure for affected frames; just free
3574 	 * the associated mbuf and release the associated node reference.
3575 	 */
3576 	while (ring->tail != idx) {
3577 		txd = &ring->data[ring->tail];
3578 		if (txd->m != NULL) {
3579 			DPRINTF(("%s: missed Tx completion: tail=%d idx=%d\n",
3580 			    __func__, ring->tail, idx));
3581 			iwx_txd_done(sc, txd);
3582 			iwx_tx_update_byte_tbl(ring, idx, 0, 0);
3583 			ring->queued--;
3584 		}
3585 		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
3586 	}
3587 
3588 	if (--ring->queued < IWX_TX_RING_LOMARK) {
3589 		sc->qfullmsk &= ~(1 << ring->qid);
3590 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
3591 			ifq_clr_oactive(&ifp->if_snd);
3592 			/*
3593 			 * Well, we're in interrupt context, but then again
3594 			 * I guess net80211 does all sorts of stunts in
3595 			 * interrupt context, so maybe this is no biggie.
3596 			 */
3597 			(*ifp->if_start)(ifp);
3598 		}
3599 	}
3600 }
3601 
/*
 * Handle a missed-beacons notification. If too many consecutive
 * beacons were missed, probe the AP before giving up on it.
 */
void
iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
	uint32_t missed;

	/* Beacon misses only matter while associated in STA mode. */
	if ((ic->ic_opmode != IEEE80211_M_STA) ||
	    (ic->ic_state != IEEE80211_S_RUN))
		return;

	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);

	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
		if (ic->ic_if.if_flags & IFF_DEBUG)
			printf("%s: receiving no beacons from %s; checking if "
			    "this AP is still responding to probe requests\n",
			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
		/*
		 * Rather than go directly to scan state, try to send a
		 * directed probe request first. If that fails then the
		 * state machine will drop us into scanning after timing
		 * out waiting for a probe response.
		 */
		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
	}

}
3634 
/*
 * Add or remove the binding between a MAC context and a PHY context.
 * 'action' is IWX_FW_CTXT_ACTION_ADD or IWX_FW_CTXT_ACTION_REMOVE.
 */
int
iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
{
	struct iwx_binding_cmd cmd;
	struct iwx_phy_ctxt *phyctxt = in->in_phyctxt;
	uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
	int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
	uint32_t status;

	/* Adding twice or removing twice indicates a driver state bug. */
	if (action == IWX_FW_CTXT_ACTION_ADD && active)
		panic("binding already added");
	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
		panic("binding already removed");

	if (phyctxt == NULL) /* XXX race with iwx_stop() */
		return EINVAL;

	memset(&cmd, 0, sizeof(cmd));

	cmd.id_and_color
	    = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
	cmd.action = htole32(action);
	cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));

	/* Bind a single MAC; mark the remaining slots invalid. */
	cmd.macs[0] = htole32(mac_id);
	for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
		cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);

	/* Without CDB support everything runs on the "2.4GHz" LMAC. */
	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	status = 0;
	err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
	    &cmd, &status);
	/* The firmware reports failure via a non-zero status word. */
	if (err == 0 && status != 0)
		err = EIO;

	return err;
}
3677 
/*
 * Send a PHY context command using the larger "ultra high band" layout
 * of struct fw_channel_info. See the comment in iwx_phy_ctxt_cmd().
 */
int
iwx_phy_ctxt_cmd_uhb(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
    uint32_t apply_time)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd_uhb cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);
	cmd.apply_time = htole32(apply_time);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
	/* Only 20MHz channels are configured here. */
	cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
	cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;

	idle_cnt = chains_static;
	active_cnt = chains_dynamic;
	cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
					IWX_PHY_RX_CHAIN_VALID_POS);
	cmd.rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
	cmd.rxchain_info |= htole32(active_cnt <<
	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	cmd.txchain_info = htole32(iwx_fw_valid_tx_ant(sc));

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
3711 
/*
 * Add, modify, or remove a PHY context in firmware, selecting the
 * command layout appropriate for the firmware's capabilities.
 */
int
iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
    uint32_t apply_time)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	/*
	 * Intel increased the size of the fw_channel_info struct and neglected
	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
	 * member in the middle.
	 * To keep things simple we use a separate function to handle the larger
	 * variant of the phy context command.
	 */
	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS))
		return iwx_phy_ctxt_cmd_uhb(sc, ctxt, chains_static,
		    chains_dynamic, action, apply_time);

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);
	cmd.apply_time = htole32(apply_time);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	/* NOTE: unlike the uhb variant, ci.channel is a single byte here. */
	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
	/* Only 20MHz channels are configured here. */
	cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
	cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;

	idle_cnt = chains_static;
	active_cnt = chains_dynamic;
	cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
					IWX_PHY_RX_CHAIN_VALID_POS);
	cmd.rxchain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
	cmd.rxchain_info |= htole32(active_cnt <<
	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	cmd.txchain_info = htole32(iwx_fw_valid_tx_ant(sc));

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
3756 
/*
 * Submit a host command to the firmware via the command queue.
 * Synchronous commands sleep for completion; commands flagged
 * IWX_CMD_WANT_RESP additionally receive a response buffer which the
 * caller must release with iwx_free_resp().
 */
int
iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
{
	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
	struct iwx_tfh_tfd *desc;
	struct iwx_tx_data *txdata;
	struct iwx_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint64_t addr;
	int err = 0, i, paylen, off, s;
	int idx, code, async, group_id;
	size_t hdrlen, datasz;
	uint8_t *data;
	/* Used to detect a device reset while we slept. */
	int generation = sc->sc_generation;

	code = hcmd->id;
	async = hcmd->flags & IWX_CMD_ASYNC;
	idx = ring->cur;

	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* If this command waits for a response, allocate response buffer. */
	hcmd->resp_pkt = NULL;
	if (hcmd->flags & IWX_CMD_WANT_RESP) {
		uint8_t *resp_buf;
		KASSERT(!async);
		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet));
		KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX);
		if (sc->sc_cmd_resp_pkt[idx] != NULL)
			return ENOSPC;
		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (resp_buf == NULL)
			return ENOMEM;
		sc->sc_cmd_resp_pkt[idx] = resp_buf;
		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
	} else {
		sc->sc_cmd_resp_pkt[idx] = NULL;
	}

	s = splnet();

	desc = &ring->desc[idx];
	txdata = &ring->data[idx];

	/* Commands in a non-zero group use the wide header format. */
	group_id = iwx_cmd_groupid(code);
	if (group_id != 0) {
		hdrlen = sizeof(cmd->hdr_wide);
		datasz = sizeof(cmd->data_wide);
	} else {
		hdrlen = sizeof(cmd->hdr);
		datasz = sizeof(cmd->data);
	}

	if (paylen > datasz) {
		/* Command is too large to fit in pre-allocated space. */
		size_t totlen = hdrlen + paylen;
		if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
			printf("%s: firmware command too long (%zd bytes)\n",
			    DEVNAME(sc), totlen);
			err = EINVAL;
			goto out;
		}
		m = MCLGETL(NULL, M_DONTWAIT, totlen);
		if (m == NULL) {
			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
			    DEVNAME(sc), totlen);
			err = ENOMEM;
			goto out;
		}
		cmd = mtod(m, struct iwx_device_cmd *);
		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
			    DEVNAME(sc), totlen);
			m_freem(m);
			goto out;
		}
		txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
		paddr = txdata->map->dm_segs[0].ds_addr;
	} else {
		/* Command fits in the ring's pre-allocated command buffer. */
		cmd = &ring->cmd[idx];
		paddr = txdata->cmd_paddr;
	}

	if (group_id != 0) {
		cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
		cmd->hdr_wide.group_id = group_id;
		cmd->hdr_wide.qid = ring->qid;
		cmd->hdr_wide.idx = idx;
		cmd->hdr_wide.length = htole16(paylen);
		cmd->hdr_wide.version = iwx_cmd_version(code);
		data = cmd->data_wide;
	} else {
		cmd->hdr.code = code;
		cmd->hdr.flags = 0;
		cmd->hdr.qid = ring->qid;
		cmd->hdr.idx = idx;
		data = cmd->data;
	}

	/* Concatenate the payload fragments into the command buffer. */
	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen);

	/* The whole command fits into a single transfer buffer. */
	desc->tbs[0].tb_len = htole16(hdrlen + paylen);
	addr = htole64((uint64_t)paddr);
	memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
	desc->num_tbs = 1;

	if (paylen > datasz) {
		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
	/* Kick command ring. */
	DPRINTF(("%s: sending command 0x%x\n", __func__, code));
	ring->queued++;
	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur);

	if (!async) {
		/* iwx_cmd_done() wakes us up on &ring->desc[idx]. */
		err = tsleep_nsec(desc, PCATCH, "iwxcmd", SEC_TO_NSEC(1));
		if (err == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				err = ENXIO;
				goto out;
			}

			/* Response buffer will be freed in iwx_free_resp(). */
			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
			sc->sc_cmd_resp_pkt[idx] = NULL;
		} else if (generation == sc->sc_generation) {
			/* Timed out or interrupted: discard the buffer. */
			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
			    sc->sc_cmd_resp_len[idx]);
			sc->sc_cmd_resp_pkt[idx] = NULL;
		}
	}
 out:
	splx(s);

	return err;
}
3915 
3916 int
3917 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
3918     uint16_t len, const void *data)
3919 {
3920 	struct iwx_host_cmd cmd = {
3921 		.id = id,
3922 		.len = { len, },
3923 		.data = { data, },
3924 		.flags = flags,
3925 	};
3926 
3927 	return iwx_send_cmd(sc, &cmd);
3928 }
3929 
/*
 * Send a command synchronously and extract the 32-bit status word from
 * the firmware's response into *status. Returns 0 on success.
 */
int
iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
    uint32_t *status)
{
	struct iwx_rx_packet *pkt;
	struct iwx_cmd_response *resp;
	int err, resp_len;

	/* This function manages the response buffer itself. */
	KASSERT((cmd->flags & IWX_CMD_WANT_RESP) == 0);
	cmd->flags |= IWX_CMD_WANT_RESP;
	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);

	err = iwx_send_cmd(sc, cmd);
	if (err)
		return err;

	pkt = cmd->resp_pkt;
	if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
		return EIO;

	/* The response must consist of exactly one status word. */
	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		iwx_free_resp(sc, cmd);
		return EIO;
	}

	resp = (void *)pkt->data;
	*status = le32toh(resp->status);
	iwx_free_resp(sc, cmd);
	return err;
}
3961 
3962 int
3963 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
3964     const void *data, uint32_t *status)
3965 {
3966 	struct iwx_host_cmd cmd = {
3967 		.id = id,
3968 		.len = { len, },
3969 		.data = { data, },
3970 	};
3971 
3972 	return iwx_send_cmd_status(sc, &cmd, status);
3973 }
3974 
3975 void
3976 iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
3977 {
3978 	KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP);
3979 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
3980 	hcmd->resp_pkt = NULL;
3981 }
3982 
/*
 * Handle a firmware acknowledgement of a host command: release the
 * command's DMA mapping and mbuf (if any) and wake up the thread
 * sleeping on this descriptor in iwx_send_cmd().
 * 'code' is the command opcode and is used for diagnostics only.
 */
void
iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
{
	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
	struct iwx_tx_data *data;

	if (qid != IWX_DQA_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	data = &ring->data[idx];

	/* Commands with large payloads carry an extra DMA-mapped mbuf. */
	if (data->m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Matches the tsleep_nsec() on this descriptor in iwx_send_cmd(). */
	wakeup(&ring->desc[idx]);

	DPRINTF(("%s: command 0x%x done\n", __func__, code));
	if (ring->queued == 0) {
		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
			DEVNAME(sc), code));
	} else if (ring->queued > 0)
		ring->queued--;
}
4011 
4012 /*
4013  * Fill in various bit for management frames, and leave them
4014  * unfilled for data frames (firmware takes care of that).
4015  * Return the selected TX rate.
4016  */
4017 const struct iwx_rate *
4018 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
4019     struct ieee80211_frame *wh, struct iwx_tx_cmd_gen2 *tx)
4020 {
4021 	struct ieee80211com *ic = &sc->sc_ic;
4022 	struct ieee80211_node *ni = &in->in_ni;
4023 	struct ieee80211_rateset *rs = &ni->ni_rates;
4024 	const struct iwx_rate *rinfo;
4025 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4026 	int min_ridx = iwx_rval2ridx(ieee80211_min_basic_rate(ic));
4027 	int ridx, rate_flags;
4028 	uint32_t flags = 0;
4029 
4030 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4031 	    type != IEEE80211_FC0_TYPE_DATA) {
4032 		/* for non-data, use the lowest supported rate */
4033 		ridx = min_ridx;
4034 		flags |= IWX_TX_FLAGS_CMD_RATE;
4035 	} else if (ic->ic_fixed_mcs != -1) {
4036 		ridx = sc->sc_fixed_ridx;
4037 		flags |= IWX_TX_FLAGS_CMD_RATE;
4038 	} else if (ic->ic_fixed_rate != -1) {
4039 		ridx = sc->sc_fixed_ridx;
4040 		flags |= IWX_TX_FLAGS_CMD_RATE;
4041 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
4042 		ridx = iwx_mcs2ridx[ni->ni_txmcs];
4043 	} else {
4044 		uint8_t rval;
4045 		rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
4046 		ridx = iwx_rval2ridx(rval);
4047 		if (ridx < min_ridx)
4048 			ridx = min_ridx;
4049 	}
4050 
4051 	if ((ic->ic_flags & IEEE80211_F_RSNON) &&
4052 	    ni->ni_rsn_supp_state == RSNA_SUPP_PTKNEGOTIATING)
4053 		flags |= IWX_TX_FLAGS_HIGH_PRI;
4054 	tx->flags = htole32(flags);
4055 
4056 	rinfo = &iwx_rates[ridx];
4057 	if (iwx_is_mimo_ht_plcp(rinfo->ht_plcp))
4058 		rate_flags = IWX_RATE_MCS_ANT_AB_MSK;
4059 	else
4060 		rate_flags = IWX_RATE_MCS_ANT_A_MSK;
4061 	if (IWX_RIDX_IS_CCK(ridx))
4062 		rate_flags |= IWX_RATE_MCS_CCK_MSK;
4063 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4064 	    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
4065 		rate_flags |= IWX_RATE_MCS_HT_MSK;
4066 		if (ieee80211_node_supports_ht_sgi20(ni))
4067 			rate_flags |= IWX_RATE_MCS_SGI_MSK;
4068 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4069 	} else
4070 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4071 
4072 	return rinfo;
4073 }
4074 
4075 void
4076 iwx_tx_update_byte_tbl(struct iwx_tx_ring *txq, int idx, uint16_t byte_cnt,
4077     uint16_t num_tbs)
4078 {
4079 	uint8_t filled_tfd_size, num_fetch_chunks;
4080 	uint16_t len = byte_cnt;
4081 	uint16_t bc_ent;
4082 	struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
4083 
4084 	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
4085 			  num_tbs * sizeof(struct iwx_tfh_tb);
4086 	/*
4087 	 * filled_tfd_size contains the number of filled bytes in the TFD.
4088 	 * Dividing it by 64 will give the number of chunks to fetch
4089 	 * to SRAM- 0 for one chunk, 1 for 2 and so on.
4090 	 * If, for example, TFD contains only 3 TBs then 32 bytes
4091 	 * of the TFD are used, and only one chunk of 64 bytes should
4092 	 * be fetched
4093 	 */
4094 	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;
4095 
4096 	/* Before AX210, the HW expects DW */
4097 	len = howmany(len, 4);
4098 	bc_ent = htole16(len | (num_fetch_chunks << 12));
4099 	scd_bc_tbl->tfd_offset[idx] = bc_ent;
4100 }
4101 
/*
 * Transmit mbuf 'm' to node 'ni' using EDCA access category 'ac'.
 * The 802.11 header is copied into the Tx command (TB0/TB1); the
 * remaining payload is attached via additional DMA segments.
 * Returns 0 on success. On failure the mbuf has been freed.
 */
int
iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ni;
	struct iwx_tx_ring *ring;
	struct iwx_tx_data *data;
	struct iwx_tfh_tfd *desc;
	struct iwx_device_cmd *cmd;
	struct iwx_tx_cmd_gen2 *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	const struct iwx_rate *rinfo;
	uint64_t paddr;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint16_t num_tbs;
	uint8_t type;
	int i, totlen, err, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_get_hdrlen(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/*
	 * Map EDCA categories to Tx data queues.
	 *
	 * We use static data queue assignments even in DQA mode. We do not
	 * need to share Tx queues between stations because we only implement
	 * client mode; the firmware's station table contains only one entry
	 * which represents our access point.
	 *
	 * Tx aggregation will require additional queues (one queue per TID
	 * for which aggregation is enabled) but we do not implement this yet.
	 */
	ring = &sc->txq[ac + IWX_DQA_AUX_QUEUE + 1];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWX_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Select the Tx rate and fill in the rate/flags fields. */
	rinfo = iwx_tx_fill_cmd(sc, in, wh, tx);

#if NBPFILTER > 0
	/* Feed a radiotap copy of the outgoing frame to listeners. */
	if (sc->sc_drvbpf != NULL) {
		struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
		uint16_t chan_flags;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		chan_flags = ni->ni_chan->ic_flags;
		if (ic->ic_curmode != IEEE80211_MODE_11N)
			chan_flags &= ~IEEE80211_CHAN_HT;
		tap->wt_chan_flags = htole16(chan_flags);
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
		    type == IEEE80211_FC0_TYPE_DATA &&
		    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
			tap->wt_rate = (0x80 | rinfo->ht_plcp);
		} else
			tap->wt_rate = rinfo->rate;
		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
		    m, BPF_DIRECTION_OUT);
	}
#endif

	/*
	 * CCMP frames are encrypted in hardware; all other ciphers are
	 * handled in software before handing the frame to the firmware.
	 */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
                k = ieee80211_get_txkey(ic, wh, ni);
		if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
				return ENOBUFS;
			/* 802.11 header may have moved. */
			wh = mtod(m, struct ieee80211_frame *);
			tx->flags |= htole32(IWX_TX_FLAGS_ENCRYPT_DIS);
		} else {
			k->k_tsc++;
			/* Hardware increments PN internally and adds IV. */
		}
	} else
		tx->flags |= htole32(IWX_TX_FLAGS_ENCRYPT_DIS);

	totlen = m->m_pkthdr.len;

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		pad = 4 - (hdrlen & 3);
		tx->offload_assist |= htole16(IWX_TX_CMD_OFFLD_PAD);
	} else
		pad = 0;

	tx->len = htole16(totlen);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);

	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (err && err != EFBIG) {
		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
		m_freem(m);
		return err;
	}
	if (err) {
		/* Too many DMA segments, linearize mbuf. */
		if (m_defrag(m, M_DONTWAIT)) {
			m_freem(m);
			return ENOBUFS;
		}
		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
			    err);
			m_freem(m);
			return err;
		}
	}
	data->m = m;
	data->in = in;

	/*
	 * Fill TX descriptor: TB0 and TB1 cover the Tx command plus
	 * the copied 802.11 header; the remaining TBs map the payload.
	 */
	num_tbs = 2 + data->map->dm_nsegs;
	desc->num_tbs = htole16(num_tbs);

	desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
	paddr = htole64(data->cmd_paddr);
	memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32)
		DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
	desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
	    sizeof(*tx) + hdrlen + pad - IWX_FIRST_TB_SIZE);
	paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
	memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));

	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32)
		DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
		paddr = htole64(seg->ds_addr);
		memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
		if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32)
			DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
	}

	/* Flush payload, command, and descriptor to memory before the
	 * hardware is told about the new write pointer. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

	iwx_tx_update_byte_tbl(ring, ring->cur, totlen, num_tbs);

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWX_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
4286 
4287 int
4288 iwx_flush_tx_path(struct iwx_softc *sc)
4289 {
4290 	struct iwx_tx_path_flush_cmd flush_cmd = {
4291 		.sta_id = htole32(IWX_STATION_ID),
4292 		.tid_mask = htole16(0xffff),
4293 	};
4294 	int err;
4295 
4296 	err = iwx_send_cmd_pdu(sc, IWX_TXPATH_FLUSH, 0,
4297 	    sizeof(flush_cmd), &flush_cmd);
4298 	if (err)
4299                 printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
4300 	return err;
4301 }
4302 
4303 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC    25
4304 
4305 int
4306 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
4307     struct iwx_beacon_filter_cmd *cmd)
4308 {
4309 	size_t len;
4310 
4311 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_BEACON_FILTER_V4))
4312 		len = sizeof(struct iwx_beacon_filter_cmd);
4313 	else
4314 		len = offsetof(struct iwx_beacon_filter_cmd,
4315 		    bf_threshold_absolute_low);
4316 
4317 	return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
4318 	    0, len, cmd);
4319 }
4320 
4321 int
4322 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
4323 {
4324 	struct iwx_beacon_filter_cmd cmd = {
4325 		IWX_BF_CMD_CONFIG_DEFAULTS,
4326 		.bf_enable_beacon_filter = htole32(1),
4327 		.ba_enable_beacon_abort = htole32(enable),
4328 	};
4329 
4330 	if (!sc->sc_bf.bf_enabled)
4331 		return 0;
4332 
4333 	sc->sc_bf.ba_enabled = enable;
4334 	return iwx_beacon_filter_send_cmd(sc, &cmd);
4335 }
4336 
4337 void
4338 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
4339     struct iwx_mac_power_cmd *cmd)
4340 {
4341 	struct ieee80211com *ic = &sc->sc_ic;
4342 	struct ieee80211_node *ni = &in->in_ni;
4343 	int dtim_period, dtim_msec, keep_alive;
4344 
4345 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
4346 	    in->in_color));
4347 	if (ni->ni_dtimperiod)
4348 		dtim_period = ni->ni_dtimperiod;
4349 	else
4350 		dtim_period = 1;
4351 
4352 	/*
4353 	 * Regardless of power management state the driver must set
4354 	 * keep alive period. FW will use it for sending keep alive NDPs
4355 	 * immediately after association. Check that keep alive period
4356 	 * is at least 3 * DTIM.
4357 	 */
4358 	dtim_msec = dtim_period * ni->ni_intval;
4359 	keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
4360 	keep_alive = roundup(keep_alive, 1000) / 1000;
4361 	cmd->keep_alive_seconds = htole16(keep_alive);
4362 
4363 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
4364 		cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4365 }
4366 
4367 int
4368 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
4369 {
4370 	int err;
4371 	int ba_enable;
4372 	struct iwx_mac_power_cmd cmd;
4373 
4374 	memset(&cmd, 0, sizeof(cmd));
4375 
4376 	iwx_power_build_cmd(sc, in, &cmd);
4377 
4378 	err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
4379 	    sizeof(cmd), &cmd);
4380 	if (err != 0)
4381 		return err;
4382 
4383 	ba_enable = !!(cmd.flags &
4384 	    htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4385 	return iwx_update_beacon_abort(sc, in, ba_enable);
4386 }
4387 
4388 int
4389 iwx_power_update_device(struct iwx_softc *sc)
4390 {
4391 	struct iwx_device_power_cmd cmd = { };
4392 	struct ieee80211com *ic = &sc->sc_ic;
4393 
4394 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
4395 		cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4396 
4397 	return iwx_send_cmd_pdu(sc,
4398 	    IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
4399 }
4400 
4401 int
4402 iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
4403 {
4404 	struct iwx_beacon_filter_cmd cmd = {
4405 		IWX_BF_CMD_CONFIG_DEFAULTS,
4406 		.bf_enable_beacon_filter = htole32(1),
4407 		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
4408 	};
4409 	int err;
4410 
4411 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
4412 	if (err == 0)
4413 		sc->sc_bf.bf_enabled = 1;
4414 
4415 	return err;
4416 }
4417 
4418 int
4419 iwx_disable_beacon_filter(struct iwx_softc *sc)
4420 {
4421 	struct iwx_beacon_filter_cmd cmd;
4422 	int err;
4423 
4424 	memset(&cmd, 0, sizeof(cmd));
4425 
4426 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
4427 	if (err == 0)
4428 		sc->sc_bf.bf_enabled = 0;
4429 
4430 	return err;
4431 }
4432 
/*
 * Add our station to the firmware's station table, or update the
 * existing entry if 'update' is set. In monitor mode a general
 * purpose station entry is used; in client mode the entry represents
 * our access point.
 */
int
iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
{
	struct iwx_add_sta_cmd add_sta_cmd;
	int err;
	uint32_t status;
	struct ieee80211com *ic = &sc->sc_ic;

	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
		panic("STA already added");

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
	} else {
		add_sta_cmd.sta_id = IWX_STATION_ID;
		add_sta_cmd.station_type = IWX_STA_LINK;
	}
	add_sta_cmd.mac_id_n_color
	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	/* The peer MAC address is only set when the entry is created. */
	if (!update) {
		if (ic->ic_opmode == IEEE80211_M_MONITOR)
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    etheranyaddr);
		else
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    in->in_ni.ni_bssid);
	}
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);
	/* Tx is initially disabled on all TIDs. */
	add_sta_cmd.tid_disable_tx = htole16(0xffff);
	if (update)
		add_sta_cmd.modify_mask |= (IWX_STA_MODIFY_TID_DISABLE_TX);

	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		add_sta_cmd.station_flags_msk
		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);

		add_sta_cmd.station_flags
		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_64K);
		/* Map the peer's advertised A-MPDU density to fw flags. */
		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
		case IEEE80211_AMPDU_PARAM_SS_2:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_4:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_8:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_16:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
			break;
		default:
			break;
		}
	}

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
	    &add_sta_cmd, &status);
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
4507 
4508 int
4509 iwx_add_aux_sta(struct iwx_softc *sc)
4510 {
4511 	struct iwx_add_sta_cmd cmd;
4512 	int err, qid = IWX_DQA_AUX_QUEUE;
4513 	uint32_t status;
4514 
4515 	memset(&cmd, 0, sizeof(cmd));
4516 	cmd.sta_id = IWX_AUX_STA_ID;
4517 	cmd.station_type = IWX_STA_AUX_ACTIVITY;
4518 	cmd.mac_id_n_color =
4519 	    htole32(IWX_FW_CMD_ID_AND_COLOR(IWX_MAC_INDEX_AUX, 0));
4520 	cmd.tid_disable_tx = htole16(0xffff);
4521 
4522 	status = IWX_ADD_STA_SUCCESS;
4523 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
4524 	    &status);
4525 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
4526 		return EIO;
4527 
4528 	return iwx_enable_txq(sc, IWX_AUX_STA_ID, qid, IWX_MGMT_TID,
4529 	    IWX_TX_RING_COUNT);
4530 }
4531 
4532 int
4533 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
4534 {
4535 	struct ieee80211com *ic = &sc->sc_ic;
4536 	struct iwx_rm_sta_cmd rm_sta_cmd;
4537 	int err;
4538 
4539 	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
4540 		panic("sta already removed");
4541 
4542 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
4543 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
4544 		rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
4545 	else
4546 		rm_sta_cmd.sta_id = IWX_STATION_ID;
4547 
4548 	err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
4549 	    &rm_sta_cmd);
4550 
4551 	return err;
4552 }
4553 
/*
 * Fill a UMAC scan command's channel list with all channels known to
 * net80211, up to the firmware's scan channel limit. The layout of
 * each channel entry depends on the firmware's API version.
 * Returns the number of channel entries written.
 */
uint8_t
iwx_umac_scan_fill_channels(struct iwx_softc *sc,
    struct iwx_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	/* Index 0 of ic_channels is unused; start scanning at index 1. */
	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		uint8_t channel_num;

		/* Skip channel table slots which are not populated. */
		if (c->ic_flags == 0)
			continue;

		channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
		if (isset(sc->sc_ucode_api,
		    IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
			/* v2 entries carry an explicit band field. */
			chan->v2.channel_num = channel_num;
			if (IEEE80211_IS_CHAN_2GHZ(c))
				chan->v2.band = IWX_PHY_BAND_24;
			else
				chan->v2.band = IWX_PHY_BAND_5;
			chan->v2.iter_count = 1;
			chan->v2.iter_interval = 0;
		} else {
			chan->v1.channel_num = channel_num;
			chan->v1.iter_count = 1;
			chan->v1.iter_interval = htole16(0);
		}
		/* Directed foreground scans probe for our desired SSID. */
		if (n_ssids != 0 && !bgscan)
			chan->flags = htole32(1 << 0); /* select SSID 0 */
		chan++;
		nchan++;
	}

	return nchan;
}
4594 
4595 int
4596 iwx_fill_probe_req_v1(struct iwx_softc *sc, struct iwx_scan_probe_req_v1 *preq1)
4597 {
4598 	struct iwx_scan_probe_req preq2;
4599 	int err, i;
4600 
4601 	err = iwx_fill_probe_req(sc, &preq2);
4602 	if (err)
4603 		return err;
4604 
4605 	preq1->mac_header = preq2.mac_header;
4606 	for (i = 0; i < nitems(preq1->band_data); i++)
4607 		preq1->band_data[i] = preq2.band_data[i];
4608 	preq1->common_data = preq2.common_data;
4609 	memcpy(preq1->buf, preq2.buf, sizeof(preq1->buf));
4610 	return 0;
4611 }
4612 
/*
 * Build the probe request template which the firmware transmits
 * during active scans. The frame is assembled in preq->buf, and
 * the mac_header/band_data/common_data descriptors tell the firmware
 * where the header, the per-band IEs, and the common IEs live so it
 * can splice in the SSID and pick the right band-specific IEs.
 * 'remain' tracks the space left in preq->buf at every step.
 */
int
iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
	struct ieee80211_rateset *rs;
	size_t remain = sizeof(preq->buf);
	uint8_t *frm, *pos;

	memset(preq, 0, sizeof(*preq));

	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
		return ENOBUFS;

	/*
	 * Build a probe request frame.  Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	/* Append an empty SSID element; firmware inserts the SSID. */
	frm = (uint8_t *)(wh + 1);
	*frm++ = IEEE80211_ELEMID_SSID;
	*frm++ = 0;
	/* hardware inserts SSID */

	/* Tell the firmware where the MAC header is. */
	preq->mac_header.offset = 0;
	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
	remain -= frm - (uint8_t *)wh;

	/* Fill in 2GHz IEs and tell firmware where they are. */
	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
		/* Rates spill into an extended rates element (2 headers). */
		if (remain < 4 + rs->rs_nrates)
			return ENOBUFS;
	} else if (remain < 2 + rs->rs_nrates)
		return ENOBUFS;
	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	remain -= frm - pos;

	if (isset(sc->sc_enabled_capa,
	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
		/* DS params element; channel number is filled in by fw. */
		if (remain < 3)
			return ENOBUFS;
		*frm++ = IEEE80211_ELEMID_DSPARMS;
		*frm++ = 1;
		*frm++ = 0;
		remain -= 3;
	}
	preq->band_data[0].len = htole16(frm - pos);

	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
		/* Fill in 5GHz IEs. */
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
			if (remain < 4 + rs->rs_nrates)
				return ENOBUFS;
		} else if (remain < 2 + rs->rs_nrates)
			return ENOBUFS;
		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
		pos = frm;
		frm = ieee80211_add_rates(frm, rs);
		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
			frm = ieee80211_add_xrates(frm, rs);
		preq->band_data[1].len = htole16(frm - pos);
		remain -= frm - pos;
	}

	/* Send 11n IEs on both 2GHz and 5GHz bands. */
	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	if (ic->ic_flags & IEEE80211_F_HTON) {
		if (remain < 28)
			return ENOBUFS;
		frm = ieee80211_add_htcaps(frm, ic);
		/* XXX add WME info? */
	}
	preq->common_data.len = htole16(frm - pos);

	return 0;
}
4705 
/*
 * Send the firmware a global scan configuration: supported legacy
 * rates, dwell times, our MAC address, and the list of channels
 * which scan requests may refer to.
 */
int
iwx_config_umac_scan(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_scan_config *scan_config;
	int err, nchan;
	size_t cmd_size;
	struct ieee80211_channel *c;
	struct iwx_host_cmd hcmd = {
		.id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
		.flags = 0,
	};
	static const uint32_t rates = (IWX_SCAN_CONFIG_RATE_1M |
	    IWX_SCAN_CONFIG_RATE_2M | IWX_SCAN_CONFIG_RATE_5M |
	    IWX_SCAN_CONFIG_RATE_11M | IWX_SCAN_CONFIG_RATE_6M |
	    IWX_SCAN_CONFIG_RATE_9M | IWX_SCAN_CONFIG_RATE_12M |
	    IWX_SCAN_CONFIG_RATE_18M | IWX_SCAN_CONFIG_RATE_24M |
	    IWX_SCAN_CONFIG_RATE_36M | IWX_SCAN_CONFIG_RATE_48M |
	    IWX_SCAN_CONFIG_RATE_54M);

	/* The command ends with one byte per scannable channel. */
	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;

	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
	if (scan_config == NULL)
		return ENOMEM;

	scan_config->tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
	scan_config->rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
	scan_config->legacy_rates = htole32(rates |
	    IWX_SCAN_CONFIG_SUPPORTED_RATE(rates));

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	scan_config->dwell.active = 10;
	scan_config->dwell.passive = 110;
	scan_config->dwell.fragmented = 44;
	scan_config->dwell.extended = 90;
	scan_config->out_of_channel_time[IWX_SCAN_LB_LMAC_IDX] = htole32(0);
	scan_config->out_of_channel_time[IWX_SCAN_HB_LMAC_IDX] = htole32(0);
	scan_config->suspend_time[IWX_SCAN_LB_LMAC_IDX] = htole32(0);
	scan_config->suspend_time[IWX_SCAN_HB_LMAC_IDX] = htole32(0);

	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);

	scan_config->bcast_sta_id = IWX_AUX_STA_ID;
	scan_config->channel_flags = 0;

	/* Index 0 of ic_channels is unused; start at index 1. */
	for (c = &ic->ic_channels[1], nchan = 0;
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels; c++) {
		if (c->ic_flags == 0)
			continue;
		scan_config->channel_array[nchan++] =
		    ieee80211_mhz2ieee(c->ic_freq, 0);
	}

	scan_config->flags = htole32(IWX_SCAN_CONFIG_FLAG_ACTIVATE |
	    IWX_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
	    IWX_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
	    IWX_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
	    IWX_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
	    IWX_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
	    IWX_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
	    IWX_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWX_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
	    IWX_SCAN_CONFIG_N_CHANNELS(nchan) |
	    IWX_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);

	hcmd.data[0] = scan_config;
	hcmd.len[0] = cmd_size;

	err = iwx_send_cmd(sc, &hcmd);
	free(scan_config, M_DEVBUF, cmd_size);
	return err;
}
4780 
/*
 * Compute the size of a UMAC scan request command. Both the fixed
 * part and the tail vary with the firmware's API version; the middle
 * holds one channel config entry per scannable channel.
 */
int
iwx_umac_scan_size(struct iwx_softc *sc)
{
	int base_size = IWX_SCAN_REQ_UMAC_SIZE_V1;
	int tail_size;

	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
		base_size = IWX_SCAN_REQ_UMAC_SIZE_V8;
	else if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL))
		base_size = IWX_SCAN_REQ_UMAC_SIZE_V7;
#ifdef notyet
	else if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22000)
		base_size = IWX_SCAN_REQ_UMAC_SIZE_V6;
#endif
	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
		tail_size = sizeof(struct iwx_scan_req_umac_tail_v2);
	else
		tail_size = sizeof(struct iwx_scan_req_umac_tail_v1);

	return base_size + sizeof(struct iwx_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels + tail_size;
}
4803 
/*
 * Return a pointer to the channel parameter sub-structure within
 * a UMAC scan request; its offset depends on the firmware's API
 * version.
 */
struct iwx_scan_umac_chan_param *
iwx_get_scan_req_umac_chan_param(struct iwx_softc *sc,
    struct iwx_scan_req_umac *req)
{
	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
		return &req->v8.channel;

	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL))
		return &req->v7.channel;
#ifdef notyet
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22000)
		return &req->v6.channel;
#endif
	return &req->v1.channel;
}
4819 
/*
 * Return a pointer to the variable-length data portion (channel
 * configs followed by the tail) of a UMAC scan request; its offset
 * depends on the firmware's API version.
 */
void *
iwx_get_scan_req_umac_data(struct iwx_softc *sc, struct iwx_scan_req_umac *req)
{
	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
		return (void *)&req->v8.data;

	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL))
		return (void *)&req->v7.data;
#ifdef notyet
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_22000)
		return (void *)&req->v6.data;
#endif
	return (void *)&req->v1.data;

}
4835 
4836 /* adaptive dwell max budget time [TU] for full scan */
4837 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
4838 /* adaptive dwell max budget time [TU] for directed scan */
4839 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
4840 /* adaptive dwell default high band APs number */
4841 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
4842 /* adaptive dwell default low band APs number */
4843 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
4844 /* adaptive dwell default APs number in social channels (1, 6, 11) */
4845 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
4846 
4847 int
4848 iwx_umac_scan(struct iwx_softc *sc, int bgscan)
4849 {
4850 	struct ieee80211com *ic = &sc->sc_ic;
4851 	struct iwx_host_cmd hcmd = {
4852 		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
4853 		.len = { 0, },
4854 		.data = { NULL, },
4855 		.flags = 0,
4856 	};
4857 	struct iwx_scan_req_umac *req;
4858 	void *cmd_data, *tail_data;
4859 	struct iwx_scan_req_umac_tail_v2 *tail;
4860 	struct iwx_scan_req_umac_tail_v1 *tailv1;
4861 	struct iwx_scan_umac_chan_param *chanparam;
4862 	size_t req_len;
4863 	int err, async = bgscan;
4864 
4865 	req_len = iwx_umac_scan_size(sc);
4866 	if ((req_len < IWX_SCAN_REQ_UMAC_SIZE_V1 +
4867 	    sizeof(struct iwx_scan_req_umac_tail_v1)) ||
4868 	    req_len > IWX_MAX_CMD_PAYLOAD_SIZE)
4869 		return ERANGE;
4870 	req = malloc(req_len, M_DEVBUF,
4871 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
4872 	if (req == NULL)
4873 		return ENOMEM;
4874 
4875 	hcmd.len[0] = (uint16_t)req_len;
4876 	hcmd.data[0] = (void *)req;
4877 	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;
4878 
4879 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL)) {
4880 		req->v7.adwell_default_n_aps_social =
4881 			IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
4882 		req->v7.adwell_default_n_aps =
4883 			IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
4884 
4885 		if (ic->ic_des_esslen != 0)
4886 			req->v7.adwell_max_budget =
4887 			    htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
4888 		else
4889 			req->v7.adwell_max_budget =
4890 			    htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
4891 
4892 		req->v7.scan_priority = htole32(IWX_SCAN_PRIORITY_HIGH);
4893 		req->v7.max_out_time[IWX_SCAN_LB_LMAC_IDX] = 0;
4894 		req->v7.suspend_time[IWX_SCAN_LB_LMAC_IDX] = 0;
4895 
4896 		if (isset(sc->sc_ucode_api,
4897 		    IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
4898 			req->v8.active_dwell[IWX_SCAN_LB_LMAC_IDX] = 10;
4899 			req->v8.passive_dwell[IWX_SCAN_LB_LMAC_IDX] = 110;
4900 		} else {
4901 			req->v7.active_dwell = 10;
4902 			req->v7.passive_dwell = 110;
4903 			req->v7.fragmented_dwell = 44;
4904 		}
4905 	} else {
4906 		/* These timings correspond to iwlwifi's UNASSOC scan. */
4907 		req->v1.active_dwell = 10;
4908 		req->v1.passive_dwell = 110;
4909 		req->v1.fragmented_dwell = 44;
4910 		req->v1.extended_dwell = 90;
4911 
4912 		req->v1.scan_priority = htole32(IWX_SCAN_PRIORITY_HIGH);
4913 	}
4914 
4915 	if (bgscan) {
4916 		const uint32_t timeout = htole32(120);
4917 		if (isset(sc->sc_ucode_api,
4918 		    IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
4919 			req->v8.max_out_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
4920 			req->v8.suspend_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
4921 		} else if (isset(sc->sc_ucode_api,
4922 		    IWX_UCODE_TLV_API_ADAPTIVE_DWELL)) {
4923 			req->v7.max_out_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
4924 			req->v7.suspend_time[IWX_SCAN_LB_LMAC_IDX] = timeout;
4925 		} else {
4926 			req->v1.max_out_time = timeout;
4927 			req->v1.suspend_time = timeout;
4928 		}
4929 	}
4930 
4931 	req->ooc_priority = htole32(IWX_SCAN_PRIORITY_HIGH);
4932 
4933 	cmd_data = iwx_get_scan_req_umac_data(sc, req);
4934 	chanparam = iwx_get_scan_req_umac_chan_param(sc, req);
4935 	chanparam->count = iwx_umac_scan_fill_channels(sc,
4936 	    (struct iwx_scan_channel_cfg_umac *)cmd_data,
4937 	    ic->ic_des_esslen != 0, bgscan);
4938 	chanparam->flags = 0;
4939 
4940 	tail_data = cmd_data + sizeof(struct iwx_scan_channel_cfg_umac) *
4941 	    sc->sc_capa_n_scan_channels;
4942 	tail = tail_data;
4943 	/* tail v1 layout differs in preq and direct_scan member fields. */
4944 	tailv1 = tail_data;
4945 
4946 	req->general_flags = htole32(IWX_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
4947 	    IWX_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);
4948 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
4949 		req->v8.general_flags2 =
4950 			IWX_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
4951 	}
4952 
4953 #if 0 /* XXX Active scan causes firmware errors after association. */
4954 	/* Check if we're doing an active directed scan. */
4955 	if (ic->ic_des_esslen != 0) {
4956 		if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
4957 			tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
4958 			tail->direct_scan[0].len = ic->ic_des_esslen;
4959 			memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
4960 			    ic->ic_des_esslen);
4961 		} else {
4962 			tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
4963 			tailv1->direct_scan[0].len = ic->ic_des_esslen;
4964 			memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
4965 			    ic->ic_des_esslen);
4966 		}
4967 		req->general_flags |=
4968 		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
4969 	} else
4970 #endif
4971 		req->general_flags |= htole32(IWX_UMAC_SCAN_GEN_FLAGS_PASSIVE);
4972 
4973 	if (isset(sc->sc_enabled_capa,
4974 	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
4975 		req->general_flags |=
4976 		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
4977 
4978 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_ADAPTIVE_DWELL)) {
4979 		req->general_flags |=
4980 		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL);
4981 	} else {
4982 		req->general_flags |=
4983 		    htole32(IWX_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
4984 	}
4985 
4986 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
4987 		err = iwx_fill_probe_req(sc, &tail->preq);
4988 	else
4989 		err = iwx_fill_probe_req_v1(sc, &tailv1->preq);
4990 	if (err) {
4991 		free(req, M_DEVBUF, req_len);
4992 		return err;
4993 	}
4994 
4995 	/* Specify the scan plan: We'll do one iteration. */
4996 	tail->schedule[0].interval = 0;
4997 	tail->schedule[0].iter_count = 1;
4998 
4999 	err = iwx_send_cmd(sc, &hcmd);
5000 	free(req, M_DEVBUF, req_len);
5001 	return err;
5002 }
5003 
5004 void
5005 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
5006 {
5007 	struct ieee80211com *ic = &sc->sc_ic;
5008 	struct ifnet *ifp = IC2IFP(ic);
5009 	char alpha2[3];
5010 
5011 	snprintf(alpha2, sizeof(alpha2), "%c%c",
5012 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
5013 
5014 	if (ifp->if_flags & IFF_DEBUG) {
5015 		printf("%s: firmware has detected regulatory domain '%s' "
5016 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
5017 	}
5018 
5019 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
5020 }
5021 
5022 uint8_t
5023 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5024 {
5025 	int i;
5026 	uint8_t rval;
5027 
5028 	for (i = 0; i < rs->rs_nrates; i++) {
5029 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5030 		if (rval == iwx_rates[ridx].rate)
5031 			return rs->rs_rates[i];
5032 	}
5033 
5034 	return 0;
5035 }
5036 
5037 int
5038 iwx_rval2ridx(int rval)
5039 {
5040 	int ridx;
5041 
5042 	for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
5043 		if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
5044 			continue;
5045 		if (rval == iwx_rates[ridx].rate)
5046 			break;
5047 	}
5048 
5049        return ridx;
5050 }
5051 
/*
 * Compute the CCK and OFDM basic-rate bitmaps used by firmware for
 * control-response (ACK/CTS) rate selection. The bitmaps are derived
 * from the node's basic rates and then widened with mandatory lower
 * rates as required by 802.11-2007 section 9.6 (see comment below).
 * Results are returned through *cck_rates and *ofdm_rates.
 */
void
iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;	/* -1 means "no basic OFDM rate" */
	int lowest_present_cck = -1;	/* -1 means "no basic CCK rate" */
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK rates only apply on 2GHz channels (or unknown channel). */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		/* OFDM bitmap is relative to the first OFDM rate index. */
		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	/* Note: these are false when lowest_present_ofdm == -1. */
	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWX_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
5137 
/*
 * Fill in the fields of a MAC context command which are common to all
 * actions (add/modify/remove): identification, MAC type, addresses,
 * ACK rates, slot/preamble flags, EDCA parameters, and HT/ERP
 * protection flags. For REMOVE actions only the ID/action fields are
 * set; for monitor mode, only ID, type and addresses are set.
 */
void
iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
    struct iwx_mac_ctx_cmd *cmd, uint32_t action)
{
#define IWX_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	/* A remove action needs no further configuration. */
	if (action == IWX_FW_CTXT_ACTION_REMOVE)
		return;

	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
	else if (ic->ic_opmode == IEEE80211_M_STA)
		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
	else
		panic("unsupported operating mode %d\n", ic->ic_opmode);
	cmd->tsf_id = htole32(IWX_TSF_ID_A);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	/* Monitor mode has no BSS; use the broadcast address. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
		return;
	}

	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
	iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWX_MAC_FLG_SHORT_SLOT : 0);

	/* Translate net80211 EDCA parameters into per-fifo Tx config. */
	for (i = 0; i < EDCA_NUM_AC; i++) {
		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
		int txf = iwx_ac_to_tx_fifo[i];

		cmd->ac[txf].cw_min = htole16(IWX_EXP2(ac->ac_ecwmin));
		cmd->ac[txf].cw_max = htole16(IWX_EXP2(ac->ac_ecwmax));
		cmd->ac[txf].aifsn = ac->ac_aifsn;
		cmd->ac[txf].fifos_mask = (1 << txf);
		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
	}
	if (ni->ni_flags & IEEE80211_NODE_QOS)
		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);

	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* Derive protection flags from the BSS HT operation IE. */
		enum ieee80211_htprot htprot =
		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
		switch (htprot) {
		case IEEE80211_HTPROT_NONE:
			break;
		case IEEE80211_HTPROT_NONMEMBER:
		case IEEE80211_HTPROT_NONHT_MIXED:
			cmd->protection_flags |=
			    htole32(IWX_MAC_PROT_FLG_HT_PROT);
			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
				cmd->protection_flags |=
				    htole32(IWX_MAC_PROT_FLG_SELF_CTS_EN);
			break;
		case IEEE80211_HTPROT_20MHZ:
			if (ic->ic_htcaps & IEEE80211_HTCAP_CBW20_40) {
				/* XXX ... and if our channel is 40 MHz ... */
				cmd->protection_flags |=
				    htole32(IWX_MAC_PROT_FLG_HT_PROT |
				    IWX_MAC_PROT_FLG_FAT_PROT);
				if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
					cmd->protection_flags |= htole32(
					    IWX_MAC_PROT_FLG_SELF_CTS_EN);
			}
			break;
		default:
			break;
		}

		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
	}
	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);

	cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
#undef IWX_EXP2
}
5231 
/*
 * Fill the station-specific portion of a MAC context command with
 * association state and beacon/DTIM timing derived from the BSS node.
 */
void
iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
    struct iwx_mac_data_sta *sta, int assoc)
{
	struct ieee80211_node *ni = &in->in_ni;
	uint32_t dtim_off;
	uint64_t tsf;

	/* Offset from the last beacon to the next DTIM, in microseconds. */
	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
	/* The beacon timestamp is stored little-endian in ni_tstamp. */
	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
	tsf = letoh64(tsf);

	sta->is_assoc = htole32(assoc);
	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
	sta->dtim_tsf = htole64(tsf + dtim_off);
	sta->bi = htole32(ni->ni_intval);
	sta->bi_reciprocal = htole32(iwx_reciprocal(ni->ni_intval));
	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
	/*
	 * NOTE(review): iwx_reciprocal() is fed the already-htole32'd
	 * dtim_interval here; on a big-endian host this would compute the
	 * reciprocal of a byte-swapped value. Harmless on little-endian —
	 * confirm intent before changing.
	 */
	sta->dtim_reciprocal = htole32(iwx_reciprocal(sta->dtim_interval));
	sta->listen_interval = htole32(10);
	sta->assoc_id = htole32(ni->ni_associd);
	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
}
5255 
5256 int
5257 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
5258     int assoc)
5259 {
5260 	struct ieee80211com *ic = &sc->sc_ic;
5261 	struct ieee80211_node *ni = &in->in_ni;
5262 	struct iwx_mac_ctx_cmd cmd;
5263 	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
5264 
5265 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
5266 		panic("MAC already added");
5267 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
5268 		panic("MAC already removed");
5269 
5270 	memset(&cmd, 0, sizeof(cmd));
5271 
5272 	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
5273 
5274 	if (action == IWX_FW_CTXT_ACTION_REMOVE) {
5275 		return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
5276 		    sizeof(cmd), &cmd);
5277 	}
5278 
5279 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5280 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
5281 		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
5282 		    IWX_MAC_FILTER_ACCEPT_GRP |
5283 		    IWX_MAC_FILTER_IN_BEACON |
5284 		    IWX_MAC_FILTER_IN_PROBE_REQUEST |
5285 		    IWX_MAC_FILTER_IN_CRC32);
5286 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
5287 		/*
5288 		 * Allow beacons to pass through as long as we are not
5289 		 * associated or we do not have dtim period information.
5290 		 */
5291 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
5292 	else
5293 		iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5294 
5295 	return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5296 }
5297 
5298 int
5299 iwx_clear_statistics(struct iwx_softc *sc)
5300 {
5301 	struct iwx_statistics_cmd scmd = {
5302 		.flags = htole32(IWX_STATISTICS_FLG_CLEAR)
5303 	};
5304 	struct iwx_host_cmd cmd = {
5305 		.id = IWX_STATISTICS_CMD,
5306 		.len[0] = sizeof(scmd),
5307 		.data[0] = &scmd,
5308 		.flags = IWX_CMD_WANT_RESP,
5309 		.resp_pkt_len = sizeof(struct iwx_notif_statistics),
5310 	};
5311 	int err;
5312 
5313 	err = iwx_send_cmd(sc, &cmd);
5314 	if (err)
5315 		return err;
5316 
5317 	iwx_free_resp(sc, &cmd);
5318 	return 0;
5319 }
5320 
/*
 * Distribute the firmware's scheduling-session quota among active
 * bindings and send a TIME_QUOTA command. With a single client
 * interface there is at most one active binding; 'running' selects
 * whether that binding receives quota at all.
 */
int
iwx_update_quotas(struct iwx_softc *sc, struct iwx_node *in, int running)
{
	struct iwx_time_quota_cmd cmd;
	int i, idx, num_active_macs, quota, quota_rem;
	int colors[IWX_MAX_BINDINGS] = { -1, -1, -1, -1, };
	int n_ifs[IWX_MAX_BINDINGS] = {0, };
	uint16_t id;

	memset(&cmd, 0, sizeof(cmd));

	/* currently, PHY ID == binding ID */
	if (in && in->in_phyctxt) {
		id = in->in_phyctxt->id;
		KASSERT(id < IWX_MAX_BINDINGS);
		colors[id] = in->in_phyctxt->color;
		if (running)
			n_ifs[id] = 1;
	}

	/*
	 * The FW's scheduling session consists of
	 * IWX_MAX_QUOTA fragments. Divide these fragments
	 * equally between all the bindings that require quota
	 */
	num_active_macs = 0;
	for (i = 0; i < IWX_MAX_BINDINGS; i++) {
		/* Mark all slots invalid first; valid ones are set below. */
		cmd.quotas[i].id_and_color = htole32(IWX_FW_CTXT_INVALID);
		num_active_macs += n_ifs[i];
	}

	quota = 0;
	quota_rem = 0;
	if (num_active_macs) {
		quota = IWX_MAX_QUOTA / num_active_macs;
		quota_rem = IWX_MAX_QUOTA % num_active_macs;
	}

	/* Pack bindings with a known color into consecutive quota slots. */
	for (idx = 0, i = 0; i < IWX_MAX_BINDINGS; i++) {
		if (colors[i] < 0)
			continue;

		cmd.quotas[idx].id_and_color =
			htole32(IWX_FW_CMD_ID_AND_COLOR(i, colors[i]));

		if (n_ifs[i] <= 0) {
			cmd.quotas[idx].quota = htole32(0);
			cmd.quotas[idx].max_duration = htole32(0);
		} else {
			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
			cmd.quotas[idx].max_duration = htole32(0);
		}
		idx++;
	}

	/* Give the remainder of the session to the first binding */
	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);

	return iwx_send_cmd_pdu(sc, IWX_TIME_QUOTA_CMD, 0,
	    sizeof(cmd), &cmd);
}
5382 
5383 void
5384 iwx_add_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
5385 {
5386 	int s = splnet();
5387 
5388 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
5389 		splx(s);
5390 		return;
5391 	}
5392 
5393 	refcnt_take(&sc->task_refs);
5394 	if (!task_add(taskq, task))
5395 		refcnt_rele_wake(&sc->task_refs);
5396 	splx(s);
5397 }
5398 
5399 void
5400 iwx_del_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
5401 {
5402 	if (task_del(taskq, task))
5403 		refcnt_rele(&sc->task_refs);
5404 }
5405 
/*
 * Start a foreground scan. Any background scan in progress is aborted
 * first. On success, net80211 state is moved to SCAN and iwx_init()
 * is woken up. Returns 0 on success or an errno value.
 */
int
iwx_scan(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int err;

	if (sc->sc_flags & IWX_FLAG_BGSCAN) {
		err = iwx_scan_abort(sc);
		if (err) {
			printf("%s: could not abort background scan\n",
			    DEVNAME(sc));
			return err;
		}
	}

	err = iwx_umac_scan(sc, 0);
	if (err) {
		printf("%s: could not initiate scan\n", DEVNAME(sc));
		return err;
	}

	/*
	 * The current mode might have been fixed during association.
	 * Ensure all channels get scanned.
	 */
	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);

	sc->sc_flags |= IWX_FLAG_SCANNING;
	if (ifp->if_flags & IFF_DEBUG)
		printf("%s: %s -> %s\n", ifp->if_xname,
		    ieee80211_state_name[ic->ic_state],
		    ieee80211_state_name[IEEE80211_S_SCAN]);
	/* A foreground scan drops any existing association state. */
	if ((sc->sc_flags & IWX_FLAG_BGSCAN) == 0) {
		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
		ieee80211_node_cleanup(ic, ic->ic_bss);
	}
	ic->ic_state = IEEE80211_S_SCAN;
	wakeup(&ic->ic_state); /* wake iwx_init() */

	return 0;
}
5449 
5450 int
5451 iwx_bgscan(struct ieee80211com *ic)
5452 {
5453 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
5454 	int err;
5455 
5456 	if (sc->sc_flags & IWX_FLAG_SCANNING)
5457 		return 0;
5458 
5459 	err = iwx_umac_scan(sc, 1);
5460 	if (err) {
5461 		printf("%s: could not initiate scan\n", DEVNAME(sc));
5462 		return err;
5463 	}
5464 
5465 	sc->sc_flags |= IWX_FLAG_BGSCAN;
5466 	return 0;
5467 }
5468 
5469 int
5470 iwx_umac_scan_abort(struct iwx_softc *sc)
5471 {
5472 	struct iwx_umac_scan_abort cmd = { 0 };
5473 
5474 	return iwx_send_cmd_pdu(sc,
5475 	    IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC),
5476 	    0, sizeof(cmd), &cmd);
5477 }
5478 
5479 int
5480 iwx_scan_abort(struct iwx_softc *sc)
5481 {
5482 	int err;
5483 
5484 	err = iwx_umac_scan_abort(sc);
5485 	if (err == 0)
5486 		sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
5487 	return err;
5488 }
5489 
5490 int
5491 iwx_enable_data_tx_queues(struct iwx_softc *sc)
5492 {
5493 	int err, ac;
5494 
5495 	for (ac = 0; ac < EDCA_NUM_AC; ac++) {
5496 		int qid = ac + IWX_DQA_AUX_QUEUE + 1;
5497 		/*
5498 		 * Regular data frames use the "MGMT" TID and queue.
5499 		 * Other TIDs and queues are reserved for frame aggregation.
5500 		 */
5501 		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, IWX_TID_NON_QOS,
5502 		    IWX_TX_RING_COUNT);
5503 		if (err) {
5504 			printf("%s: could not enable Tx queue %d (error %d)\n",
5505 			    DEVNAME(sc), ac, err);
5506 			return err;
5507 		}
5508 	}
5509 
5510 	return 0;
5511 }
5512 
5513 int
5514 iwx_rs_rval2idx(uint8_t rval)
5515 {
5516 	/* Firmware expects indices which match our 11g rate set. */
5517 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
5518 	int i;
5519 
5520 	for (i = 0; i < rs->rs_nrates; i++) {
5521 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
5522 			return i;
5523 	}
5524 
5525 	return -1;
5526 }
5527 
5528 uint16_t
5529 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
5530 {
5531 	struct ieee80211com *ic = &sc->sc_ic;
5532 	const struct ieee80211_ht_rateset *rs;
5533 	uint16_t htrates = 0;
5534 	int mcs;
5535 
5536 	rs = &ieee80211_std_ratesets_11n[rsidx];
5537 	for (mcs = rs->min_mcs; mcs <= rs->max_mcs; mcs++) {
5538 		if (!isset(ni->ni_rxmcs, mcs) ||
5539 		    !isset(ic->ic_sup_mcs, mcs))
5540 			continue;
5541 		htrates |= (1 << (mcs - rs->min_mcs));
5542 	}
5543 
5544 	return htrates;
5545 }
5546 
5547 int
5548 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
5549 {
5550 	struct ieee80211_node *ni = &in->in_ni;
5551 	struct ieee80211_rateset *rs = &ni->ni_rates;
5552 	struct iwx_tlc_config_cmd cfg_cmd;
5553 	uint32_t cmd_id;
5554 	int i;
5555 
5556 	memset(&cfg_cmd, 0, sizeof(cfg_cmd));
5557 
5558 	for (i = 0; i < rs->rs_nrates; i++) {
5559 		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
5560 		int idx = iwx_rs_rval2idx(rval);
5561 		if (idx == -1)
5562 			return EINVAL;
5563 		cfg_cmd.non_ht_rates |= (1 << idx);
5564 	}
5565 
5566 	if (ni->ni_flags & IEEE80211_NODE_HT) {
5567 		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
5568 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_HT_BW_NONE_160] =
5569 		    iwx_rs_ht_rates(sc, ni, IEEE80211_HT_RATESET_SISO);
5570 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_HT_BW_NONE_160] =
5571 		    iwx_rs_ht_rates(sc, ni, IEEE80211_HT_RATESET_MIMO2);
5572 	} else
5573 		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
5574 
5575 	cfg_cmd.sta_id = IWX_STATION_ID;
5576 	cfg_cmd.max_ch_width = IWX_RATE_MCS_CHAN_WIDTH_20;
5577 	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
5578 	cfg_cmd.max_mpdu_len = IEEE80211_MAX_LEN;
5579 	if (ieee80211_node_supports_ht_sgi20(ni))
5580 		cfg_cmd.sgi_ch_width_supp = (1 << IWX_TLC_MNG_CH_WIDTH_20MHZ);
5581 
5582 	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
5583 	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, sizeof(cfg_cmd),
5584 	    &cfg_cmd);
5585 }
5586 
5587 void
5588 iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
5589 {
5590 	struct ieee80211com *ic = &sc->sc_ic;
5591 	struct ieee80211_node *ni = ic->ic_bss;
5592 	struct ieee80211_rateset *rs = &ni->ni_rates;
5593 	uint32_t rate_n_flags;
5594 	int i;
5595 
5596 	if (notif->sta_id != IWX_STATION_ID ||
5597 	    (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
5598 		return;
5599 
5600 	rate_n_flags = le32toh(notif->rate);
5601 	if (rate_n_flags & IWX_RATE_MCS_HT_MSK) {
5602 		ni->ni_txmcs = (rate_n_flags &
5603 		    (IWX_RATE_HT_MCS_RATE_CODE_MSK |
5604 		    IWX_RATE_HT_MCS_NSS_MSK));
5605 	} else {
5606 		uint8_t plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
5607 		uint8_t rval = 0;
5608 		for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
5609 			if (iwx_rates[i].plcp == plcp) {
5610 				rval = iwx_rates[i].rate;
5611 				break;
5612 			}
5613 		}
5614 		if (rval) {
5615 			uint8_t rv;
5616 			for (i = 0; i < rs->rs_nrates; i++) {
5617 				rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
5618 				if (rv == rval) {
5619 					ni->ni_txrate = i;
5620 					break;
5621 				}
5622 			}
5623 		}
5624 	}
5625 }
5626 
/*
 * Prepare firmware for authentication: update the PHY context for the
 * target channel, then add a MAC context, binding, and station entry,
 * enable Tx queues, and protect the session with a time event so the
 * firmware stays on channel during association. On failure, partially
 * created firmware state is torn down again — but only if no device
 * reset happened in the meantime (generation check). Returns 0 or an
 * errno value.
 */
int
iwx_auth(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ic->ic_bss;
	uint32_t duration;
	int generation = sc->sc_generation, err;

	splassert(IPL_NET);

	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		sc->sc_phyctxt[0].channel = ic->ic_ibss_chan;
	else
		sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
	err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
	    IWX_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		printf("%s: could not update PHY context (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
	in->in_phyctxt = &sc->sc_phyctxt[0];

	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		printf("%s: could not add MAC context (error %d)\n",
		    DEVNAME(sc), err);
		return err;
 	}
	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;

	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
	if (err) {
		printf("%s: could not add binding (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_mac_ctxt;
	}
	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;

	err = iwx_add_sta_cmd(sc, in, 0);
	if (err) {
		printf("%s: could not add sta (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_binding;
	}
	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;

	/* Monitor mode only needs the injection queue; we are done. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
		    IWX_TX_RING_COUNT);
		if (err)
			goto rm_sta;
		return 0;
	}

	err = iwx_enable_data_tx_queues(sc);
	if (err)
		goto rm_sta;

	err = iwx_clear_statistics(sc);
	if (err)
		goto rm_sta;

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 2;
	else
		duration = IEEE80211_DUR_TU;
	iwx_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);

	return 0;

rm_sta:
	/* Skip cleanup if the device was reset (firmware state is gone). */
	if (generation == sc->sc_generation) {
		iwx_rm_sta_cmd(sc, in);
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}
rm_binding:
	if (generation == sc->sc_generation) {
		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}
rm_mac_ctxt:
	if (generation == sc->sc_generation) {
		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}
	return err;
}
5720 
/*
 * Undo iwx_auth(): cancel session protection and tear down the
 * firmware station, binding, and MAC context in reverse order of
 * creation, flushing the Tx path first. Each step is skipped unless
 * the corresponding state flag is set. Returns 0 or an errno value.
 */
int
iwx_deauth(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ic->ic_bss;
	int err;

	splassert(IPL_NET);

	iwx_unprotect_session(sc, in);

	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
		/* Flush pending frames before removing the station. */
		err = iwx_flush_tx_path(sc);
		if (err) {
			printf("%s: could not flush Tx path (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		err = iwx_rm_sta_cmd(sc, in);
		if (err) {
			printf("%s: could not remove STA (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
		sc->sc_rx_ba_sessions = 0;
	}

	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		if (err) {
			printf("%s: could not remove binding (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}

	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		if (err) {
			printf("%s: could not remove MAC context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}

	return 0;
}
5771 
5772 int
5773 iwx_assoc(struct iwx_softc *sc)
5774 {
5775 	struct ieee80211com *ic = &sc->sc_ic;
5776 	struct iwx_node *in = (void *)ic->ic_bss;
5777 	int update_sta = (sc->sc_flags & IWX_FLAG_STA_ACTIVE);
5778 	int err;
5779 
5780 	splassert(IPL_NET);
5781 
5782 	err = iwx_add_sta_cmd(sc, in, update_sta);
5783 	if (err) {
5784 		printf("%s: could not %s STA (error %d)\n",
5785 		    DEVNAME(sc), update_sta ? "update" : "add", err);
5786 		return err;
5787 	}
5788 
5789 	if (!update_sta)
5790 		err = iwx_enable_data_tx_queues(sc);
5791 
5792 	return err;
5793 }
5794 
5795 int
5796 iwx_disassoc(struct iwx_softc *sc)
5797 {
5798 	struct ieee80211com *ic = &sc->sc_ic;
5799 	struct iwx_node *in = (void *)ic->ic_bss;
5800 	int err;
5801 
5802 	splassert(IPL_NET);
5803 
5804 	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
5805 		err = iwx_rm_sta_cmd(sc, in);
5806 		if (err) {
5807 			printf("%s: could not remove STA (error %d)\n",
5808 			    DEVNAME(sc), err);
5809 			return err;
5810 		}
5811 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
5812 		sc->sc_rx_ba_sessions = 0;
5813 	}
5814 
5815 	return 0;
5816 }
5817 
/*
 * Transition to RUN state after association completes: update PHY and
 * MAC contexts with association data, configure smart-fifo, multicast
 * filtering, power management, quotas, and initialize firmware rate
 * scaling. In monitor mode this also performs the iwx_auth() setup
 * first and skips the rate-scaling steps. Returns 0 or an errno value.
 */
int
iwx_run(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ic->ic_bss;
	int err;

	splassert(IPL_NET);

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* Add a MAC context and a sniffing STA. */
		err = iwx_auth(sc);
		if (err)
			return err;
	}

	/* Configure Rx chains for MIMO. */
	if ((ic->ic_opmode == IEEE80211_M_MONITOR ||
	    (in->in_ni.ni_flags & IEEE80211_NODE_HT)) &&
	    iwx_mimo_enabled(sc)) {
		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0],
		    2, 2, IWX_FW_CTXT_ACTION_MODIFY, 0);
		if (err) {
			printf("%s: failed to update PHY\n",
			    DEVNAME(sc));
			return err;
		}
	}

	/* We have now been assigned an associd by the AP. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
	if (err) {
		printf("%s: could not set sf full on (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_allow_mcast(sc);
	if (err) {
		printf("%s: could not allow mcast (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_power_update_device(sc);
	if (err) {
		printf("%s: could not send power command (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
#ifdef notyet
	/*
	 * Disabled for now. Default beacon filter settings
	 * prevent net80211 from getting ERP and HT protection
	 * updates from beacons.
	 */
	err = iwx_enable_beacon_filter(sc, in);
	if (err) {
		printf("%s: could not enable beacon filter\n",
		    DEVNAME(sc));
		return err;
	}
#endif
	err = iwx_power_mac_update_mode(sc, in);
	if (err) {
		printf("%s: could not update MAC power (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Quota commands are only needed without dynamic-quota firmware. */
	if (!isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
		err = iwx_update_quotas(sc, in, 1);
		if (err) {
			printf("%s: could not update quotas (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
	}

	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		return 0;

	/* Start at lowest available bit-rate. Firmware will raise. */
	in->in_ni.ni_txrate = 0;
	in->in_ni.ni_txmcs = 0;

	err = iwx_rs_init(sc, in);
	if (err) {
		printf("%s: could not init rate scaling (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	return 0;
}
5919 
/*
 * Undo iwx_run(): disable smart-fifo and beacon filtering, revoke
 * quotas, mark the MAC context unassociated, and reset the PHY
 * context to a single chain in case MIMO was enabled. Returns 0 or
 * an errno value.
 */
int
iwx_run_stop(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ic->ic_bss;
	int err;

	splassert(IPL_NET);

	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
	if (err)
		return err;

	err = iwx_disable_beacon_filter(sc);
	if (err) {
		printf("%s: could not disable beacon filter (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Quota commands are only needed without dynamic-quota firmware. */
	if (!isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
		err = iwx_update_quotas(sc, in, 0);
		if (err) {
			printf("%s: could not update quotas (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
	}

	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	/* Reset Tx chains in case MIMO was enabled. */
	if ((in->in_ni.ni_flags & IEEE80211_NODE_HT) &&
	    iwx_mimo_enabled(sc)) {
		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
		    IWX_FW_CTXT_ACTION_MODIFY, 0);
		if (err) {
			printf("%s: failed to update PHY\n", DEVNAME(sc));
			return err;
		}
	}

	return 0;
}
5968 
5969 struct ieee80211_node *
5970 iwx_node_alloc(struct ieee80211com *ic)
5971 {
5972 	return malloc(sizeof (struct iwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
5973 }
5974 
5975 int
5976 iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
5977     struct ieee80211_key *k)
5978 {
5979 	struct iwx_softc *sc = ic->ic_softc;
5980 	struct iwx_add_sta_key_cmd cmd;
5981 
5982 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
5983 		/* Fallback to software crypto for other ciphers. */
5984 		return (ieee80211_set_key(ic, ni, k));
5985 	}
5986 
5987 	memset(&cmd, 0, sizeof(cmd));
5988 
5989 	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
5990 	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
5991 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
5992 	    IWX_STA_KEY_FLG_KEYID_MSK));
5993 	if (k->k_flags & IEEE80211_KEY_GROUP) {
5994 		cmd.common.key_offset = 1;
5995 		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
5996 	} else
5997 		cmd.common.key_offset = 0;
5998 
5999 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
6000 	cmd.common.sta_id = IWX_STATION_ID;
6001 
6002 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
6003 
6004 	return iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC,
6005 	    sizeof(cmd), &cmd);
6006 }
6007 
6008 void
6009 iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
6010     struct ieee80211_key *k)
6011 {
6012 	struct iwx_softc *sc = ic->ic_softc;
6013 	struct iwx_add_sta_key_cmd cmd;
6014 
6015 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
6016 		/* Fallback to software crypto for other ciphers. */
6017                 ieee80211_delete_key(ic, ni, k);
6018 		return;
6019 	}
6020 
6021 	memset(&cmd, 0, sizeof(cmd));
6022 
6023 	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
6024 	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
6025 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
6026 	    IWX_STA_KEY_FLG_KEYID_MSK));
6027 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
6028 	if (k->k_flags & IEEE80211_KEY_GROUP)
6029 		cmd.common.key_offset = 1;
6030 	else
6031 		cmd.common.key_offset = 0;
6032 	cmd.common.sta_id = IWX_STATION_ID;
6033 
6034 	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
6035 }
6036 
6037 int
6038 iwx_media_change(struct ifnet *ifp)
6039 {
6040 	struct iwx_softc *sc = ifp->if_softc;
6041 	struct ieee80211com *ic = &sc->sc_ic;
6042 	uint8_t rate, ridx;
6043 	int err;
6044 
6045 	err = ieee80211_media_change(ifp);
6046 	if (err != ENETRESET)
6047 		return err;
6048 
6049 	if (ic->ic_fixed_mcs != -1)
6050 		sc->sc_fixed_ridx = iwx_mcs2ridx[ic->ic_fixed_mcs];
6051 	else if (ic->ic_fixed_rate != -1) {
6052 		rate = ic->ic_sup_rates[ic->ic_curmode].
6053 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
6054 		/* Map 802.11 rate to HW rate index. */
6055 		for (ridx = 0; ridx <= IWX_RIDX_MAX; ridx++)
6056 			if (iwx_rates[ridx].rate == rate)
6057 				break;
6058 		sc->sc_fixed_ridx = ridx;
6059 	}
6060 
6061 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6062 	    (IFF_UP | IFF_RUNNING)) {
6063 		iwx_stop(ifp);
6064 		err = iwx_init(ifp);
6065 	}
6066 	return err;
6067 }
6068 
/*
 * Task context for net80211 state transitions. Scheduled by
 * iwx_newstate(); may sleep while sending firmware commands.
 * On success it chains to the saved net80211 state handler
 * (sc->sc_newstate); on failure it schedules a device re-init.
 */
void
iwx_newstate_task(void *psc)
{
	struct iwx_softc *sc = (struct iwx_softc *)psc;
	struct ieee80211com *ic = &sc->sc_ic;
	enum ieee80211_state nstate = sc->ns_nstate;
	enum ieee80211_state ostate = ic->ic_state;
	int arg = sc->ns_arg;
	int err = 0, s = splnet();

	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
		/* iwx_stop() is waiting for us. */
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;
	}

	if (ostate == IEEE80211_S_SCAN) {
		if (nstate == ostate) {
			/* SCAN -> SCAN: nothing to do while fw still scans. */
			if (sc->sc_flags & IWX_FLAG_SCANNING) {
				refcnt_rele_wake(&sc->task_refs);
				splx(s);
				return;
			}
			/* Firmware is no longer scanning. Do another scan. */
			goto next_scan;
		}
	}

	/*
	 * Moving down (or sideways) in the state machine: tear down
	 * firmware state step by step. The cases below intentionally
	 * fall through so every intermediate level is undone.
	 */
	if (nstate <= ostate) {
		switch (ostate) {
		case IEEE80211_S_RUN:
			err = iwx_run_stop(sc);
			if (err)
				goto out;
			/* FALLTHROUGH */
		case IEEE80211_S_ASSOC:
			if (nstate <= IEEE80211_S_ASSOC) {
				err = iwx_disassoc(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_AUTH:
			if (nstate <= IEEE80211_S_AUTH) {
				err = iwx_deauth(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_SCAN:
		case IEEE80211_S_INIT:
			break;
		}

		/* Die now if iwx_stop() was called while we were sleeping. */
		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
			refcnt_rele_wake(&sc->task_refs);
			splx(s);
			return;
		}
	}

	/* Now bring firmware state up to the requested level. */
	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
next_scan:
		err = iwx_scan(sc);
		if (err)
			break;
		/* The state change completes when the scan ends. */
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;

	case IEEE80211_S_AUTH:
		err = iwx_auth(sc);
		break;

	case IEEE80211_S_ASSOC:
		err = iwx_assoc(sc);
		break;

	case IEEE80211_S_RUN:
		err = iwx_run(sc);
		break;
	}

out:
	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
		/* Errors are handled by re-initializing the device. */
		if (err)
			task_add(systq, &sc->init_task);
		else
			sc->sc_newstate(ic, nstate, arg);
	}
	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
6168 
/*
 * net80211 state-change hook. The real work happens in
 * iwx_newstate_task() since firmware commands may sleep.
 */
int
iwx_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct ifnet *ifp = IC2IFP(ic);
	struct iwx_softc *sc = ifp->if_softc;

	/* Cancel RUN-state-only tasks before leaving RUN state. */
	if (ic->ic_state == IEEE80211_S_RUN) {
		iwx_del_task(sc, systq, &sc->ba_task);
		iwx_del_task(sc, systq, &sc->htprot_task);
	}

	/* Stash the requested transition for the task to pick up. */
	sc->ns_nstate = nstate;
	sc->ns_arg = arg;

	iwx_add_task(sc, sc->sc_nswq, &sc->newstate_task);

	return 0;
}
6187 
6188 void
6189 iwx_endscan(struct iwx_softc *sc)
6190 {
6191 	struct ieee80211com *ic = &sc->sc_ic;
6192 
6193 	if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
6194 		return;
6195 
6196 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
6197 	ieee80211_end_scan(&ic->ic_if);
6198 }
6199 
6200 /*
6201  * Aging and idle timeouts for the different possible scenarios
6202  * in default configuration
6203  */
6204 static const uint32_t
6205 iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
6206 	{
6207 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
6208 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
6209 	},
6210 	{
6211 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
6212 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
6213 	},
6214 	{
6215 		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
6216 		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
6217 	},
6218 	{
6219 		htole32(IWX_SF_BA_AGING_TIMER_DEF),
6220 		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
6221 	},
6222 	{
6223 		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
6224 		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
6225 	},
6226 };
6227 
6228 /*
6229  * Aging and idle timeouts for the different possible scenarios
6230  * in single BSS MAC configuration.
6231  */
6232 static const uint32_t
6233 iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
6234 	{
6235 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
6236 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
6237 	},
6238 	{
6239 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
6240 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
6241 	},
6242 	{
6243 		htole32(IWX_SF_MCAST_AGING_TIMER),
6244 		htole32(IWX_SF_MCAST_IDLE_TIMER)
6245 	},
6246 	{
6247 		htole32(IWX_SF_BA_AGING_TIMER),
6248 		htole32(IWX_SF_BA_IDLE_TIMER)
6249 	},
6250 	{
6251 		htole32(IWX_SF_TX_RE_AGING_TIMER),
6252 		htole32(IWX_SF_TX_RE_IDLE_TIMER)
6253 	},
6254 };
6255 
6256 void
6257 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
6258     struct ieee80211_node *ni)
6259 {
6260 	int i, j, watermark;
6261 
6262 	sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
6263 
6264 	/*
6265 	 * If we are in association flow - check antenna configuration
6266 	 * capabilities of the AP station, and choose the watermark accordingly.
6267 	 */
6268 	if (ni) {
6269 		if (ni->ni_flags & IEEE80211_NODE_HT) {
6270 			if (ni->ni_rxmcs[1] != 0)
6271 				watermark = IWX_SF_W_MARK_MIMO2;
6272 			else
6273 				watermark = IWX_SF_W_MARK_SISO;
6274 		} else {
6275 			watermark = IWX_SF_W_MARK_LEGACY;
6276 		}
6277 	/* default watermark value for unassociated mode. */
6278 	} else {
6279 		watermark = IWX_SF_W_MARK_MIMO2;
6280 	}
6281 	sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
6282 
6283 	for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
6284 		for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
6285 			sf_cmd->long_delay_timeouts[i][j] =
6286 					htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
6287 		}
6288 	}
6289 
6290 	if (ni) {
6291 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
6292 		       sizeof(iwx_sf_full_timeout));
6293 	} else {
6294 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
6295 		       sizeof(iwx_sf_full_timeout_def));
6296 	}
6297 
6298 }
6299 
6300 int
6301 iwx_sf_config(struct iwx_softc *sc, int new_state)
6302 {
6303 	struct ieee80211com *ic = &sc->sc_ic;
6304 	struct iwx_sf_cfg_cmd sf_cmd = {
6305 		.state = htole32(new_state),
6306 	};
6307 	int err = 0;
6308 
6309 	switch (new_state) {
6310 	case IWX_SF_UNINIT:
6311 	case IWX_SF_INIT_OFF:
6312 		iwx_fill_sf_command(sc, &sf_cmd, NULL);
6313 		break;
6314 	case IWX_SF_FULL_ON:
6315 		iwx_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
6316 		break;
6317 	default:
6318 		return EINVAL;
6319 	}
6320 
6321 	err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
6322 				   sizeof(sf_cmd), &sf_cmd);
6323 	return err;
6324 }
6325 
6326 int
6327 iwx_send_bt_init_conf(struct iwx_softc *sc)
6328 {
6329 	struct iwx_bt_coex_cmd bt_cmd;
6330 
6331 	bt_cmd.mode = htole32(IWX_BT_COEX_WIFI);
6332 	bt_cmd.enabled_modules = 0;
6333 
6334 	return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
6335 	    &bt_cmd);
6336 }
6337 
/*
 * Send the SoC configuration command, describing platform properties
 * (discrete vs. integrated device, crystal latency, LTR delay) to
 * the firmware.
 */
int
iwx_send_soc_conf(struct iwx_softc *sc)
{
	struct iwx_soc_configuration_cmd cmd;
	int err;
	uint32_t cmd_id, flags = 0;

	memset(&cmd, 0, sizeof(cmd));

	/*
	 * In VER_1 of this command, the discrete value is considered
	 * an integer; In VER_2, it's a bitmask.  Since we have only 2
	 * values in VER_1, this is backwards-compatible with VER_2,
	 * as long as we don't set any other flag bits.
	 */
	if (!sc->sc_integrated) { /* VER_1 */
		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
	} else { /* VER_2 */
		uint8_t scan_cmd_ver;
		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
			flags |= (sc->sc_ltr_delay &
			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
		/*
		 * NOTE(review): scan command version >= 2 is used here as
		 * a proxy for firmware supporting the low-latency flag —
		 * confirm against firmware API docs.
		 */
		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
		    IWX_SCAN_REQ_UMAC);
		if (scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
	}
	cmd.flags = htole32(flags);

	cmd.latency = htole32(sc->sc_xtal_latency);

	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
	if (err)
		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
	return err;
}
6375 
6376 int
6377 iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
6378 {
6379 	struct iwx_mcc_update_cmd mcc_cmd;
6380 	struct iwx_host_cmd hcmd = {
6381 		.id = IWX_MCC_UPDATE_CMD,
6382 		.flags = IWX_CMD_WANT_RESP,
6383 		.data = { &mcc_cmd },
6384 	};
6385 	struct iwx_rx_packet *pkt;
6386 	struct iwx_mcc_update_resp *resp;
6387 	size_t resp_len;
6388 	int err;
6389 
6390 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
6391 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
6392 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
6393 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
6394 		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
6395 	else
6396 		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
6397 
6398 	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
6399 	hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
6400 
6401 	err = iwx_send_cmd(sc, &hcmd);
6402 	if (err)
6403 		return err;
6404 
6405 	pkt = hcmd.resp_pkt;
6406 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
6407 		err = EIO;
6408 		goto out;
6409 	}
6410 
6411 	resp_len = iwx_rx_packet_payload_len(pkt);
6412 	if (resp_len < sizeof(*resp)) {
6413 		err = EIO;
6414 		goto out;
6415 	}
6416 
6417 	resp = (void *)pkt->data;
6418 	if (resp_len != sizeof(*resp) +
6419 	    resp->n_channels * sizeof(resp->channels[0])) {
6420 		err = EIO;
6421 		goto out;
6422 	}
6423 
6424 	DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%d n_channels=%u\n",
6425 	    resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));
6426 
6427 	/* Update channel map for net80211 and our scan configuration. */
6428 	iwx_init_channel_map(sc, NULL, resp->channels, resp->n_channels);
6429 
6430 out:
6431 	iwx_free_resp(sc, &hcmd);
6432 
6433 	return err;
6434 }
6435 
/*
 * Send an empty temperature-reporting-thresholds command, delegating
 * critical-temperature-kill and TX backoff handling to the firmware.
 */
int
iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
{
	struct iwx_temp_report_ths_cmd cmd;
	int err;

	/*
	 * In order to give responsibility for critical-temperature-kill
	 * and TX backoff to FW we need to send an empty temperature
	 * reporting command at init time.
	 */
	memset(&cmd, 0, sizeof(cmd));

	/* This command lives in the PHY_OPS group, hence the wide ID. */
	err = iwx_send_cmd_pdu(sc,
	    IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
	    0, sizeof(cmd), &cmd);
	if (err)
		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
		    DEVNAME(sc), err);

	return err;
}
6458 
6459 int
6460 iwx_init_hw(struct iwx_softc *sc)
6461 {
6462 	struct ieee80211com *ic = &sc->sc_ic;
6463 	int err, i;
6464 
6465 	err = iwx_preinit(sc);
6466 	if (err)
6467 		return err;
6468 
6469 	err = iwx_start_hw(sc);
6470 	if (err) {
6471 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
6472 		return err;
6473 	}
6474 
6475 	err = iwx_run_init_mvm_ucode(sc, 0);
6476 	if (err)
6477 		return err;
6478 
6479 	if (!iwx_nic_lock(sc))
6480 		return EBUSY;
6481 
6482 	err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
6483 	if (err) {
6484 		printf("%s: could not init tx ant config (error %d)\n",
6485 		    DEVNAME(sc), err);
6486 		goto err;
6487 	}
6488 
6489 	if (sc->sc_tx_with_siso_diversity) {
6490 		err = iwx_send_phy_cfg_cmd(sc);
6491 		if (err) {
6492 			printf("%s: could not send phy config (error %d)\n",
6493 			    DEVNAME(sc), err);
6494 			goto err;
6495 		}
6496 	}
6497 
6498 	err = iwx_send_bt_init_conf(sc);
6499 	if (err) {
6500 		printf("%s: could not init bt coex (error %d)\n",
6501 		    DEVNAME(sc), err);
6502 		return err;
6503 	}
6504 
6505 	err = iwx_send_soc_conf(sc);
6506 	if (err)
6507 		return err;
6508 
6509 	err = iwx_send_dqa_cmd(sc);
6510 	if (err)
6511 		return err;
6512 
6513 	/* Add auxiliary station for scanning */
6514 	err = iwx_add_aux_sta(sc);
6515 	if (err) {
6516 		printf("%s: could not add aux station (error %d)\n",
6517 		    DEVNAME(sc), err);
6518 		goto err;
6519 	}
6520 
6521 	for (i = 0; i < 1; i++) {
6522 		/*
6523 		 * The channel used here isn't relevant as it's
6524 		 * going to be overwritten in the other flows.
6525 		 * For now use the first channel we have.
6526 		 */
6527 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
6528 		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
6529 		    IWX_FW_CTXT_ACTION_ADD, 0);
6530 		if (err) {
6531 			printf("%s: could not add phy context %d (error %d)\n",
6532 			    DEVNAME(sc), i, err);
6533 			goto err;
6534 		}
6535 	}
6536 
6537 	err = iwx_config_ltr(sc);
6538 	if (err) {
6539 		printf("%s: PCIe LTR configuration failed (error %d)\n",
6540 		    DEVNAME(sc), err);
6541 	}
6542 
6543 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
6544 		err = iwx_send_temp_report_ths_cmd(sc);
6545 		if (err)
6546 			goto err;
6547 	}
6548 
6549 	err = iwx_power_update_device(sc);
6550 	if (err) {
6551 		printf("%s: could not send power command (error %d)\n",
6552 		    DEVNAME(sc), err);
6553 		goto err;
6554 	}
6555 
6556 	if (sc->sc_nvm.lar_enabled) {
6557 		err = iwx_send_update_mcc_cmd(sc, "ZZ");
6558 		if (err) {
6559 			printf("%s: could not init LAR (error %d)\n",
6560 			    DEVNAME(sc), err);
6561 			goto err;
6562 		}
6563 	}
6564 
6565 	err = iwx_config_umac_scan(sc);
6566 	if (err) {
6567 		printf("%s: could not configure scan (error %d)\n",
6568 		    DEVNAME(sc), err);
6569 		goto err;
6570 	}
6571 
6572 	err = iwx_disable_beacon_filter(sc);
6573 	if (err) {
6574 		printf("%s: could not disable beacon filter (error %d)\n",
6575 		    DEVNAME(sc), err);
6576 		goto err;
6577 	}
6578 
6579 err:
6580 	iwx_nic_unlock(sc);
6581 	return err;
6582 }
6583 
6584 /* Allow multicast from our BSSID. */
6585 int
6586 iwx_allow_mcast(struct iwx_softc *sc)
6587 {
6588 	struct ieee80211com *ic = &sc->sc_ic;
6589 	struct ieee80211_node *ni = ic->ic_bss;
6590 	struct iwx_mcast_filter_cmd *cmd;
6591 	size_t size;
6592 	int err;
6593 
6594 	size = roundup(sizeof(*cmd), 4);
6595 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
6596 	if (cmd == NULL)
6597 		return ENOMEM;
6598 	cmd->filter_own = 1;
6599 	cmd->port_id = 0;
6600 	cmd->count = 0;
6601 	cmd->pass_all = 1;
6602 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6603 
6604 	err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
6605 	    0, size, cmd);
6606 	free(cmd, M_DEVBUF, size);
6607 	return err;
6608 }
6609 
/*
 * Interface init: bring up hardware and firmware, then start an
 * initial scan (or go straight to RUN state in monitor mode).
 * Sleeps until the transition to SCAN state has completed, so it
 * must be called with the ioctl lock held and may return ENXIO if
 * the device is reset (generation change) while waiting.
 */
int
iwx_init(struct ifnet *ifp)
{
	struct iwx_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	int err, generation;

	rw_assert_wrlock(&sc->ioctl_rwl);

	/* A generation bump invalidates concurrent init/ioctl callers. */
	generation = ++sc->sc_generation;

	KASSERT(sc->task_refs.refs == 0);
	refcnt_init(&sc->task_refs);

	err = iwx_init_hw(sc);
	if (err) {
		/* Only clean up if nobody reset the device meanwhile. */
		if (generation == sc->sc_generation)
			iwx_stop(ifp);
		return err;
	}

	if (sc->sc_nvm.sku_cap_11n_enable)
		iwx_setup_ht_rates(sc);

	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_flags |= IFF_RUNNING;

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
		return 0;
	}

	ieee80211_begin_scan(ifp);

	/*
	 * ieee80211_begin_scan() ends up scheduling iwx_newstate_task().
	 * Wait until the transition to SCAN state has completed.
	 */
	do {
		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwxinit",
		    SEC_TO_NSEC(1));
		if (generation != sc->sc_generation)
			return ENXIO;
		if (err)
			return err;
	} while (ic->ic_state != IEEE80211_S_SCAN);

	return 0;
}
6660 
/*
 * ifnet start routine: drain the management queue and the data send
 * queue, encapsulate frames and hand them to iwx_tx(). Stops early
 * and marks the queue oactive when any hardware Tx ring is full.
 */
void
iwx_start(struct ifnet *ifp)
{
	struct iwx_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac = EDCA_AC_BE; /* XXX */

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* need to send management frames even if we're not RUNning */
		m = mq_dequeue(&ic->ic_mgtq);
		if (m) {
			/* The node reference travels in the mbuf pkthdr. */
			ni = m->m_pkthdr.ph_cookie;
			goto sendit;
		}

		if (ic->ic_state != IEEE80211_S_RUN ||
		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
			break;

		m = ifq_dequeue(&ifp->if_snd);
		if (!m)
			break;
		/* Make sure the Ethernet header is contiguous. */
		if (m->m_len < sizeof (*eh) &&
		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		/* 802.11 encapsulation; also yields the destination node. */
		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
			ifp->if_oerrors++;
			continue;
		}

 sendit:
#if NBPFILTER > 0
		if (ic->ic_rawbpf != NULL)
			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
#endif
		if (iwx_tx(sc, m, ni, ac) != 0) {
			ieee80211_release_node(ic, ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the Tx watchdog (see iwx_watchdog()). */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}

	return;
}
6728 
/*
 * Bring the interface down: block and drain all driver tasks, stop
 * the device, and reset software state back to INIT. Called with the
 * ioctl lock held.
 */
void
iwx_stop(struct ifnet *ifp)
{
	struct iwx_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ic->ic_bss;
	int i, s = splnet();

	rw_assert_wrlock(&sc->ioctl_rwl);

	sc->sc_flags |= IWX_FLAG_SHUTDOWN; /* Disallow new tasks. */

	/* Cancel scheduled tasks and let any stale tasks finish up. */
	task_del(systq, &sc->init_task);
	iwx_del_task(sc, sc->sc_nswq, &sc->newstate_task);
	iwx_del_task(sc, systq, &sc->ba_task);
	iwx_del_task(sc, systq, &sc->htprot_task);
	KASSERT(sc->task_refs.refs >= 1);
	refcnt_finalize(&sc->task_refs, "iwxstop");

	iwx_stop_device(sc);

	/* Reset soft state. */

	/* Invalidate and free responses of commands still in flight. */
	sc->sc_generation++;
	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
		sc->sc_cmd_resp_pkt[i] = NULL;
		sc->sc_cmd_resp_len[i] = 0;
	}
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	in->in_phyctxt = NULL;

	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
	sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
	sc->sc_flags &= ~IWX_FLAG_HW_ERR;
	sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;

	sc->sc_rx_ba_sessions = 0;

	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);

	/* Disarm the Tx watchdog. */
	ifp->if_timer = sc->sc_tx_timer = 0;

	splx(s);
}
6780 
6781 void
6782 iwx_watchdog(struct ifnet *ifp)
6783 {
6784 	struct iwx_softc *sc = ifp->if_softc;
6785 
6786 	ifp->if_timer = 0;
6787 	if (sc->sc_tx_timer > 0) {
6788 		if (--sc->sc_tx_timer == 0) {
6789 			printf("%s: device timeout\n", DEVNAME(sc));
6790 #ifdef IWX_DEBUG
6791 			iwx_nic_error(sc);
6792 #endif
6793 			if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
6794 				task_add(systq, &sc->init_task);
6795 			ifp->if_oerrors++;
6796 			return;
6797 		}
6798 		ifp->if_timer = 1;
6799 	}
6800 
6801 	ieee80211_watchdog(ifp);
6802 }
6803 
/*
 * ifnet ioctl handler. Serialized with ioctl_rwl because iwx_init()
 * and iwx_stop() sleep; a generation change observed after acquiring
 * the lock means the device was reset while we waited.
 */
int
iwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct iwx_softc *sc = ifp->if_softc;
	int s, err = 0, generation = sc->sc_generation;

	/*
	 * Prevent processes from entering this function while another
	 * process is tsleep'ing in it.
	 */
	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
	if (err == 0 && generation != sc->sc_generation) {
		rw_exit(&sc->ioctl_rwl);
		return ENXIO;
	}
	if (err)
		return err;
	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				err = iwx_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				iwx_stop(ifp);
		}
		break;

	default:
		err = ieee80211_ioctl(ifp, cmd, data);
	}

	/* ENETRESET: a config change requires restarting the interface. */
	if (err == ENETRESET) {
		err = 0;
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			iwx_stop(ifp);
			err = iwx_init(ifp);
		}
	}

	splx(s);
	rw_exit(&sc->ioctl_rwl);

	return err;
}
6856 
6857 #if 1 /* usually #ifdef IWX_DEBUG but always enabled for now */
6858 /*
6859  * Note: This structure is read from the device with IO accesses,
6860  * and the reading already does the endian conversion. As it is
6861  * read with uint32_t-sized accesses, any members with a different size
6862  * need to be ordered correctly though!
6863  */
/* Field order must match the firmware's log layout exactly. */
struct iwx_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
6911 
6912 /*
6913  * UMAC error struct - relevant starting from family 8000 chip.
6914  * Note: This structure is read from the device with IO accesses,
6915  * and the reading already does the endian conversion. As it is
6916  * read with u32-sized accesses, any members with a different size
6917  * need to be ordered correctly though!
6918  */
/* See the comment above: field order must match the firmware layout. */
struct iwx_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC version major */
	uint32_t umac_minor;	/* UMAC version minor */
	uint32_t frame_pointer;	/* core register 27 */
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
6936 
6937 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
6938 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
6939 
/*
 * Read the UMAC error event table from device memory and print it
 * to the console for firmware-error debugging.
 */
void
iwx_nic_umac_error(struct iwx_softc *sc)
{
	struct iwx_umac_error_event_table table;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* NOTE(review): 0x800000 presumably is the SRAM base — confirm. */
	if (base < 0x800000) {
		printf("%s: Invalid error log pointer 0x%08x\n",
		    DEVNAME(sc), base);
		return;
	}

	/* iwx_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		printf("%s: reading errlog failed\n", DEVNAME(sc));
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
			sc->sc_flags, table.valid);
	}

	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
		iwx_desc_lookup(table.error_id));
	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
	    table.frame_pointer);
	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
	    table.stack_pointer);
	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
	    table.nic_isr_pref);
}
6984 
/* Upper nibble of an error ID encodes the reporting CPU; masked off. */
#define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
/*
 * Mapping of firmware error IDs to human-readable names.
 * The final entry is a catch-all and must remain last;
 * iwx_desc_lookup() relies on this.
 */
static struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "BAD_COMMAND", 0x39 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
7011 
7012 const char *
7013 iwx_desc_lookup(uint32_t num)
7014 {
7015 	int i;
7016 
7017 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
7018 		if (advanced_lookup[i].num ==
7019 		    (num & ~IWX_FW_SYSASSERT_CPU_MASK))
7020 			return advanced_lookup[i].name;
7021 
7022 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
7023 	return advanced_lookup[i].name;
7024 }
7025 
7026 /*
7027  * Support for dumping the error log seemed like a good idea ...
7028  * but it's mostly hex junk and the only sensible thing is the
7029  * hw/ucode revision (which we know anyway).  Since it's here,
7030  * I'll just leave it in, just in case e.g. the Intel guys want to
7031  * help us decipher some "ADVANCED_SYSASSERT" later.
7032  */
void
iwx_nic_error(struct iwx_softc *sc)
{
	struct iwx_error_event_table table;
	uint32_t base;

	printf("%s: dumping device error log\n", DEVNAME(sc));
	base = sc->sc_uc.uc_lmac_error_event_table[0];
	/* Table pointers below 0x800000 are treated as bogus. */
	if (base < 0x800000) {
		printf("%s: Invalid error log pointer 0x%08x\n",
		    DEVNAME(sc), base);
		return;
	}

	/* iwx_read_mem() transfers 4-byte words, hence the division. */
	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		printf("%s: reading errlog failed\n", DEVNAME(sc));
		return;
	}

	if (!table.valid) {
		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
		    sc->sc_flags, table.valid);
	}

	/* Print the assert name followed by the raw table contents. */
	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
	    iwx_desc_lookup(table.error_id));
	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
	    table.trm_hw_status0);
	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
	    table.trm_hw_status1);
	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
	    table.fw_rev_type);
	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
	    table.major);
	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
	    table.minor);
	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);

	/* The UMAC keeps a separate error table; dump it as well. */
	if (sc->sc_uc.uc_umac_error_event_table)
		iwx_nic_umac_error(sc);
}
7107 #endif
7108 
/*
 * Sync the DMA map for the response payload which follows the packet
 * header, then point _var_ at that payload. The synced length is the
 * size of the structure _var_ points at. Relies on 'sc' and 'data'
 * being in scope at the expansion site.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
7115 
/*
 * Sync _len_ bytes of the response payload which follows the packet
 * header, then point _ptr_ at that payload. Relies on 'sc' and 'data'
 * being in scope at the expansion site.
 *
 * Fixed: the macro previously ignored its _len_ argument and passed
 * sizeof(len) instead, silently capturing a local variable named
 * 'len' from the expansion site and syncing only sizeof(uint32_t)
 * bytes rather than the requested length.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
7122 
7123 int
7124 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
7125 {
7126 	int qid, idx, code;
7127 
7128 	qid = pkt->hdr.qid & ~0x80;
7129 	idx = pkt->hdr.idx;
7130 	code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
7131 
7132 	return (!(qid == 0 && idx == 0 && code == 0) &&
7133 	    pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
7134 }
7135 
/*
 * Process one RX buffer. A single hardware RX buffer may contain a
 * sequence of packets (command responses, notifications, and/or
 * received MPDUs); walk and dispatch them all. Frames destined for
 * the network stack are appended to 'ml'.
 */
void
iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf_list *ml)
{
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	struct iwx_rx_packet *pkt, *nextpkt;
	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
	struct mbuf *m0, *m;
	/* Smallest possible packet: length word plus packet header. */
	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
	int qid, idx, code, handled = 1;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	m0 = data->m;
	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
		qid = pkt->hdr.qid;
		idx = pkt->hdr.idx;

		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		/* An invalid packet marks the end of this buffer. */
		if (!iwx_rx_pkt_valid(pkt))
			break;

		/* Sanity-check the length against remaining buffer space. */
		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
		if (len < sizeof(pkt->hdr) ||
		    len > (IWX_RBUF_SIZE - offset - minsz))
			break;

		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
			/* Take mbuf m0 off the RX ring. */
			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
				ifp->if_ierrors++;
				break;
			}
			KASSERT(data->m != m0);
		}

		switch (code) {
		case IWX_REPLY_RX_PHY_CMD:
			iwx_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWX_REPLY_RX_MPDU_CMD: {
			size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
			nextoff = offset +
			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
			nextpkt = (struct iwx_rx_packet *)
			    (m0->m_data + nextoff);
			if (nextoff + minsz >= IWX_RBUF_SIZE ||
			    !iwx_rx_pkt_valid(nextpkt)) {
				/* No need to copy last frame in buffer. */
				if (offset > 0)
					m_adj(m0, offset);
				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen, ml);
				m0 = NULL; /* stack owns m0 now; abort loop */
			} else {
				/*
				 * Create an mbuf which points to the current
				 * packet. Always copy from offset zero to
				 * preserve m_pkthdr.
				 */
				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
				if (m == NULL) {
					ifp->if_ierrors++;
					m_freem(m0);
					m0 = NULL;
					break;
				}
				m_adj(m, offset);
				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen, ml);
			}
 			break;
		}

		case IWX_TX_CMD:
			iwx_rx_tx_cmd(sc, pkt, data);
			break;

		case IWX_MISSED_BEACONS_NOTIFICATION:
			iwx_rx_bmiss(sc, pkt, data);
			break;

		case IWX_MFUART_LOAD_NOTIFICATION:
			break;

		case IWX_ALIVE: {
			struct iwx_alive_resp_v4 *resp4;

			DPRINTF(("%s: firmware alive\n", __func__));
			/*
			 * Record debug/error table pointers reported by the
			 * firmware; iwx_nic_error() reads these tables later.
			 */
			if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
				SYNC_RESP_STRUCT(resp4, pkt);
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sched_base = le32toh(
				    resp4->lmac_data[0].dbg_ptrs.scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp4->umac_data.dbg_ptrs.error_info_addr);
				if (resp4->status == IWX_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			/* Wake up whoever is waiting for the "alive" event. */
			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break;
		}

		case IWX_STATISTICS_NOTIFICATION: {
			struct iwx_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwx_get_noise(&stats->rx.general);
			break;
		}

		case IWX_DTS_MEASUREMENT_NOTIFICATION:
		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
				 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
			break;

		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
		    IWX_CT_KILL_NOTIFICATION): {
			struct iwx_ct_kill_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			printf("%s: device at critical temperature (%u degC), "
			    "stopping device\n",
			    DEVNAME(sc), le16toh(notif->temperature));
			sc->sc_flags |= IWX_FLAG_HW_ERR;
			task_add(systq, &sc->init_task);
			break;
		}

		/*
		 * Command responses: if a caller is waiting for this
		 * response, copy the packet into the preallocated
		 * sc_cmd_resp_pkt[idx] buffer.
		 */
		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_GET_INFO):
		case IWX_ADD_STA_KEY:
		case IWX_PHY_CONFIGURATION_CMD:
		case IWX_TX_ANT_CONFIGURATION_CMD:
		case IWX_ADD_STA:
		case IWX_MAC_CONTEXT_CMD:
		case IWX_REPLY_SF_CFG_CMD:
		case IWX_POWER_TABLE_CMD:
		case IWX_LTR_CONFIG:
		case IWX_PHY_CONTEXT_CMD:
		case IWX_BINDING_CONTEXT_CMD:
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
		case IWX_REPLY_BEACON_FILTERING_CMD:
		case IWX_MAC_PM_POWER_TABLE:
		case IWX_TIME_QUOTA_CMD:
		case IWX_REMOVE_STA:
		case IWX_TXPATH_FLUSH:
		case IWX_BT_CONFIG:
		case IWX_MCC_UPDATE_CMD:
		case IWX_TIME_EVENT_CMD:
		case IWX_STATISTICS_CMD:
		case IWX_SCD_QUEUE_CFG: {
			size_t pkt_len;

			if (sc->sc_cmd_resp_pkt[idx] == NULL)
				break;

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);

			pkt_len = sizeof(pkt->len_n_flags) +
			    iwx_rx_packet_len(pkt);

			/* Discard failed or implausibly sized responses. */
			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
			    pkt_len < sizeof(*pkt) ||
			    pkt_len > sc->sc_cmd_resp_len[idx]) {
				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
				    sc->sc_cmd_resp_len[idx]);
				sc->sc_cmd_resp_pkt[idx] = NULL;
				break;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
			break;
		}

		case IWX_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete |= IWX_INIT_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;

		case IWX_SCAN_COMPLETE_UMAC: {
			struct iwx_umac_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwx_endscan(sc);
			break;
		}

		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwx_umac_scan_iter_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwx_endscan(sc);
			break;
		}

		case IWX_MCC_CHUB_UPDATE_CMD: {
			struct iwx_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwx_mcc_update(sc, notif);
			break;
		}

		case IWX_REPLY_ERROR: {
			struct iwx_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			printf("%s: firmware error 0x%x, cmd 0x%x\n",
				DEVNAME(sc), le32toh(resp->error_type),
				resp->cmd_id);
			break;
		}

		case IWX_TIME_EVENT_NOTIFICATION: {
			struct iwx_time_event_notif *notif;
			uint32_t action;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Only act on the time event we are tracking. */
			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
				break;
			action = le32toh(notif->action);
			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
			break;
		}

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
		    break;

		/*
		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
		 * messages. Just ignore them for now.
		 */
		case IWX_DEBUG_LOG_MSG:
			break;

		case IWX_MCAST_FILTER_CMD:
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
			break;

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
			break;

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
			break;

		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_ACCESS_COMPLETE):
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
			break; /* happens in monitor mode; ignore for now */

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_TLC_MNG_UPDATE_NOTIF): {
			struct iwx_tlc_update_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
				iwx_rs_update(sc, notif);
			break;
		}

		default:
			handled = 0;
			printf("%s: unhandled firmware response 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    DEVNAME(sc), code, pkt->len_n_flags,
			    (qid & ~0x80), idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWX_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (handled && !(qid & (1 << 7))) {
			iwx_cmd_done(sc, qid, idx, code);
		}

		/* Advance to the next (aligned) packet in this buffer. */
		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
	}

	/* If m0 was replaced on the ring but not handed off, free it. */
	if (m0 && m0 != data->m)
		m_freem(m0);
}
7443 
/*
 * Process firmware notifications: drain all RX buffers between our
 * current read index and the firmware's write index, hand received
 * frames to the network stack, and update the hardware's read pointer.
 */
void
iwx_notif_intr(struct iwx_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint16_t hw;

	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);

	/* Index of the most recent buffer closed by firmware. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	hw &= (IWX_RX_MQ_RING_COUNT - 1);
	while (sc->rxq.cur != hw) {
		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		iwx_rx_pkt(sc, data, &ml);
		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
	}
	if_input(&sc->sc_ic.ic_if, &ml);

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
}
7469 
/*
 * Legacy (INTx/MSI) interrupt handler.
 * Interrupt causes are gathered either from the in-memory interrupt
 * cause table (ICT) or directly from the CSR interrupt registers, and
 * then dispatched. Returns nonzero if the interrupt was ours.
 */
int
iwx_intr(void *arg)
{
	struct iwx_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;

	/* Mask interrupts while servicing this one. */
	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/* Read the next (little-endian) ICT entry. */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;	/* consume the entry */
			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWX_READ(sc, IWX_CSR_INT);
		/* All-ones / 0xa5a5a5aX reads mean the device is gone. */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;	/* not ours; re-enable and bail */
	}

	/* Acknowledge the interrupts we are about to service. */
	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);

	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
		int i;

		/* Firmware has now configured the RFH. */
		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}

	handled |= (r1 & (IWX_CSR_INT_BIT_ALIVE /*| IWX_CSR_INT_BIT_SCD*/));

	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
		handled |= IWX_CSR_INT_BIT_RF_KILL;
		iwx_check_rfkill(sc);
		task_add(systq, &sc->init_task);
		rv = 1;
		goto out_ena;
	}

	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
#if 1 /* usually #ifdef IWX_DEBUG but always enabled for now */
		int i;

		iwx_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		printf("driver status:\n");
		for (i = 0; i < IWX_MAX_QUEUES; i++) {
			struct iwx_tx_ring *ring = &sc->txq[i];
			printf("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		printf("  rx ring: cur=%d\n", sc->rxq.cur);
		printf("  802.11 state %s\n",
		    ieee80211_state_name[sc->sc_ic.ic_state]);
#endif

		printf("%s: fatal firmware error\n", DEVNAME(sc));
		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
			task_add(systq, &sc->init_task);
		rv = 1;
		goto out;

	}

	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
		handled |= IWX_CSR_INT_BIT_HW_ERR;
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
			sc->sc_flags |= IWX_FLAG_HW_ERR;
			task_add(systq, &sc->init_task);
		}
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);
		handled |= IWX_CSR_INT_BIT_FH_TX;

		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
			handled |= (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX);
			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
		}
		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
			handled |= IWX_CSR_INT_BIT_RX_PERIODIC;
			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
		}

		/* Disable periodic interrupt; we use it as just a one-shot. */
		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
			    IWX_CSR_INT_PERIODIC_ENA);

		iwx_notif_intr(sc);
	}

	rv = 1;

 out_ena:
	iwx_restore_interrupts(sc);
 out:
	return rv;
}
7619 
/*
 * MSI-X interrupt handler. Interrupt causes are read from, and
 * acknowledged via, the MSIX cause registers rather than the ICT.
 * Always returns 1 (MSI-X vectors are not shared).
 */
int
iwx_intr_msix(void *arg)
{
	struct iwx_softc *sc = arg;
	uint32_t inta_fh, inta_hw;
	int vector = 0;

	/* Read and acknowledge the pending FH and HW causes. */
	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	inta_fh &= sc->sc_fh_mask;
	inta_hw &= sc->sc_hw_mask;

	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
		iwx_notif_intr(sc);
	}

	/* firmware chunk loaded */
	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
#if 1 /* usually #ifdef IWX_DEBUG but always enabled for now */
		int i;

		iwx_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		printf("driver status:\n");
		for (i = 0; i < IWX_MAX_QUEUES; i++) {
			struct iwx_tx_ring *ring = &sc->txq[i];
			printf("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		printf("  rx ring: cur=%d\n", sc->rxq.cur);
		printf("  802.11 state %s\n",
		    ieee80211_state_name[sc->sc_ic.ic_state]);
#endif

		printf("%s: fatal firmware error\n", DEVNAME(sc));
		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
			task_add(systq, &sc->init_task);
		return 1;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
		iwx_check_rfkill(sc);
		task_add(systq, &sc->init_task);
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
			sc->sc_flags |= IWX_FLAG_HW_ERR;
			task_add(systq, &sc->init_task);
		}
		return 1;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
		int i;

		/* Firmware has now configured the RFH. */
		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}

	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C) register, meaning that it's being clear
	 * by writing 1 to the bit.
	 */
	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
	return 1;
}
7706 
typedef void *iwx_match_t;	/* opaque autoconf match argument */

/* PCI IDs of devices this driver attaches to. */
static const struct pci_matchid iwx_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_3 },
};

/*
 * PCI subsystem IDs identifying AX201 variants; used by iwx_match()
 * to disambiguate product IDs shared with other devices.
 */
static const struct pci_matchid iwx_subsystem_id_ax201[] = {
	{ PCI_VENDOR_INTEL,	0x0070 },
	{ PCI_VENDOR_INTEL,	0x0074 },
	{ PCI_VENDOR_INTEL,	0x0078 },
	{ PCI_VENDOR_INTEL,	0x007c },
	{ PCI_VENDOR_INTEL,	0x0310 },
	{ PCI_VENDOR_INTEL,	0x2074 },
	{ PCI_VENDOR_INTEL,	0x4070 },
	/* TODO: There are more ax201 devices with "main" product ID 0x06f0 */
};
7725 
7726 int
7727 iwx_match(struct device *parent, iwx_match_t match __unused, void *aux)
7728 {
7729 	struct pci_attach_args *pa = aux;
7730 	pcireg_t subid;
7731 	pci_vendor_id_t svid;
7732 	pci_product_id_t spid;
7733 	int i;
7734 
7735 	if (!pci_matchbyid(pa, iwx_devices, nitems(iwx_devices)))
7736 		return 0;
7737 
7738 	/*
7739 	 * Some PCI product IDs are shared among devices which use distinct
7740 	 * chips or firmware. We need to match the subsystem ID as well to
7741 	 * ensure that we have in fact found a supported device.
7742 	 */
7743 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
7744 	svid = PCI_VENDOR(subid);
7745 	spid = PCI_PRODUCT(subid);
7746 
7747 	switch (PCI_PRODUCT(pa->pa_id)) {
7748 	case PCI_PRODUCT_INTEL_WL_22500_1: /* AX200 */
7749 		return 1; /* match any device */
7750 	case PCI_PRODUCT_INTEL_WL_22500_2: /* AX201 */
7751 	case PCI_PRODUCT_INTEL_WL_22500_3: /* AX201 */
7752 		for (i = 0; i < nitems(iwx_subsystem_id_ax201); i++) {
7753 			if (svid == iwx_subsystem_id_ax201[i].pm_vid &&
7754 			    spid == iwx_subsystem_id_ax201[i].pm_pid)
7755 				return 1;
7756 
7757 		}
7758 		break;
7759 	default:
7760 		break;
7761 	}
7762 
7763 	return 0;
7764 }
7765 
/*
 * Pre-initialization: prepare the hardware, run the init firmware
 * image once to obtain NVM data, and set up 802.11 state derived
 * from it. Called again on subsequent device (re)initializations,
 * where it only refreshes the MAC address. Returns 0 on success or
 * an errno-style error.
 */
int
iwx_preinit(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int err;
	/* Nonzero once firmware has been loaded successfully. */
	static int attached;

	err = iwx_prepare_card_hw(sc);
	if (err) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return err;
	}

	if (attached) {
		/* Update MAC in case the upper layers changed it. */
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
		    ((struct arpcom *)ifp)->ac_enaddr);
		return 0;
	}

	err = iwx_start_hw(sc);
	if (err) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return err;
	}

	/* Run the init ucode; the device is stopped again afterwards. */
	err = iwx_run_init_mvm_ucode(sc, 1);
	iwx_stop_device(sc);
	if (err)
		return err;

	/* Print version info and MAC address on first successful fw load. */
	attached = 1;
	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
	    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));

	if (sc->sc_nvm.sku_cap_11n_enable)
		iwx_setup_ht_rates(sc);

	/* not all hardware can do 5GHz band */
	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));

	/* Configure channel information obtained from firmware. */
	ieee80211_channel_init(ifp);

	/* Configure MAC address. */
	err = if_setlladdr(ifp, ic->ic_myaddr);
	if (err)
		printf("%s: could not set MAC address (error %d)\n",
		    DEVNAME(sc), err);

	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);

	return 0;
}
7825 
/*
 * Deferred attach hook: completes device setup via iwx_preinit().
 * Must not run during cold boot (asserted below), presumably because
 * firmware loading requires the filesystem to be available -- TODO
 * confirm against the hook registration in iwx_attach().
 */
void
iwx_attach_hook(struct device *self)
{
	struct iwx_softc *sc = (void *)self;

	KASSERT(!cold);

	iwx_preinit(sc);
}
7835 
7836 void
7837 iwx_attach(struct device *parent, struct device *self, void *aux)
7838 {
7839 	struct iwx_softc *sc = (void *)self;
7840 	struct pci_attach_args *pa = aux;
7841 	pci_intr_handle_t ih;
7842 	pcireg_t reg, memtype;
7843 	struct ieee80211com *ic = &sc->sc_ic;
7844 	struct ifnet *ifp = &ic->ic_if;
7845 	const char *intrstr;
7846 	int err;
7847 	int txq_i, i;
7848 
7849 	sc->sc_pct = pa->pa_pc;
7850 	sc->sc_pcitag = pa->pa_tag;
7851 	sc->sc_dmat = pa->pa_dmat;
7852 
7853 	rw_init(&sc->ioctl_rwl, "iwxioctl");
7854 
7855 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
7856 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
7857 	if (err == 0) {
7858 		printf("%s: PCIe capability structure not found!\n",
7859 		    DEVNAME(sc));
7860 		return;
7861 	}
7862 
7863 	/* Clear device-specific "PCI retry timeout" register (41h). */
7864 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
7865 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
7866 
7867 	/* Enable bus-mastering and hardware bug workaround. */
7868 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7869 	reg |= PCI_COMMAND_MASTER_ENABLE;
7870 	/* if !MSI */
7871 	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
7872 		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
7873 	}
7874 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7875 
7876 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
7877 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
7878 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
7879 	if (err) {
7880 		printf("%s: can't map mem space\n", DEVNAME(sc));
7881 		return;
7882 	}
7883 
7884 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
7885 		sc->sc_msix = 1;
7886 	} else if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
7887 		printf("%s: can't map interrupt\n", DEVNAME(sc));
7888 		return;
7889 	}
7890 
7891 	intrstr = pci_intr_string(sc->sc_pct, ih);
7892 	if (sc->sc_msix)
7893 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
7894 		    iwx_intr_msix, sc, DEVNAME(sc));
7895 	else
7896 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
7897 		    iwx_intr, sc, DEVNAME(sc));
7898 
7899 	if (sc->sc_ih == NULL) {
7900 		printf("\n");
7901 		printf("%s: can't establish interrupt", DEVNAME(sc));
7902 		if (intrstr != NULL)
7903 			printf(" at %s", intrstr);
7904 		printf("\n");
7905 		return;
7906 	}
7907 	printf(", %s\n", intrstr);
7908 
7909 	/* Clear pending interrupts. */
7910 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
7911 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
7912 	IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
7913 
7914 	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
7915 
7916 	/*
7917 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
7918 	 * changed, and now the revision step also includes bit 0-1 (no more
7919 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
7920 	 * in the old format.
7921 	 */
7922 	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
7923 			(IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
7924 
7925 	switch (PCI_PRODUCT(pa->pa_id)) {
7926 	case PCI_PRODUCT_INTEL_WL_22500_1:
7927 		sc->sc_fwname = "iwx-cc-a0-48";
7928 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
7929 		sc->sc_fwdmasegsz = IWX_FWDMASEGSZ_8000;
7930 		sc->sc_integrated = 1;
7931 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
7932 		sc->sc_low_latency_xtal = 0;
7933 		sc->sc_xtal_latency = 0;
7934 		sc->sc_tx_with_siso_diversity = 0;
7935 		sc->sc_uhb_supported = 0;
7936 		break;
7937 	case PCI_PRODUCT_INTEL_WL_22500_2:
7938 	case PCI_PRODUCT_INTEL_WL_22500_3:
7939 		if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
7940 			printf("%s: unsupported AX201 adapter\n", DEVNAME(sc));
7941 			return;
7942 		}
7943 
7944 		sc->sc_fwname = "iwx-QuZ-a0-hr-b0-48";
7945 		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
7946 		sc->sc_fwdmasegsz = IWX_FWDMASEGSZ_8000;
7947 		sc->sc_integrated = 1;
7948 		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
7949 		sc->sc_low_latency_xtal = 0;
7950 		sc->sc_xtal_latency = 5000;
7951 		sc->sc_tx_with_siso_diversity = 0;
7952 		sc->sc_uhb_supported = 0;
7953 		break;
7954 	default:
7955 		printf("%s: unknown adapter type\n", DEVNAME(sc));
7956 		return;
7957 	}
7958 
7959 	if (iwx_prepare_card_hw(sc) != 0) {
7960 		printf("%s: could not initialize hardware\n",
7961 		    DEVNAME(sc));
7962 		return;
7963 	}
7964 
7965 	/*
7966 	 * In order to recognize C step the driver should read the
7967 	 * chip version id located at the AUX bus MISC address.
7968 	 */
7969 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
7970 		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7971 	DELAY(2);
7972 
7973 	err = iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
7974 			   IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7975 			   IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7976 			   25000);
7977 	if (!err) {
7978 		printf("%s: Failed to wake up the nic\n", DEVNAME(sc));
7979 		return;
7980 	}
7981 
7982 	if (iwx_nic_lock(sc)) {
7983 		uint32_t hw_step = iwx_read_prph(sc, IWX_WFPM_CTRL_REG);
7984 		hw_step |= IWX_ENABLE_WFPM;
7985 		iwx_write_prph(sc, IWX_WFPM_CTRL_REG, hw_step);
7986 		hw_step = iwx_read_prph(sc, IWX_AUX_MISC_REG);
7987 		hw_step = (hw_step >> IWX_HW_STEP_LOCATION_BITS) & 0xF;
7988 		if (hw_step == 0x3)
7989 			sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
7990 					(IWX_SILICON_C_STEP << 2);
7991 		iwx_nic_unlock(sc);
7992 	} else {
7993 		printf("%s: Failed to lock the nic\n", DEVNAME(sc));
7994 		return;
7995 	}
7996 
7997 	/* Allocate DMA memory for loading firmware. */
7998 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
7999 	    sizeof(struct iwx_context_info), 0);
8000 	if (err) {
8001 		printf("%s: could not allocate memory for loading firmware\n",
8002 		    DEVNAME(sc));
8003 		return;
8004 	}
8005 
8006 	/*
8007 	 * Allocate DMA memory for firmware transfers.
8008 	 * Must be aligned on a 16-byte boundary.
8009 	 */
8010 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
8011 	    sc->sc_fwdmasegsz, 16);
8012 	if (err) {
8013 		printf("%s: could not allocate memory for firmware transfers\n",
8014 		    DEVNAME(sc));
8015 		goto fail0;
8016 	}
8017 
8018 	/* Allocate interrupt cause table (ICT).*/
8019 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
8020 	    IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
8021 	if (err) {
8022 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
8023 		goto fail1;
8024 	}
8025 
8026 	/* TX scheduler rings must be aligned on a 1KB boundary. */
8027 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
8028 	    nitems(sc->txq) * sizeof(struct iwx_agn_scd_bc_tbl), 1024);
8029 	if (err) {
8030 		printf("%s: could not allocate TX scheduler rings\n",
8031 		    DEVNAME(sc));
8032 		goto fail3;
8033 	}
8034 
8035 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
8036 		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
8037 		if (err) {
8038 			printf("%s: could not allocate TX ring %d\n",
8039 			    DEVNAME(sc), txq_i);
8040 			goto fail4;
8041 		}
8042 	}
8043 
8044 	err = iwx_alloc_rx_ring(sc, &sc->rxq);
8045 	if (err) {
8046 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
8047 		goto fail4;
8048 	}
8049 
8050 	sc->sc_nswq = taskq_create("iwxns", 1, IPL_NET, 0);
8051 	if (sc->sc_nswq == NULL)
8052 		goto fail4;
8053 
8054 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
8055 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
8056 	ic->ic_state = IEEE80211_S_INIT;
8057 
8058 	/* Set device capabilities. */
8059 	ic->ic_caps =
8060 	    IEEE80211_C_WEP |		/* WEP */
8061 	    IEEE80211_C_RSN |		/* WPA/RSN */
8062 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
8063 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
8064 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
8065 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
8066 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
8067 
8068 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
8069 	ic->ic_htcaps |=
8070 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
8071 	ic->ic_htxcaps = 0;
8072 	ic->ic_txbfcaps = 0;
8073 	ic->ic_aselcaps = 0;
8074 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
8075 
8076 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
8077 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
8078 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
8079 
8080 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
8081 		sc->sc_phyctxt[i].id = i;
8082 	}
8083 
8084 	/* IBSS channel undefined for now. */
8085 	ic->ic_ibss_chan = &ic->ic_channels[1];
8086 
8087 	ic->ic_max_rssi = IWX_MAX_DBM - IWX_MIN_DBM;
8088 
8089 	ifp->if_softc = sc;
8090 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
8091 	ifp->if_ioctl = iwx_ioctl;
8092 	ifp->if_start = iwx_start;
8093 	ifp->if_watchdog = iwx_watchdog;
8094 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
8095 
8096 	if_attach(ifp);
8097 	ieee80211_ifattach(ifp);
8098 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
8099 
8100 #if NBPFILTER > 0
8101 	iwx_radiotap_attach(sc);
8102 #endif
8103 	task_set(&sc->init_task, iwx_init_task, sc);
8104 	task_set(&sc->newstate_task, iwx_newstate_task, sc);
8105 	task_set(&sc->ba_task, iwx_ba_task, sc);
8106 	task_set(&sc->htprot_task, iwx_htprot_task, sc);
8107 
8108 	ic->ic_node_alloc = iwx_node_alloc;
8109 	ic->ic_bgscan_start = iwx_bgscan;
8110 	ic->ic_set_key = iwx_set_key;
8111 	ic->ic_delete_key = iwx_delete_key;
8112 
8113 	/* Override 802.11 state transition machine. */
8114 	sc->sc_newstate = ic->ic_newstate;
8115 	ic->ic_newstate = iwx_newstate;
8116 	ic->ic_update_htprot = iwx_update_htprot;
8117 	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
8118 	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;
8119 #ifdef notyet
8120 	ic->ic_ampdu_tx_start = iwx_ampdu_tx_start;
8121 	ic->ic_ampdu_tx_stop = iwx_ampdu_tx_stop;
8122 #endif
8123 	/*
8124 	 * We cannot read the MAC address without loading the
8125 	 * firmware from disk. Postpone until mountroot is done.
8126 	 */
8127 	config_mountroot(self, iwx_attach_hook);
8128 
8129 	return;
8130 
8131 fail4:	while (--txq_i >= 0)
8132 		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
8133 	iwx_free_rx_ring(sc, &sc->rxq);
8134 	iwx_dma_contig_free(&sc->sched_dma);
8135 fail3:	if (sc->ict_dma.vaddr != NULL)
8136 		iwx_dma_contig_free(&sc->ict_dma);
8137 
8138 fail1:	iwx_dma_contig_free(&sc->fw_dma);
8139 fail0:	iwx_dma_contig_free(&sc->ctxt_info_dma);
8140 	return;
8141 }
8142 
8143 #if NBPFILTER > 0
8144 void
8145 iwx_radiotap_attach(struct iwx_softc *sc)
8146 {
8147 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
8148 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
8149 
8150 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
8151 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
8152 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);
8153 
8154 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
8155 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
8156 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
8157 }
8158 #endif
8159 
/*
 * Task to (re)start the device, scheduled e.g. after a firmware/hardware
 * error or an rfkill state change.  Stops the interface if it is running
 * and brings it back up unless a fatal condition (hardware error or
 * rfkill asserted) was pending when the task began.
 */
void
iwx_init_task(void *arg1)
{
	struct iwx_softc *sc = arg1;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s = splnet();
	/*
	 * Snapshot the generation count and fatal-error flags before
	 * sleeping on the ioctl lock below; if the generation changes
	 * while we sleep, the device was reset or torn down underneath
	 * us and this task run is stale.
	 */
	int generation = sc->sc_generation;
	int fatal = (sc->sc_flags & (IWX_FLAG_HW_ERR | IWX_FLAG_RFKILL));

	rw_enter_write(&sc->ioctl_rwl);
	if (generation != sc->sc_generation) {
		/* Stale task run; bail out without touching the device. */
		rw_exit(&sc->ioctl_rwl);
		splx(s);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING)
		iwx_stop(ifp);
	else
		sc->sc_flags &= ~IWX_FLAG_HW_ERR;

	/*
	 * Restart only if the interface is administratively up (and was
	 * just stopped above) and no fatal condition was pending.
	 */
	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwx_init(ifp);

	rw_exit(&sc->ioctl_rwl);
	splx(s);
}
8187 
8188 int
8189 iwx_resume(struct iwx_softc *sc)
8190 {
8191 	pcireg_t reg;
8192 
8193 	/* Clear device-specific "PCI retry timeout" register (41h). */
8194 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
8195 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
8196 
8197 	/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
8198 	iwx_conf_msix_hw(sc, 0);
8199 
8200 	iwx_enable_rfkill_int(sc);
8201 	iwx_check_rfkill(sc);
8202 
8203 	return iwx_prepare_card_hw(sc);
8204 }
8205 
8206 int
8207 iwx_activate(struct device *self, int act)
8208 {
8209 	struct iwx_softc *sc = (struct iwx_softc *)self;
8210 	struct ifnet *ifp = &sc->sc_ic.ic_if;
8211 	int err = 0;
8212 
8213 	switch (act) {
8214 	case DVACT_QUIESCE:
8215 		if (ifp->if_flags & IFF_RUNNING) {
8216 			rw_enter_write(&sc->ioctl_rwl);
8217 			iwx_stop(ifp);
8218 			rw_exit(&sc->ioctl_rwl);
8219 		}
8220 		break;
8221 	case DVACT_RESUME:
8222 		err = iwx_resume(sc);
8223 		if (err)
8224 			printf("%s: could not initialize hardware\n",
8225 			    DEVNAME(sc));
8226 		break;
8227 	case DVACT_WAKEUP:
8228 		/* Hardware should be up at this point. */
8229 		if (iwx_set_hw_ready(sc))
8230 			task_add(systq, &sc->init_task);
8231 		break;
8232 	}
8233 
8234 	return 0;
8235 }
8236 
/* Autoconf glue: driver descriptor; iwx(4) is a network interface. */
struct cfdriver iwx_cd = {
	NULL, "iwx", DV_IFNET
};
8240 
/*
 * Autoconf attachment: softc size, match/attach entry points,
 * no detach handler, iwx_activate for suspend/resume.
 */
struct cfattach iwx_ca = {
	sizeof(struct iwx_softc), iwx_match, iwx_attach,
	NULL, iwx_activate
};
8245