xref: /openbsd/sys/dev/pci/if_iwm.c (revision 55cc5ba3)
1 /*	$OpenBSD: if_iwm.c,v 1.317 2020/12/12 11:48:53 jan Exp $	*/
2 
3 /*
4  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5  *   Author: Stefan Sperling <stsp@openbsd.org>
6  * Copyright (c) 2014 Fixup Software Ltd.
7  * Copyright (c) 2017 Stefan Sperling <stsp@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*-
23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24  * which were used as the reference documentation for this implementation.
25  *
26  ***********************************************************************
27  *
28  * This file is provided under a dual BSD/GPLv2 license.  When using or
29  * redistributing this file, you may do so under either license.
30  *
31  * GPL LICENSE SUMMARY
32  *
33  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
34  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35  * Copyright(c) 2016 Intel Deutschland GmbH
36  *
37  * This program is free software; you can redistribute it and/or modify
38  * it under the terms of version 2 of the GNU General Public License as
39  * published by the Free Software Foundation.
40  *
41  * This program is distributed in the hope that it will be useful, but
42  * WITHOUT ANY WARRANTY; without even the implied warranty of
43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
44  * General Public License for more details.
45  *
46  * You should have received a copy of the GNU General Public License
47  * along with this program; if not, write to the Free Software
48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49  * USA
50  *
51  * The full GNU General Public License is included in this distribution
52  * in the file called COPYING.
53  *
54  * Contact Information:
55  *  Intel Linux Wireless <ilw@linux.intel.com>
56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57  *
58  *
59  * BSD LICENSE
60  *
61  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
63  * Copyright(c) 2016 Intel Deutschland GmbH
64  * All rights reserved.
65  *
66  * Redistribution and use in source and binary forms, with or without
67  * modification, are permitted provided that the following conditions
68  * are met:
69  *
70  *  * Redistributions of source code must retain the above copyright
71  *    notice, this list of conditions and the following disclaimer.
72  *  * Redistributions in binary form must reproduce the above copyright
73  *    notice, this list of conditions and the following disclaimer in
74  *    the documentation and/or other materials provided with the
75  *    distribution.
76  *  * Neither the name Intel Corporation nor the names of its
77  *    contributors may be used to endorse or promote products derived
78  *    from this software without specific prior written permission.
79  *
80  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
81  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
82  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
83  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
84  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
86  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
87  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
88  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
89  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
90  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91  */
92 
93 /*-
94  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
95  *
96  * Permission to use, copy, modify, and distribute this software for any
97  * purpose with or without fee is hereby granted, provided that the above
98  * copyright notice and this permission notice appear in all copies.
99  *
100  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
101  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
102  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
103  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
104  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
105  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
106  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
107  */
108 
109 #include "bpfilter.h"
110 
111 #include <sys/param.h>
112 #include <sys/conf.h>
113 #include <sys/kernel.h>
114 #include <sys/malloc.h>
115 #include <sys/mbuf.h>
116 #include <sys/mutex.h>
117 #include <sys/proc.h>
118 #include <sys/rwlock.h>
119 #include <sys/socket.h>
120 #include <sys/sockio.h>
121 #include <sys/systm.h>
122 #include <sys/endian.h>
123 
124 #include <sys/refcnt.h>
125 #include <sys/task.h>
126 #include <machine/bus.h>
127 #include <machine/intr.h>
128 
129 #include <dev/pci/pcireg.h>
130 #include <dev/pci/pcivar.h>
131 #include <dev/pci/pcidevs.h>
132 
133 #if NBPFILTER > 0
134 #include <net/bpf.h>
135 #endif
136 #include <net/if.h>
137 #include <net/if_dl.h>
138 #include <net/if_media.h>
139 
140 #include <netinet/in.h>
141 #include <netinet/if_ether.h>
142 
143 #include <net80211/ieee80211_var.h>
144 #include <net80211/ieee80211_amrr.h>
145 #include <net80211/ieee80211_mira.h>
146 #include <net80211/ieee80211_radiotap.h>
147 
148 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
149 
150 #define IC2IFP(_ic_) (&(_ic_)->ic_if)
151 
152 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
153 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
154 
155 #ifdef IWM_DEBUG
156 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
157 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
158 int iwm_debug = 1;
159 #else
160 #define DPRINTF(x)	do { ; } while (0)
161 #define DPRINTFN(n, x)	do { ; } while (0)
162 #endif
163 
164 #include <dev/pci/if_iwmreg.h>
165 #include <dev/pci/if_iwmvar.h>
166 
/*
 * Channel list advertised for pre-8000-family devices:
 * all fourteen 2 GHz channels followed by the common 5 GHz set.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
175 
/*
 * Channel list advertised for 8000-family devices; extends the 5 GHz
 * range (adds 68-96 and 169-181) beyond iwm_nvm_channels above.
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
184 
185 #define IWM_NUM_2GHZ_CHANNELS	14
186 
/*
 * Rate table mapping net80211 rate values to the PLCP codes the
 * firmware expects, for both legacy and HT (MCS) rates.  The pairing
 * of rate values with *M_PLCP codes (2 <-> 1M, 12 <-> 6M, ...) shows
 * rates are in units of 500 kbit/s.  Entries with IWM_RATE_INVM_PLCP
 * exist only as HT rates; entries with IWM_RATE_HT_SISO_MCS_INV_PLCP
 * have no HT equivalent.  Ordered by increasing rate value.
 */
const struct iwm_rate {
	uint16_t rate;		/* rate in units of 500 kbit/s */
	uint8_t plcp;		/* legacy (CCK/OFDM) PLCP code */
	uint8_t ht_plcp;	/* HT MCS PLCP code */
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_15_PLCP },
};
215 #define IWM_RIDX_CCK	0
216 #define IWM_RIDX_OFDM	4
217 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
218 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
219 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
220 #define IWM_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
221 
/*
 * Convert an MCS index into an iwm_rates[] index.
 * Sixteen entries: MCS 0-7 (single stream) and MCS 8-15 (two streams).
 */
const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
	IWM_RATE_MCS_8_INDEX,
	IWM_RATE_MCS_9_INDEX,
	IWM_RATE_MCS_10_INDEX,
	IWM_RATE_MCS_11_INDEX,
	IWM_RATE_MCS_12_INDEX,
	IWM_RATE_MCS_13_INDEX,
	IWM_RATE_MCS_14_INDEX,
	IWM_RATE_MCS_15_INDEX,
};
241 
/* One NVM section buffer and its length (filled by NVM read code). */
struct iwm_nvm_section {
	uint16_t length;	/* valid length of data */
	uint8_t *data;		/* section contents */
};
246 
247 int	iwm_is_mimo_ht_plcp(uint8_t);
248 int	iwm_is_mimo_mcs(int);
249 int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
250 int	iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
251 	    uint8_t *, size_t);
252 int	iwm_set_default_calib(struct iwm_softc *, const void *);
253 void	iwm_fw_info_free(struct iwm_fw_info *);
254 int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
255 uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
256 void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
257 int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
258 int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
259 int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
260 int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
261 int	iwm_nic_lock(struct iwm_softc *);
262 void	iwm_nic_assert_locked(struct iwm_softc *);
263 void	iwm_nic_unlock(struct iwm_softc *);
264 void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
265 	    uint32_t);
266 void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
267 void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
268 int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, bus_size_t,
269 	    bus_size_t);
270 void	iwm_dma_contig_free(struct iwm_dma_info *);
271 int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
272 void	iwm_disable_rx_dma(struct iwm_softc *);
273 void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
274 void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
275 int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
276 void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
277 void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
278 void	iwm_enable_rfkill_int(struct iwm_softc *);
279 int	iwm_check_rfkill(struct iwm_softc *);
280 void	iwm_enable_interrupts(struct iwm_softc *);
281 void	iwm_enable_fwload_interrupt(struct iwm_softc *);
282 void	iwm_restore_interrupts(struct iwm_softc *);
283 void	iwm_disable_interrupts(struct iwm_softc *);
284 void	iwm_ict_reset(struct iwm_softc *);
285 int	iwm_set_hw_ready(struct iwm_softc *);
286 int	iwm_prepare_card_hw(struct iwm_softc *);
287 void	iwm_apm_config(struct iwm_softc *);
288 int	iwm_apm_init(struct iwm_softc *);
289 void	iwm_apm_stop(struct iwm_softc *);
290 int	iwm_allow_mcast(struct iwm_softc *);
291 void	iwm_init_msix_hw(struct iwm_softc *);
292 void	iwm_conf_msix_hw(struct iwm_softc *, int);
293 int	iwm_start_hw(struct iwm_softc *);
294 void	iwm_stop_device(struct iwm_softc *);
295 void	iwm_nic_config(struct iwm_softc *);
296 int	iwm_nic_rx_init(struct iwm_softc *);
297 int	iwm_nic_rx_legacy_init(struct iwm_softc *);
298 int	iwm_nic_rx_mq_init(struct iwm_softc *);
299 int	iwm_nic_tx_init(struct iwm_softc *);
300 int	iwm_nic_init(struct iwm_softc *);
301 int	iwm_enable_ac_txq(struct iwm_softc *, int, int);
302 int	iwm_enable_txq(struct iwm_softc *, int, int, int);
303 int	iwm_post_alive(struct iwm_softc *);
304 struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *, uint16_t,
305 	    uint16_t);
306 int	iwm_phy_db_set_section(struct iwm_softc *,
307 	    struct iwm_calib_res_notif_phy_db *);
308 int	iwm_is_valid_channel(uint16_t);
309 uint8_t	iwm_ch_id_to_ch_index(uint16_t);
310 uint16_t iwm_channel_id_to_papd(uint16_t);
311 uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
312 int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
313 	    uint16_t *, uint16_t);
314 int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
315 int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *, uint16_t,
316 	    uint8_t);
317 int	iwm_send_phy_db_data(struct iwm_softc *);
318 void	iwm_protect_session(struct iwm_softc *, struct iwm_node *, uint32_t,
319 	    uint32_t);
320 void	iwm_unprotect_session(struct iwm_softc *, struct iwm_node *);
321 int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
322 	    uint8_t *, uint16_t *);
323 int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
324 	    uint16_t *, size_t);
325 void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
326 	    const uint8_t *nvm_channels, int nchan);
327 int	iwm_mimo_enabled(struct iwm_softc *);
328 void	iwm_setup_ht_rates(struct iwm_softc *);
329 void	iwm_htprot_task(void *);
330 void	iwm_update_htprot(struct ieee80211com *, struct ieee80211_node *);
331 int	iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
332 	    uint8_t);
333 void	iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
334 	    uint8_t);
335 void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t,
336 	    uint16_t, uint16_t, int);
337 #ifdef notyet
338 int	iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
339 	    uint8_t);
340 void	iwm_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
341 	    uint8_t);
342 #endif
343 void	iwm_ba_task(void *);
344 
345 int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
346 	    const uint16_t *, const uint16_t *,
347 	    const uint16_t *, const uint16_t *,
348 	    const uint16_t *, int);
349 void	iwm_set_hw_address_8000(struct iwm_softc *, struct iwm_nvm_data *,
350 	    const uint16_t *, const uint16_t *);
351 int	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
352 int	iwm_nvm_init(struct iwm_softc *);
353 int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t, const uint8_t *,
354 	    uint32_t);
355 int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
356 	    uint32_t);
357 int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
358 int	iwm_load_cpu_sections_8000(struct iwm_softc *, struct iwm_fw_sects *,
359 	    int , int *);
360 int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
361 int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
362 int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
363 int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
364 int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
365 int	iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
366 int	iwm_send_dqa_cmd(struct iwm_softc *);
367 int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
368 int	iwm_config_ltr(struct iwm_softc *);
369 int	iwm_rx_addbuf(struct iwm_softc *, int, int);
370 int	iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *);
371 int	iwm_rxmq_get_signal_strength(struct iwm_softc *, struct iwm_rx_mpdu_desc *);
372 void	iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
373 	    struct iwm_rx_data *);
374 int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
375 int	iwm_ccmp_decap(struct iwm_softc *, struct mbuf *,
376 	    struct ieee80211_node *);
377 void	iwm_rx_frame(struct iwm_softc *, struct mbuf *, int, uint32_t, int, int,
378 	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
379 void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
380 	    struct iwm_node *, int, int);
381 void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
382 	    struct iwm_rx_data *);
383 void	iwm_rx_bmiss(struct iwm_softc *, struct iwm_rx_packet *,
384 	    struct iwm_rx_data *);
385 int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
386 void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
387 	    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
388 void	iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *,
389 	    struct ieee80211_channel *, uint8_t, uint8_t);
390 int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
391 	    uint8_t, uint32_t, uint32_t);
392 int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
393 int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t, uint16_t,
394 	    const void *);
395 int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
396 	    uint32_t *);
397 int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
398 	    const void *, uint32_t *);
399 void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
400 void	iwm_cmd_done(struct iwm_softc *, int, int, int);
401 void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
402 const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
403 	    struct ieee80211_frame *, struct iwm_tx_cmd *);
404 int	iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
405 int	iwm_flush_tx_path(struct iwm_softc *, int);
406 void	iwm_led_enable(struct iwm_softc *);
407 void	iwm_led_disable(struct iwm_softc *);
408 int	iwm_led_is_enabled(struct iwm_softc *);
409 void	iwm_led_blink_timeout(void *);
410 void	iwm_led_blink_start(struct iwm_softc *);
411 void	iwm_led_blink_stop(struct iwm_softc *);
412 int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
413 	    struct iwm_beacon_filter_cmd *);
414 void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *, struct iwm_node *,
415 	    struct iwm_beacon_filter_cmd *);
416 int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
417 void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
418 	    struct iwm_mac_power_cmd *);
419 int	iwm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
420 int	iwm_power_update_device(struct iwm_softc *);
421 int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
422 int	iwm_disable_beacon_filter(struct iwm_softc *);
423 int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
424 int	iwm_add_aux_sta(struct iwm_softc *);
425 int	iwm_rm_sta_cmd(struct iwm_softc *, struct iwm_node *);
426 uint16_t iwm_scan_rx_chain(struct iwm_softc *);
427 uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
428 uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
429 	    struct iwm_scan_channel_cfg_lmac *, int, int);
430 int	iwm_fill_probe_req(struct iwm_softc *, struct iwm_scan_probe_req *);
431 int	iwm_lmac_scan(struct iwm_softc *, int);
432 int	iwm_config_umac_scan(struct iwm_softc *);
433 int	iwm_umac_scan(struct iwm_softc *, int);
434 uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
435 int	iwm_rval2ridx(int);
436 void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
437 void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
438 	    struct iwm_mac_ctx_cmd *, uint32_t);
439 void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
440 	    struct iwm_mac_data_sta *, int);
441 int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *, uint32_t, int);
442 int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *, int);
443 void	iwm_add_task(struct iwm_softc *, struct taskq *, struct task *);
444 void	iwm_del_task(struct iwm_softc *, struct taskq *, struct task *);
445 int	iwm_scan(struct iwm_softc *);
446 int	iwm_bgscan(struct ieee80211com *);
447 int	iwm_umac_scan_abort(struct iwm_softc *);
448 int	iwm_lmac_scan_abort(struct iwm_softc *);
449 int	iwm_scan_abort(struct iwm_softc *);
450 int	iwm_auth(struct iwm_softc *);
451 int	iwm_deauth(struct iwm_softc *);
452 int	iwm_assoc(struct iwm_softc *);
453 int	iwm_disassoc(struct iwm_softc *);
454 int	iwm_run(struct iwm_softc *);
455 int	iwm_run_stop(struct iwm_softc *);
456 struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
457 int	iwm_set_key_v1(struct ieee80211com *, struct ieee80211_node *,
458 	    struct ieee80211_key *);
459 int	iwm_set_key(struct ieee80211com *, struct ieee80211_node *,
460 	    struct ieee80211_key *);
461 void	iwm_delete_key_v1(struct ieee80211com *,
462 	    struct ieee80211_node *, struct ieee80211_key *);
463 void	iwm_delete_key(struct ieee80211com *,
464 	    struct ieee80211_node *, struct ieee80211_key *);
465 void	iwm_calib_timeout(void *);
466 void	iwm_setrates(struct iwm_node *, int);
467 int	iwm_media_change(struct ifnet *);
468 void	iwm_newstate_task(void *);
469 int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
470 void	iwm_endscan(struct iwm_softc *);
471 void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
472 	    struct ieee80211_node *);
473 int	iwm_sf_config(struct iwm_softc *, int);
474 int	iwm_send_bt_init_conf(struct iwm_softc *);
475 int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
476 void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
477 void	iwm_free_fw_paging(struct iwm_softc *);
478 int	iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *);
479 int	iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_sects *);
480 int	iwm_init_hw(struct iwm_softc *);
481 int	iwm_init(struct ifnet *);
482 void	iwm_start(struct ifnet *);
483 void	iwm_stop(struct ifnet *);
484 void	iwm_watchdog(struct ifnet *);
485 int	iwm_ioctl(struct ifnet *, u_long, caddr_t);
486 #ifdef IWM_DEBUG
487 const char *iwm_desc_lookup(uint32_t);
488 void	iwm_nic_error(struct iwm_softc *);
489 void	iwm_nic_umac_error(struct iwm_softc *);
490 #endif
491 void	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *, void *, size_t,
492 	    struct mbuf_list *);
493 int	iwm_rx_pkt_valid(struct iwm_rx_packet *);
494 void	iwm_rx_pkt(struct iwm_softc *, struct iwm_rx_data *,
495 	    struct mbuf_list *);
496 void	iwm_notif_intr(struct iwm_softc *);
497 int	iwm_intr(void *);
498 int	iwm_intr_msix(void *);
499 int	iwm_match(struct device *, void *, void *);
500 int	iwm_preinit(struct iwm_softc *);
501 void	iwm_attach_hook(struct device *);
502 void	iwm_attach(struct device *, struct device *, void *);
503 void	iwm_init_task(void *);
504 int	iwm_activate(struct device *, int);
505 int	iwm_resume(struct iwm_softc *);
506 
507 #if NBPFILTER > 0
508 void	iwm_radiotap_attach(struct iwm_softc *);
509 #endif
510 
511 int
512 iwm_is_mimo_ht_plcp(uint8_t ht_plcp)
513 {
514 	return (ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP &&
515 	    (ht_plcp & IWM_RATE_HT_MCS_NSS_MSK));
516 }
517 
518 int
519 iwm_is_mimo_mcs(int mcs)
520 {
521 	int ridx = iwm_mcs2ridx[mcs];
522 	return iwm_is_mimo_ht_plcp(iwm_rates[ridx].ht_plcp);
523 
524 }
525 
526 int
527 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
528 {
529 	struct iwm_fw_cscheme_list *l = (void *)data;
530 
531 	if (dlen < sizeof(*l) ||
532 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
533 		return EINVAL;
534 
535 	/* we don't actually store anything for now, always use s/w crypto */
536 
537 	return 0;
538 }
539 
540 int
541 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
542     uint8_t *data, size_t dlen)
543 {
544 	struct iwm_fw_sects *fws;
545 	struct iwm_fw_onesect *fwone;
546 
547 	if (type >= IWM_UCODE_TYPE_MAX)
548 		return EINVAL;
549 	if (dlen < sizeof(uint32_t))
550 		return EINVAL;
551 
552 	fws = &sc->sc_fw.fw_sects[type];
553 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
554 		return EINVAL;
555 
556 	fwone = &fws->fw_sect[fws->fw_count];
557 
558 	/* first 32bit are device load offset */
559 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
560 
561 	/* rest is data */
562 	fwone->fws_data = data + sizeof(uint32_t);
563 	fwone->fws_len = dlen - sizeof(uint32_t);
564 
565 	fws->fw_count++;
566 	fws->fw_totlen += fwone->fws_len;
567 
568 	return 0;
569 }
570 
571 #define IWM_DEFAULT_SCAN_CHANNELS	40
572 /* Newer firmware might support more channels. Raise this value if needed. */
573 #define IWM_MAX_SCAN_CHANNELS		52 /* as of 8265-34 firmware image */
574 
/* Payload layout of an IWM_UCODE_TLV_DEF_CALIB firmware TLV. */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;	/* little-endian; checked < IWM_UCODE_TYPE_MAX */
	struct iwm_tlv_calib_ctrl calib;
} __packed;
579 
580 int
581 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
582 {
583 	const struct iwm_tlv_calib_data *def_calib = data;
584 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
585 
586 	if (ucode_type >= IWM_UCODE_TYPE_MAX)
587 		return EINVAL;
588 
589 	sc->sc_default_calib[ucode_type].flow_trigger =
590 	    def_calib->calib.flow_trigger;
591 	sc->sc_default_calib[ucode_type].event_trigger =
592 	    def_calib->calib.event_trigger;
593 
594 	return 0;
595 }
596 
597 void
598 iwm_fw_info_free(struct iwm_fw_info *fw)
599 {
600 	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
601 	fw->fw_rawdata = NULL;
602 	fw->fw_rawsize = 0;
603 	/* don't touch fw->fw_status */
604 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
605 }
606 
607 int
608 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
609 {
610 	struct iwm_fw_info *fw = &sc->sc_fw;
611 	struct iwm_tlv_ucode_header *uhdr;
612 	struct iwm_ucode_tlv tlv;
613 	uint32_t tlv_type;
614 	uint8_t *data;
615 	uint32_t usniffer_img;
616 	uint32_t paging_mem_size;
617 	int err;
618 	size_t len;
619 
620 	if (fw->fw_status == IWM_FW_STATUS_DONE &&
621 	    ucode_type != IWM_UCODE_TYPE_INIT)
622 		return 0;
623 
624 	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
625 		tsleep_nsec(&sc->sc_fw, 0, "iwmfwp", INFSLP);
626 	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
627 
628 	if (fw->fw_rawdata != NULL)
629 		iwm_fw_info_free(fw);
630 
631 	err = loadfirmware(sc->sc_fwname,
632 	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
633 	if (err) {
634 		printf("%s: could not read firmware %s (error %d)\n",
635 		    DEVNAME(sc), sc->sc_fwname, err);
636 		goto out;
637 	}
638 
639 	sc->sc_capaflags = 0;
640 	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
641 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
642 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
643 
644 	uhdr = (void *)fw->fw_rawdata;
645 	if (*(uint32_t *)fw->fw_rawdata != 0
646 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
647 		printf("%s: invalid firmware %s\n",
648 		    DEVNAME(sc), sc->sc_fwname);
649 		err = EINVAL;
650 		goto out;
651 	}
652 
653 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
654 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
655 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
656 	    IWM_UCODE_API(le32toh(uhdr->ver)));
657 	data = uhdr->data;
658 	len = fw->fw_rawsize - sizeof(*uhdr);
659 
660 	while (len >= sizeof(tlv)) {
661 		size_t tlv_len;
662 		void *tlv_data;
663 
664 		memcpy(&tlv, data, sizeof(tlv));
665 		tlv_len = le32toh(tlv.length);
666 		tlv_type = le32toh(tlv.type);
667 
668 		len -= sizeof(tlv);
669 		data += sizeof(tlv);
670 		tlv_data = data;
671 
672 		if (len < tlv_len) {
673 			printf("%s: firmware too short: %zu bytes\n",
674 			    DEVNAME(sc), len);
675 			err = EINVAL;
676 			goto parse_out;
677 		}
678 
679 		switch (tlv_type) {
680 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
681 			if (tlv_len < sizeof(uint32_t)) {
682 				err = EINVAL;
683 				goto parse_out;
684 			}
685 			sc->sc_capa_max_probe_len
686 			    = le32toh(*(uint32_t *)tlv_data);
687 			if (sc->sc_capa_max_probe_len >
688 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
689 				err = EINVAL;
690 				goto parse_out;
691 			}
692 			break;
693 		case IWM_UCODE_TLV_PAN:
694 			if (tlv_len) {
695 				err = EINVAL;
696 				goto parse_out;
697 			}
698 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
699 			break;
700 		case IWM_UCODE_TLV_FLAGS:
701 			if (tlv_len < sizeof(uint32_t)) {
702 				err = EINVAL;
703 				goto parse_out;
704 			}
705 			/*
706 			 * Apparently there can be many flags, but Linux driver
707 			 * parses only the first one, and so do we.
708 			 *
709 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
710 			 * Intentional or a bug?  Observations from
711 			 * current firmware file:
712 			 *  1) TLV_PAN is parsed first
713 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
714 			 * ==> this resets TLV_PAN to itself... hnnnk
715 			 */
716 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
717 			break;
718 		case IWM_UCODE_TLV_CSCHEME:
719 			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
720 			if (err)
721 				goto parse_out;
722 			break;
723 		case IWM_UCODE_TLV_NUM_OF_CPU: {
724 			uint32_t num_cpu;
725 			if (tlv_len != sizeof(uint32_t)) {
726 				err = EINVAL;
727 				goto parse_out;
728 			}
729 			num_cpu = le32toh(*(uint32_t *)tlv_data);
730 			if (num_cpu < 1 || num_cpu > 2) {
731 				err = EINVAL;
732 				goto parse_out;
733 			}
734 			break;
735 		}
736 		case IWM_UCODE_TLV_SEC_RT:
737 			err = iwm_firmware_store_section(sc,
738 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
739 			if (err)
740 				goto parse_out;
741 			break;
742 		case IWM_UCODE_TLV_SEC_INIT:
743 			err = iwm_firmware_store_section(sc,
744 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
745 			if (err)
746 				goto parse_out;
747 			break;
748 		case IWM_UCODE_TLV_SEC_WOWLAN:
749 			err = iwm_firmware_store_section(sc,
750 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
751 			if (err)
752 				goto parse_out;
753 			break;
754 		case IWM_UCODE_TLV_DEF_CALIB:
755 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
756 				err = EINVAL;
757 				goto parse_out;
758 			}
759 			err = iwm_set_default_calib(sc, tlv_data);
760 			if (err)
761 				goto parse_out;
762 			break;
763 		case IWM_UCODE_TLV_PHY_SKU:
764 			if (tlv_len != sizeof(uint32_t)) {
765 				err = EINVAL;
766 				goto parse_out;
767 			}
768 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
769 			break;
770 
771 		case IWM_UCODE_TLV_API_CHANGES_SET: {
772 			struct iwm_ucode_api *api;
773 			int idx, i;
774 			if (tlv_len != sizeof(*api)) {
775 				err = EINVAL;
776 				goto parse_out;
777 			}
778 			api = (struct iwm_ucode_api *)tlv_data;
779 			idx = le32toh(api->api_index);
780 			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
781 				err = EINVAL;
782 				goto parse_out;
783 			}
784 			for (i = 0; i < 32; i++) {
785 				if ((le32toh(api->api_flags) & (1 << i)) == 0)
786 					continue;
787 				setbit(sc->sc_ucode_api, i + (32 * idx));
788 			}
789 			break;
790 		}
791 
792 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
793 			struct iwm_ucode_capa *capa;
794 			int idx, i;
795 			if (tlv_len != sizeof(*capa)) {
796 				err = EINVAL;
797 				goto parse_out;
798 			}
799 			capa = (struct iwm_ucode_capa *)tlv_data;
800 			idx = le32toh(capa->api_index);
801 			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
802 				goto parse_out;
803 			}
804 			for (i = 0; i < 32; i++) {
805 				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
806 					continue;
807 				setbit(sc->sc_enabled_capa, i + (32 * idx));
808 			}
809 			break;
810 		}
811 
812 		case 48: /* undocumented TLV */
813 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
814 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
815 			/* ignore, not used by current driver */
816 			break;
817 
818 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
819 			err = iwm_firmware_store_section(sc,
820 			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
821 			    tlv_len);
822 			if (err)
823 				goto parse_out;
824 			break;
825 
826 		case IWM_UCODE_TLV_PAGING:
827 			if (tlv_len != sizeof(uint32_t)) {
828 				err = EINVAL;
829 				goto parse_out;
830 			}
831 			paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
832 
833 			DPRINTF(("%s: Paging: paging enabled (size = %u bytes)\n",
834 			    DEVNAME(sc), paging_mem_size));
835 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
836 				printf("%s: Driver only supports up to %u"
837 				    " bytes for paging image (%u requested)\n",
838 				    DEVNAME(sc), IWM_MAX_PAGING_IMAGE_SIZE,
839 				    paging_mem_size);
840 				err = EINVAL;
841 				goto out;
842 			}
843 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
844 				printf("%s: Paging: image isn't multiple of %u\n",
845 				    DEVNAME(sc), IWM_FW_PAGING_SIZE);
846 				err = EINVAL;
847 				goto out;
848 			}
849 
850 			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
851 			    paging_mem_size;
852 			usniffer_img = IWM_UCODE_TYPE_REGULAR_USNIFFER;
853 			fw->fw_sects[usniffer_img].paging_mem_size =
854 			    paging_mem_size;
855 			break;
856 
857 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
858 			if (tlv_len != sizeof(uint32_t)) {
859 				err = EINVAL;
860 				goto parse_out;
861 			}
862 			sc->sc_capa_n_scan_channels =
863 			  le32toh(*(uint32_t *)tlv_data);
864 			if (sc->sc_capa_n_scan_channels > IWM_MAX_SCAN_CHANNELS) {
865 				err = ERANGE;
866 				goto parse_out;
867 			}
868 			break;
869 
870 		case IWM_UCODE_TLV_FW_VERSION:
871 			if (tlv_len != sizeof(uint32_t) * 3) {
872 				err = EINVAL;
873 				goto parse_out;
874 			}
875 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
876 			    "%u.%u.%u",
877 			    le32toh(((uint32_t *)tlv_data)[0]),
878 			    le32toh(((uint32_t *)tlv_data)[1]),
879 			    le32toh(((uint32_t *)tlv_data)[2]));
880 			break;
881 
882 		case IWM_UCODE_TLV_FW_DBG_DEST:
883 		case IWM_UCODE_TLV_FW_DBG_CONF:
884 			break;
885 
886 		case IWM_UCODE_TLV_FW_MEM_SEG:
887 			break;
888 
889 		default:
890 			err = EINVAL;
891 			goto parse_out;
892 		}
893 
894 		len -= roundup(tlv_len, 4);
895 		data += roundup(tlv_len, 4);
896 	}
897 
898 	KASSERT(err == 0);
899 
900  parse_out:
901 	if (err) {
902 		printf("%s: firmware parse error %d, "
903 		    "section type %d\n", DEVNAME(sc), err, tlv_type);
904 	}
905 
906  out:
907 	if (err) {
908 		fw->fw_status = IWM_FW_STATUS_NONE;
909 		if (fw->fw_rawdata != NULL)
910 			iwm_fw_info_free(fw);
911 	} else
912 		fw->fw_status = IWM_FW_STATUS_DONE;
913 	wakeup(&sc->sc_fw);
914 
915 	return err;
916 }
917 
/*
 * Read a peripheral (PRPH) register.  Caller must hold the NIC lock
 * (enforced via iwm_nic_assert_locked()).  The 20-bit address is
 * combined with a fixed (3 << 24) control tag, latched through
 * HBUS_TARG_PRPH_RADDR, and the data read back from RDAT.
 */
uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	iwm_nic_assert_locked(sc);
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	/* Ensure the address write lands before reading the data port. */
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}
927 
/*
 * Write a peripheral (PRPH) register.  Caller must hold the NIC lock.
 * Mirrors iwm_read_prph(): address (masked to 20 bits, tagged with
 * 3 << 24) goes to WADDR, then the value to WDAT.
 */
void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	iwm_nic_assert_locked(sc);
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	/* Make sure the address is latched before the data write. */
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}
937 
/*
 * Write a 64-bit value as two 32-bit PRPH writes: low word at 'addr',
 * high word at 'addr + 4'.  Caller must hold the NIC lock (checked by
 * iwm_write_prph()).
 */
void
iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val)
{
	iwm_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
	iwm_write_prph(sc, (uint32_t)addr + 4, val >> 32);
}
944 
945 int
946 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
947 {
948 	int offs, err = 0;
949 	uint32_t *vals = buf;
950 
951 	if (iwm_nic_lock(sc)) {
952 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
953 		for (offs = 0; offs < dwords; offs++)
954 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
955 		iwm_nic_unlock(sc);
956 	} else {
957 		err = EBUSY;
958 	}
959 	return err;
960 }
961 
962 int
963 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
964 {
965 	int offs;
966 	const uint32_t *vals = buf;
967 
968 	if (iwm_nic_lock(sc)) {
969 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
970 		/* WADDR auto-increments */
971 		for (offs = 0; offs < dwords; offs++) {
972 			uint32_t val = vals ? vals[offs] : 0;
973 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
974 		}
975 		iwm_nic_unlock(sc);
976 	} else {
977 		return EBUSY;
978 	}
979 	return 0;
980 }
981 
/* Convenience wrapper: write a single 32-bit word of device memory. */
int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}
987 
988 int
989 iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
990     int timo)
991 {
992 	for (;;) {
993 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
994 			return 1;
995 		}
996 		if (timo < 10) {
997 			return 0;
998 		}
999 		timo -= 10;
1000 		DELAY(10);
1001 	}
1002 }
1003 
/*
 * Acquire access to the NIC's internal registers.  Locks nest: a
 * counter tracks outstanding holders and the hardware MAC access
 * request bit is only asserted for the first.  Returns 1 on success,
 * 0 if the MAC did not become accessible within the poll timeout.
 */
int
iwm_nic_lock(struct iwm_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		iwm_nic_assert_locked(sc);
		sc->sc_nic_locks++;
		return 1; /* already locked */
	}

	/* Request exclusive MAC access; wakes the device if asleep. */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* 8000 family and newer get a brief delay before polling. */
	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	/* Wait for clock-ready and not-going-to-sleep, up to 150ms. */
	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		sc->sc_nic_locks++;
		return 1;
	}

	printf("%s: acquiring device failed\n", DEVNAME(sc));
	return 0;
}
1030 
/*
 * Consistency checks for code paths that require the NIC lock: panic
 * if the MAC clock is not ready, the MAC is going to sleep, or the
 * lock counter shows no lock is held.
 */
void
iwm_nic_assert_locked(struct iwm_softc *sc)
{
	uint32_t reg = IWM_READ(sc, IWM_CSR_GP_CNTRL);
	if ((reg & IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) == 0)
		panic("%s: mac clock not ready", DEVNAME(sc));
	if (reg & IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)
		panic("%s: mac gone to sleep", DEVNAME(sc));
	if (sc->sc_nic_locks <= 0)
		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
}
1042 
1043 void
1044 iwm_nic_unlock(struct iwm_softc *sc)
1045 {
1046 	if (sc->sc_nic_locks > 0) {
1047 		if (--sc->sc_nic_locks == 0)
1048 			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1049 			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1050 	} else
1051 		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1052 }
1053 
1054 void
1055 iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
1056     uint32_t mask)
1057 {
1058 	uint32_t val;
1059 
1060 	/* XXX: no error path? */
1061 	if (iwm_nic_lock(sc)) {
1062 		val = iwm_read_prph(sc, reg) & mask;
1063 		val |= bits;
1064 		iwm_write_prph(sc, reg, val);
1065 		iwm_nic_unlock(sc);
1066 	}
1067 }
1068 
/* Set 'bits' in PRPH register 'reg', preserving all other bits. */
void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
1074 
/* Clear 'bits' in PRPH register 'reg', preserving all other bits. */
void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
1080 
/*
 * Allocate a physically contiguous, zeroed DMA buffer of 'size' bytes
 * with the given alignment, and map it into kernel virtual memory.
 * On success fills in dma->{map,seg,vaddr,paddr} and returns 0; on any
 * failure all partial state is released via iwm_dma_contig_free() and
 * the bus_dma error is returned.
 */
int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	caddr_t va;

	dma->tag = tag;
	dma->size = size;

	/* Single-segment map: the device needs contiguous memory. */
	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	/* Hand the device a zeroed buffer. */
	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return err;
}
1121 
/*
 * Release a DMA buffer allocated by iwm_dma_contig_alloc().  Safe to
 * call on a partially initialized iwm_dma_info: each teardown step is
 * guarded by the state (map, then vaddr) that the allocation reached.
 */
void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}
1138 
/*
 * Allocate all DMA resources for the RX ring: the free-descriptor
 * array, the status area, the used-descriptor array (multi-queue RX
 * hardware only), and one DMA map plus an initial receive buffer per
 * ring slot.  Descriptor size and slot count differ between MQ-RX
 * (64-bit descriptors) and legacy RX (32-bit descriptors).  On any
 * failure, everything allocated so far is torn down.
 */
int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	size_t descsz;
	int count, i, err;

	ring->cur = 0;

	if (sc->sc_mqrx_supported) {
		count = IWM_RX_MQ_RING_COUNT;
		descsz = sizeof(uint64_t);
	} else {
		count = IWM_RX_RING_COUNT;
		descsz = sizeof(uint32_t);
	}

	/* Allocate RX descriptors (256-byte aligned). */
	size = count * descsz;
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
	if (err) {
		printf("%s: could not allocate RX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->desc = ring->free_desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		printf("%s: could not allocate RX status DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* MQ-RX hardware additionally uses a "used descriptor" ring. */
	if (sc->sc_mqrx_supported) {
		size = count * sizeof(uint32_t);
		err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
		    size, 256);
		if (err) {
			printf("%s: could not allocate RX ring DMA memory\n",
			    DEVNAME(sc));
			goto fail;
		}
	}

	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			printf("%s: could not create RX buf DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}

		/* Attach an initial receive buffer to this slot. */
		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return err;
}
1209 
/*
 * Stop the RX DMA engine and wait (up to ~10ms, polling every 10us)
 * for it to report idle.  MQ-RX hardware is controlled through the
 * RFH PRPH registers; legacy hardware through the FH CSR registers.
 * A timeout is not reported to the caller.
 */
void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		if (sc->sc_mqrx_supported) {
			iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
			for (ntries = 0; ntries < 1000; ntries++) {
				if (iwm_read_prph(sc, IWM_RFH_GEN_STATUS) &
				    IWM_RXF_DMA_IDLE)
					break;
				DELAY(10);
			}
		} else {
			IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
			for (ntries = 0; ntries < 1000; ntries++) {
				if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG)&
				    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
					break;
				DELAY(10);
			}
		}
		iwm_nic_unlock(sc);
	}
}
1236 
/*
 * Reset the RX ring software state: rewind the cursor and clear the
 * DMA-shared status area (bracketed by bus_dmamap_sync calls so the
 * device sees the cleared contents).
 */
void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);

}
1248 
1249 void
1250 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1251 {
1252 	int count, i;
1253 
1254 	iwm_dma_contig_free(&ring->free_desc_dma);
1255 	iwm_dma_contig_free(&ring->stat_dma);
1256 	iwm_dma_contig_free(&ring->used_desc_dma);
1257 
1258 	if (sc->sc_mqrx_supported)
1259 		count = IWM_RX_MQ_RING_COUNT;
1260 	else
1261 		count = IWM_RX_RING_COUNT;
1262 
1263 	for (i = 0; i < count; i++) {
1264 		struct iwm_rx_data *data = &ring->data[i];
1265 
1266 		if (data->m != NULL) {
1267 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1268 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1269 			bus_dmamap_unload(sc->sc_dmat, data->map);
1270 			m_freem(data->m);
1271 			data->m = NULL;
1272 		}
1273 		if (data->map != NULL)
1274 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1275 	}
1276 }
1277 
/*
 * Allocate DMA resources for TX ring 'qid': the TFD descriptor array
 * always, and — only for rings up to the command queue — the device
 * command array plus per-slot DMA maps.  On failure, everything
 * allocated so far is torn down.
 */
int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;
	ring->tail = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		printf("%s: could not allocate TX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * There is no need to allocate DMA buffers for unused rings.
	 * 7k/8k/9k hardware supports up to 31 Tx rings which is more
	 * than we currently need.
	 *
	 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
	 * The command is queue 0 (sc->txq[0]), and 4 mgmt/data frame queues
	 * are sc->tqx[IWM_DQA_MIN_MGMT_QUEUE + ac], i.e. sc->txq[5:8],
	 * in order to provide one queue per EDCA category.
	 *
	 * In non-DQA mode, we use rings 0 through 9 (0-3 are EDCA, 9 is cmd).
	 *
	 * Tx aggregation will require additional queues (one queue per TID
	 * for which aggregation is enabled) but we do not implement this yet.
	 *
	 * Unfortunately, we cannot tell if DQA will be used until the
	 * firmware gets loaded later, so just allocate sufficient rings
	 * in order to satisfy both cases.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Walk the command array, recording each slot's DMA addresses. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		/* Scratch area lives inside the TX command structure. */
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE || qid == IWM_DQA_CMD_QUEUE)
			mapsize = (sizeof(struct iwm_cmd_header) +
			    IWM_MAX_CMD_PAYLOAD_SIZE);
		else
			mapsize = MCLBYTES;
		err = bus_dmamap_create(sc->sc_dmat, mapsize,
		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (err) {
			printf("%s: could not create TX buf DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}
	}
	/* Verify we consumed exactly the command array. */
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return err;
}
1361 
/*
 * Reset a TX ring to its empty state: free all queued mbufs, zero the
 * TFD descriptors, clear the queue-full bit, and rewind the software
 * cursors.  Also releases the NIC lock held across in-flight commands
 * on 7000-family devices.
 */
void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	/* 7000 family NICs are locked while commands are in progress. */
	if (ring->qid == sc->cmdqid && ring->queued > 0) {
		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
			iwm_nic_unlock(sc);
	}
	ring->queued = 0;
	ring->cur = 0;
	ring->tail = 0;
}
1392 
1393 void
1394 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1395 {
1396 	int i;
1397 
1398 	iwm_dma_contig_free(&ring->desc_dma);
1399 	iwm_dma_contig_free(&ring->cmd_dma);
1400 
1401 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1402 		struct iwm_tx_data *data = &ring->data[i];
1403 
1404 		if (data->m != NULL) {
1405 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1406 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1407 			bus_dmamap_unload(sc->sc_dmat, data->map);
1408 			m_freem(data->m);
1409 			data->m = NULL;
1410 		}
1411 		if (data->map != NULL)
1412 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1413 	}
1414 }
1415 
/*
 * Unmask only the RF-kill interrupt.  With MSI a set bit enables the
 * cause; with MSIX a cleared bit enables it, so all other HW causes
 * are written as masked.
 */
void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
	} else {
		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL);
		sc->sc_hw_mask = IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL;
	}

	/* 9000 family and newer: let RF-kill wake the device from L1A. */
	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_9000)
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
}
1434 
1435 int
1436 iwm_check_rfkill(struct iwm_softc *sc)
1437 {
1438 	uint32_t v;
1439 	int s;
1440 	int rv;
1441 
1442 	s = splnet();
1443 
1444 	/*
1445 	 * "documentation" is not really helpful here:
1446 	 *  27:	HW_RF_KILL_SW
1447 	 *	Indicates state of (platform's) hardware RF-Kill switch
1448 	 *
1449 	 * But apparently when it's off, it's on ...
1450 	 */
1451 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1452 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1453 	if (rv) {
1454 		sc->sc_flags |= IWM_FLAG_RFKILL;
1455 	} else {
1456 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
1457 	}
1458 
1459 	splx(s);
1460 	return rv;
1461 }
1462 
/*
 * Unmask the full set of interrupt causes (MSI: the INI_SET_MASK set;
 * MSIX: everything that was enabled at init time).
 */
void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWM_CSR_INI_SET_MASK;
		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike msi, in msix cause is enabled when it is unset.
		 */
		sc->sc_hw_mask = sc->sc_hw_init_mask;
		sc->sc_fh_mask = sc->sc_fh_init_mask;
		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
		    ~sc->sc_fh_mask);
		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
		    ~sc->sc_hw_mask);
	}
}
1482 
/*
 * Unmask only the interrupt used during firmware load (MSI: FH_TX;
 * MSIX: the D2S channel-0 FH cause); all other causes stay masked.
 */
void
iwm_enable_fwload_interrupt(struct iwm_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
	} else {
		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
		    sc->sc_hw_init_mask);
		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
		    ~IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
		sc->sc_fh_mask = IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM;
	}
}
1497 
/* Re-apply the last interrupt mask recorded in sc_intmask (MSI mode). */
void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1503 
/*
 * Mask all interrupt causes.  In MSI mode also acknowledge anything
 * pending; in MSIX mode write back the init-time masks.
 */
void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	if (!sc->sc_msix) {
		IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

		/* acknowledge all interrupts */
		IWM_WRITE(sc, IWM_CSR_INT, ~0);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
	} else {
		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
		    sc->sc_hw_init_mask);
	}

	splx(s);
}
1524 
/*
 * (Re)initialize the Interrupt Cause Table: clear the table, point
 * the device at its physical address, and switch the driver into ICT
 * mode.  Interrupts are disabled for the duration and re-enabled at
 * the end.
 */
void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Acknowledge anything pending, then unmask. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1546 
#define IWM_HW_READY_TIMEOUT 50
/*
 * Ask the device to become ready: set NIC_READY and poll for it to
 * stick (up to IWM_HW_READY_TIMEOUT microseconds).  On success also
 * tell the device the OS is alive.  Returns nonzero when ready.
 */
int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	int ready;

	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
	if (ready)
		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);

	return ready;
}
#undef IWM_HW_READY_TIMEOUT
1567 
1568 int
1569 iwm_prepare_card_hw(struct iwm_softc *sc)
1570 {
1571 	int t = 0;
1572 
1573 	if (iwm_set_hw_ready(sc))
1574 		return 0;
1575 
1576 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1577 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1578 	DELAY(1000);
1579 
1580 
1581 	/* If HW is not ready, prepare the conditions to check again */
1582 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1583 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1584 
1585 	do {
1586 		if (iwm_set_hw_ready(sc))
1587 			return 0;
1588 		DELAY(200);
1589 		t += 200;
1590 	} while (t < 150000);
1591 
1592 	return ETIMEDOUT;
1593 }
1594 
/*
 * Configure power-management behavior based on the PCIe link control
 * and device control registers: enable L0S only when L1-ASPM is off,
 * and record whether Latency Tolerance Reporting is enabled.
 */
void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t lctl, cap;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCI_PCIE_LCSR);
	if (lctl & PCI_PCIE_LCSR_ASPM_L1) {
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}

	/* Record LTR enablement from the PCIe Device Control 2 register. */
	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCI_PCIE_DCSR2);
	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
	    DEVNAME(sc),
	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
	    sc->sc_ltr_enabled ? "En" : "Dis"));
}
1626 
1627 /*
1628  * Start up NIC's basic functionality after it has been reset
1629  * e.g. after platform boot or shutdown.
1630  * NOTE:  This does not load uCode nor start the embedded processor
1631  */
int
iwm_apm_init(struct iwm_softc *sc)
{
	int err = 0;

	/* Disable L0S exit timer (platform NMI workaround) */
	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwm_apm_config(sc);

#if 0 /* not for 7k/8k */
	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
		    trans->cfg->base_params->pll_cfg_val);
#endif

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwm_write_prph()
	 * and accesses to uCode SRAM.
	 */
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
		printf("%s: timeout waiting for clock stabilization\n",
		    DEVNAME(sc));
		err = ETIMEDOUT;
		goto out;
	}

	if (sc->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - This is needed for 7260 / 3160
		 * only check host_interrupt_operation_mode even if this is
		 * not related to host_interrupt_operation_mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		if (iwm_nic_lock(sc)) {
			iwm_read_prph(sc, IWM_OSC_CLK);
			iwm_read_prph(sc, IWM_OSC_CLK);
			iwm_nic_unlock(sc);
		}
		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
		if (iwm_nic_lock(sc)) {
			iwm_read_prph(sc, IWM_OSC_CLK);
			iwm_read_prph(sc, IWM_OSC_CLK);
			iwm_nic_unlock(sc);
		}
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(20);

		/* Disable L1-Active */
		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
			    IWM_APMG_RTC_INT_STT_RFKILL);
			iwm_nic_unlock(sc);
		}
	}
 out:
	/* Returns 0 on success, ETIMEDOUT if the clock never stabilized. */
	if (err)
		printf("%s: apm init error %d\n", DEVNAME(sc), err);
	return err;
}
1747 
/*
 * Power down the adapter: assert PREPARE/ENABLE_PME, stop busmaster
 * DMA (polling for MASTER_DISABLED), and clear "init done" to drop
 * the device from D0A back to the uninitialized D0U state.
 */
void
iwm_apm_stop(struct iwm_softc *sc)
{
	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE |
	    IWM_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
	DELAY(1000);
	IWM_CLRBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(5000);

	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		printf("%s: timeout waiting for master\n", DEVNAME(sc));

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
1776 
/*
 * Program the MSIX hardware configuration and, when MSIX is in use,
 * snapshot the resulting FH/HW cause masks as the "init" masks used
 * later for enabling/disabling interrupts.
 */
void
iwm_init_msix_hw(struct iwm_softc *sc)
{
	iwm_conf_msix_hw(sc, 0);

	if (!sc->sc_msix)
		return;

	/* Masks are active-low in hardware; invert for bookkeeping. */
	sc->sc_fh_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_FH_INT_MASK_AD);
	sc->sc_fh_mask = sc->sc_fh_init_mask;
	sc->sc_hw_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_HW_INT_MASK_AD);
	sc->sc_hw_mask = sc->sc_hw_init_mask;
}
1790 
/*
 * Configure interrupt-cause routing.  Without MSIX, just force the
 * device back to MSI mode (newer, MQ-RX-capable chips default to
 * MSIX).  With MSIX, map every RX queue and non-RX cause onto the
 * single vector 0 and unmask them.  'stopped' skips the PRPH write,
 * which requires a running device.
 */
void
iwm_conf_msix_hw(struct iwm_softc *sc, int stopped)
{
	int vector = 0;

	if (!sc->sc_msix) {
		/* Newer chips default to MSIX. */
		if (sc->sc_mqrx_supported && !stopped && iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_UREG_CHICK,
			    IWM_UREG_CHICK_MSI_ENABLE);
			iwm_nic_unlock(sc);
		}
		return;
	}

	if (!stopped && iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_UREG_CHICK, IWM_UREG_CHICK_MSIX_ENABLE);
		iwm_nic_unlock(sc);
	}

	/* Disable all interrupts */
	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD, ~0);
	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD, ~0);

	/* Map fallback-queue (command/mgmt) to a single vector */
	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(0),
	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
	/* Map RSS queue (data) to the same vector */
	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(1),
	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);

	/* Enable the RX queues cause interrupts */
	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
	    IWM_MSIX_FH_INT_CAUSES_Q0 | IWM_MSIX_FH_INT_CAUSES_Q1);

	/* Map non-RX causes to the same vector */
	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_S2D),
	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_FH_ERR),
	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_ALIVE),
	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_WAKEUP),
	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_IML),
	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_CT_KILL),
	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_RF_KILL),
	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_PERIODIC),
	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SW_ERR),
	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SCD),
	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_FH_TX),
	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HW_ERR),
	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HAP),
	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);

	/* Enable non-RX causes interrupts */
	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
	    IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
	    IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
	    IWM_MSIX_FH_INT_CAUSES_S2D |
	    IWM_MSIX_FH_INT_CAUSES_FH_ERR);
	IWM_CLRBITS(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
	    IWM_MSIX_HW_INT_CAUSES_REG_ALIVE |
	    IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP |
	    IWM_MSIX_HW_INT_CAUSES_REG_IML |
	    IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL |
	    IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL |
	    IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC |
	    IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR |
	    IWM_MSIX_HW_INT_CAUSES_REG_SCD |
	    IWM_MSIX_HW_INT_CAUSES_REG_FH_TX |
	    IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR |
	    IWM_MSIX_HW_INT_CAUSES_REG_HAP);
}
1877 
/*
 * Bring the NIC out of reset and into a state where firmware can be
 * loaded: obtain hardware ownership, soft-reset the device, start the
 * APM (power management) clocks, and program the MSI-X registers.
 * RF-kill interrupts are enabled so a switch toggle is noticed even
 * before the interface is brought up.
 * Returns 0 on success or an error from iwm_prepare_card_hw()/iwm_apm_init().
 */
int
iwm_start_hw(struct iwm_softc *sc)
{
	int err;

	/* Take ownership of the device from the ME firmware. */
	err = iwm_prepare_card_hw(sc);
	if (err)
		return err;

	/* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	err = iwm_apm_init(sc);
	if (err)
		return err;

	iwm_init_msix_hw(sc);

	/* Report current RF-kill state and arm its interrupt. */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return 0;
}
1902 
1903 
/*
 * Stop the device: quiesce TX/RX DMA, reset all rings, power down the
 * busmaster clocks (7000 family), stop the APM and soft-reset the
 * on-board processor.  The IVAR table and RF-kill interrupt are
 * re-armed afterwards so RF-kill toggles are still delivered while
 * the device is down.
 */
void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	iwm_disable_interrupts(sc);
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		/* Deactivate TX scheduler. */
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Poll up to 200 * 20us = 4ms for channel idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	iwm_reset_rx_ring(sc, &sc->rxq);

	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		if (iwm_nic_lock(sc)) {
			/* Power-down device's busmaster DMA clocks */
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/* Any outstanding nic_lock holders are now stale; warn and clear. */
	if (sc->sc_nic_locks > 0)
		printf("%s: %d active NIC locks forcefully cleared\n",
		    DEVNAME(sc), sc->sc_nic_locks);
	sc->sc_nic_locks = 0;

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Reset the on-board processor. */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwm_conf_msix_hw(sc, 1);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again.
	 */
	iwm_disable_interrupts(sc);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	iwm_prepare_card_hw(sc);
}
1986 
/*
 * Program the HW_IF_CONFIG register with the MAC revision (step/dash,
 * derived from the hardware revision register) and the radio
 * configuration (type/step/dash, derived from the firmware PHY config
 * TLV), using read-modify-write so unrelated bits are preserved.
 */
void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t mask, val, reg_val = 0;

	/* Unpack the radio configuration fields from the firmware image. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	/* Fields we own in HW_IF_CONFIG; everything else is left alone. */
	mask = IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;

	val = IWM_READ(sc, IWM_CSR_HW_IF_CONFIG_REG);
	val &= ~mask;
	val |= reg_val;
	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, val);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
2033 
2034 int
2035 iwm_nic_rx_init(struct iwm_softc *sc)
2036 {
2037 	if (sc->sc_mqrx_supported)
2038 		return iwm_nic_rx_mq_init(sc);
2039 	else
2040 		return iwm_nic_rx_legacy_init(sc);
2041 }
2042 
/*
 * Initialize multi-queue RX DMA hardware (RFH): program the DMA
 * addresses of the free/used descriptor rings and status page for
 * queue 0, enable the DMA engine with a 4KB buffer size, and activate
 * the queue.  Returns 0 on success or EBUSY if the NIC cannot be locked.
 */
int
iwm_nic_rx_mq_init(struct iwm_softc *sc)
{
	int enabled;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Stop RX DMA. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
	/* Disable RX used and free queue operation. */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);

	/* Program 64-bit ring/status DMA addresses for queue 0. */
	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
	    sc->rxq.free_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
	    sc->rxq.used_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
	    sc->rxq.stat_dma.paddr);
	/* Reset read/write pointers. */
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);

	/* We configure only queue 0 for now. */
	enabled = ((1 << 0) << 16) | (1 << 0);

	/* Enable RX DMA, 4KB buffer size. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
	    IWM_RFH_DMA_EN_ENABLE_VAL |
	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);

	/* Enable RX DMA snooping. */
	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
	    (sc->sc_integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));

	/* Enable the configured queue(s). */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);

	iwm_nic_unlock(sc);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* Tell the device the first 8 receive buffers are ready. */
	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);

	return 0;
}
2095 
/*
 * Initialize the legacy (single-queue, FH-based) RX DMA hardware:
 * reset pointers, program the ring and status DMA addresses, and
 * enable the RX config register with a 4KB buffer size.
 * Returns 0 on success or EBUSY if the NIC cannot be locked.
 */
int
iwm_nic_rx_legacy_init(struct iwm_softc *sc)
{
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	iwm_disable_rx_dma(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.free_desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	iwm_nic_unlock(sc);

	/*
	 * This value should initially be 0 (before preparing any RBs),
	 * and should be 8 after preparing the first 8 RBs (for example).
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	return 0;
}
2145 
/*
 * Initialize TX DMA hardware: deactivate the scheduler, program the
 * "keep warm" page and the per-queue descriptor ring addresses, and
 * enable auto-active mode for 31 queues.
 * Returns 0 on success or EBUSY if the NIC cannot be locked.
 */
int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
	}

	iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);

	iwm_nic_unlock(sc);

	return 0;
}
2176 
/*
 * Initialize the NIC: start the APM, select VMAIN power (7000 family),
 * program HW_IF_CONFIG, and bring up the RX and TX DMA engines.
 * Returns 0 on success or the first RX/TX init error.
 */
int
iwm_nic_init(struct iwm_softc *sc)
{
	int err;

	/* NOTE(review): iwm_apm_init() return value is deliberately ignored
	 * here; iwm_start_hw() is the path that checks it. */
	iwm_apm_init(sc);
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);

	iwm_nic_config(sc);

	err = iwm_nic_rx_init(sc);
	if (err)
		return err;

	err = iwm_nic_tx_init(sc);
	if (err)
		return err;

	/* Enable shadow registers for all CSRs we care about. */
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
2202 
/* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
const uint8_t iwm_ac_to_tx_fifo[] = {
	IWM_TX_FIFO_BE,		/* best effort */
	IWM_TX_FIFO_BK,		/* background */
	IWM_TX_FIFO_VI,		/* video */
	IWM_TX_FIFO_VO,		/* voice */
};
2210 
/*
 * Enable a Tx queue by programming the scheduler (SCD) registers and
 * the queue's SRAM context directly; used for queues (such as the
 * command queue) that must exist before firmware commands can be sent.
 * Caller must hold the NIC lock.  Always returns 0.
 */
int
iwm_enable_ac_txq(struct iwm_softc *sc, int qid, int fifo)
{
	iwm_nic_assert_locked(sc);

	/* Initialize the queue's hardware write pointer to slot 0. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	/* Take the queue out of active state before reconfiguring it. */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

	/* This queue does not aggregate. */
	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

	/* Clear the first word of the queue's scheduler context. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);

	/* Set scheduler window size and frame limit. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
	    sizeof(uint32_t),
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	    ((IWM_FRAME_LIMIT
		<< IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Activate the queue and bind it to the requested FIFO. */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWM_SCD_QUEUE_STTS_REG_MSK);

	if (qid == sc->cmdqid)
		iwm_write_prph(sc, IWM_SCD_EN_CTRL,
		    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid));

	return 0;
}
2251 
2252 int
2253 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
2254 {
2255 	struct iwm_scd_txq_cfg_cmd cmd;
2256 	int err;
2257 
2258 	iwm_nic_assert_locked(sc);
2259 
2260 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2261 
2262 	memset(&cmd, 0, sizeof(cmd));
2263 	cmd.scd_queue = qid;
2264 	cmd.enable = 1;
2265 	cmd.sta_id = sta_id;
2266 	cmd.tx_fifo = fifo;
2267 	cmd.aggregate = 0;
2268 	cmd.window = IWM_FRAME_LIMIT;
2269 
2270 	err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0,
2271 	    sizeof(cmd), &cmd);
2272 	if (err)
2273 		return err;
2274 
2275 	return 0;
2276 }
2277 
/*
 * Finish device setup after firmware has reported "alive": reset the
 * ICT table, clear the TX scheduler's SRAM state, program the
 * scheduler's DRAM base, enable the command queue and all FH TX DMA
 * channels.  Returns 0 on success, EBUSY if the NIC cannot be locked,
 * or an error from SRAM writes / queue setup.
 */
int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int err, chnl;
	uint32_t base;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* NOTE(review): `base' is read but otherwise unused here. */
	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);

	iwm_ict_reset(sc);

	iwm_nic_unlock(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	err = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (err)
		return err;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* enable command channel */
	err = iwm_enable_ac_txq(sc, sc->cmdqid, IWM_TX_FIFO_CMD);
	if (err) {
		iwm_nic_unlock(sc);
		return err;
	}

	/* Activate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	return err;
}
2341 
2342 struct iwm_phy_db_entry *
2343 iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id)
2344 {
2345 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2346 
2347 	if (type >= IWM_PHY_DB_MAX)
2348 		return NULL;
2349 
2350 	switch (type) {
2351 	case IWM_PHY_DB_CFG:
2352 		return &phy_db->cfg;
2353 	case IWM_PHY_DB_CALIB_NCH:
2354 		return &phy_db->calib_nch;
2355 	case IWM_PHY_DB_CALIB_CHG_PAPD:
2356 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2357 			return NULL;
2358 		return &phy_db->calib_ch_group_papd[chg_id];
2359 	case IWM_PHY_DB_CALIB_CHG_TXP:
2360 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2361 			return NULL;
2362 		return &phy_db->calib_ch_group_txp[chg_id];
2363 	default:
2364 		return NULL;
2365 	}
2366 	return NULL;
2367 }
2368 
/*
 * Store a calibration-result notification from firmware into the local
 * PHY DB.  Any previously stored data for the same section/group is
 * freed and replaced.  Returns EINVAL for an unknown section, ENOMEM
 * if allocation fails (entry is left empty in that case), 0 on success.
 */
int
iwm_phy_db_set_section(struct iwm_softc *sc,
    struct iwm_calib_res_notif_phy_db *phy_db_notif)
{
	uint16_t type = le16toh(phy_db_notif->type);
	uint16_t size  = le16toh(phy_db_notif->length);
	struct iwm_phy_db_entry *entry;
	uint16_t chg_id = 0;

	/* Channel-group sections carry the group id as the first LE16
	 * of their payload. */
	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
	    type == IWM_PHY_DB_CALIB_CHG_TXP)
		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);

	entry = iwm_phy_db_get_section(sc, type, chg_id);
	if (!entry)
		return EINVAL;

	if (entry->data)
		free(entry->data, M_DEVBUF, entry->size);
	entry->data = malloc(size, M_DEVBUF, M_NOWAIT);
	if (!entry->data) {
		entry->size = 0;
		return ENOMEM;
	}
	memcpy(entry->data, phy_db_notif->data, size);
	entry->size = size;

	return 0;
}
2398 
/*
 * Return 1 if ch_id is a channel number the PHY DB code handles:
 * 0-14 (2GHz numbering), 36-64 and 100-140 where ch_id % 4 == 0,
 * and 145-165 where ch_id % 4 == 1.  Return 0 otherwise.
 */
int
iwm_is_valid_channel(uint16_t ch_id)
{
	if (ch_id <= 14)
		return 1;
	if (ch_id >= 36 && ch_id <= 64)
		return (ch_id % 4) == 0;
	if (ch_id >= 100 && ch_id <= 140)
		return (ch_id % 4) == 0;
	if (ch_id >= 145 && ch_id <= 165)
		return (ch_id % 4) == 1;
	return 0;
}
2409 
/*
 * Convert a channel number into a contiguous 0-based channel index as
 * used by the PHY DB tables.  Returns 0xff for invalid channels.
 */
uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
	uint8_t idx;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id <= 14)
		idx = ch_id - 1;	/* 1-14 -> 0-13 */
	else if (ch_id <= 64)
		idx = (ch_id + 20) / 4;	/* 36-64 -> 14-21 */
	else if (ch_id <= 140)
		idx = (ch_id - 12) / 4;	/* 100-140 -> 22-32 */
	else
		idx = (ch_id - 13) / 4;	/* 145-165 -> 33-38 */

	return idx;
}
2424 
2425 
/*
 * Map a channel number to its PAPD calibration channel group:
 * group 0 for 1-14, 1 for 36-64, 2 for 100-140, 3 for the rest.
 * Returns 0xff for invalid channels.
 */
uint16_t
iwm_channel_id_to_papd(uint16_t ch_id)
{
	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id >= 1 && ch_id <= 14)
		return 0;
	else if (ch_id >= 36 && ch_id <= 64)
		return 1;
	else if (ch_id >= 100 && ch_id <= 140)
		return 2;
	else
		return 3;
}
2440 
/*
 * Map a channel number to its TX power calibration channel group by
 * scanning the stored TXP group entries for the first group whose
 * maximum channel index covers the channel.  Returns 0xff for invalid
 * channels or when no (complete) group data is available.
 */
uint16_t
iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
{
	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
	struct iwm_phy_db_chg_txp *txp_chg;
	int i;
	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);

	if (ch_index == 0xff)
		return 0xff;

	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
		if (!txp_chg)
			return 0xff;
		/*
		 * Looking for the first channel group the max channel
		 * of which is higher than the requested channel.
		 */
		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
			return i;
	}
	return 0xff;
}
2465 
2466 int
2467 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2468     uint16_t *size, uint16_t ch_id)
2469 {
2470 	struct iwm_phy_db_entry *entry;
2471 	uint16_t ch_group_id = 0;
2472 
2473 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2474 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2475 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2476 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2477 
2478 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2479 	if (!entry)
2480 		return EINVAL;
2481 
2482 	*data = entry->data;
2483 	*size = entry->size;
2484 
2485 	return 0;
2486 }
2487 
2488 int
2489 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2490     void *data)
2491 {
2492 	struct iwm_phy_db_cmd phy_db_cmd;
2493 	struct iwm_host_cmd cmd = {
2494 		.id = IWM_PHY_DB_CMD,
2495 		.flags = IWM_CMD_ASYNC,
2496 	};
2497 
2498 	phy_db_cmd.type = le16toh(type);
2499 	phy_db_cmd.length = le16toh(length);
2500 
2501 	cmd.data[0] = &phy_db_cmd;
2502 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2503 	cmd.data[1] = data;
2504 	cmd.len[1] = length;
2505 
2506 	return iwm_send_cmd(sc, &cmd);
2507 }
2508 
2509 int
2510 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type,
2511     uint8_t max_ch_groups)
2512 {
2513 	uint16_t i;
2514 	int err;
2515 	struct iwm_phy_db_entry *entry;
2516 
2517 	for (i = 0; i < max_ch_groups; i++) {
2518 		entry = iwm_phy_db_get_section(sc, type, i);
2519 		if (!entry)
2520 			return EINVAL;
2521 
2522 		if (!entry->size)
2523 			continue;
2524 
2525 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2526 		if (err)
2527 			return err;
2528 
2529 		DELAY(1000);
2530 	}
2531 
2532 	return 0;
2533 }
2534 
/*
 * Upload the entire PHY DB to the firmware: the config section, the
 * non-channel calibration section, and all PAPD and TXP channel
 * groups, in that order.  Returns 0 on success or the first error.
 */
int
iwm_send_phy_db_data(struct iwm_softc *sc)
{
	uint8_t *data = NULL;
	uint16_t size = 0;
	int err;

	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
	if (err)
		return err;

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
	if (err)
		return err;

	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
	    &data, &size, 0);
	if (err)
		return err;

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
	if (err)
		return err;

	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
	if (err)
		return err;

	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
	if (err)
		return err;

	return 0;
}
2571 
2572 /*
2573  * For the high priority TE use a time event type that has similar priority to
2574  * the FW's action scan priority.
2575  */
2576 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2577 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2578 
/*
 * Send a TIME_EVENT_CMD synchronously and parse the response.  On a
 * successful (status == 0) response the firmware-assigned unique id is
 * recorded in sc->sc_time_event_uid for later removal.
 * Returns 0 on success, EIO on a failed/malformed response, or an
 * error from iwm_send_cmd().
 */
int
iwm_send_time_event_cmd(struct iwm_softc *sc,
    const struct iwm_time_event_cmd *cmd)
{
	struct iwm_rx_packet *pkt;
	struct iwm_time_event_resp *resp;
	struct iwm_host_cmd hcmd = {
		.id = IWM_TIME_EVENT_CMD,
		.flags = IWM_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	uint32_t resp_len;
	int err;

	hcmd.data[0] = cmd;
	hcmd.len[0] = sizeof(*cmd);
	err = iwm_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	/* The response must be exactly one iwm_time_event_resp. */
	resp_len = iwm_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;
	if (le32toh(resp->status) == 0)
		sc->sc_time_event_uid = le32toh(resp->unique_id);
	else
		err = EIO;
out:
	/* Always release the response buffer. */
	iwm_free_resp(sc, &hcmd);
	return err;
}
2620 
/*
 * Schedule a firmware time event which keeps the device on-channel for
 * `duration' TU, used to protect association frame exchanges from
 * being interrupted.  `max_delay' bounds how long firmware may defer
 * the event.  Sets IWM_FLAG_TE_ACTIVE on success; no-op if a time
 * event is already scheduled.
 */
void
iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
    uint32_t duration, uint32_t max_delay)
{
	struct iwm_time_event_cmd time_cmd;

	/* Do nothing if a time event is already scheduled. */
	if (sc->sc_flags & IWM_FLAG_TE_ACTIVE)
		return;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);

	/* apply_time == 0: start as soon as possible. */
	time_cmd.apply_time = htole32(0);

	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy
	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
		IWM_T2_V2_START_IMMEDIATELY);

	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
		sc->sc_flags |= IWM_FLAG_TE_ACTIVE;

	DELAY(100);
}
2656 
2657 void
2658 iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in)
2659 {
2660 	struct iwm_time_event_cmd time_cmd;
2661 
2662 	/* Do nothing if the time event has already ended. */
2663 	if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE) == 0)
2664 		return;
2665 
2666 	memset(&time_cmd, 0, sizeof(time_cmd));
2667 
2668 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
2669 	time_cmd.id_and_color =
2670 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2671 	time_cmd.id = htole32(sc->sc_time_event_uid);
2672 
2673 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2674 		sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
2675 
2676 	DELAY(100);
2677 }
2678 
2679 /*
2680  * NVM read access and content parsing.  We do not support
2681  * external NVM or writing NVM.
2682  */
2683 
/*
 * List of NVM sections we are allowed/need to read; includes section
 * types for both the older layout and the 8000-family layout
 * (IWM_NVM_SECTION_TYPE_HW_8000 etc.).
 */
const int iwm_nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_REGULATORY,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
	IWM_NVM_SECTION_TYPE_REGULATORY_SDP,
	IWM_NVM_SECTION_TYPE_HW_8000,
	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
	IWM_NVM_SECTION_TYPE_PHY_SKU,
};
2696 
2697 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2698 
2699 #define IWM_NVM_WRITE_OPCODE 1
2700 #define IWM_NVM_READ_OPCODE 0
2701 
2702 int
2703 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2704     uint16_t length, uint8_t *data, uint16_t *len)
2705 {
2706 	offset = 0;
2707 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2708 		.offset = htole16(offset),
2709 		.length = htole16(length),
2710 		.type = htole16(section),
2711 		.op_code = IWM_NVM_READ_OPCODE,
2712 	};
2713 	struct iwm_nvm_access_resp *nvm_resp;
2714 	struct iwm_rx_packet *pkt;
2715 	struct iwm_host_cmd cmd = {
2716 		.id = IWM_NVM_ACCESS_CMD,
2717 		.flags = (IWM_CMD_WANT_RESP | IWM_CMD_SEND_IN_RFKILL),
2718 		.resp_pkt_len = IWM_CMD_RESP_MAX,
2719 		.data = { &nvm_access_cmd, },
2720 	};
2721 	int err, offset_read;
2722 	size_t bytes_read;
2723 	uint8_t *resp_data;
2724 
2725 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2726 
2727 	err = iwm_send_cmd(sc, &cmd);
2728 	if (err)
2729 		return err;
2730 
2731 	pkt = cmd.resp_pkt;
2732 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2733 		err = EIO;
2734 		goto exit;
2735 	}
2736 
2737 	/* Extract NVM response */
2738 	nvm_resp = (void *)pkt->data;
2739 	if (nvm_resp == NULL)
2740 		return EIO;
2741 
2742 	err = le16toh(nvm_resp->status);
2743 	bytes_read = le16toh(nvm_resp->length);
2744 	offset_read = le16toh(nvm_resp->offset);
2745 	resp_data = nvm_resp->data;
2746 	if (err) {
2747 		err = EINVAL;
2748 		goto exit;
2749 	}
2750 
2751 	if (offset_read != offset) {
2752 		err = EINVAL;
2753 		goto exit;
2754 	}
2755 
2756 	if (bytes_read > length) {
2757 		err = EINVAL;
2758 		goto exit;
2759 	}
2760 
2761 	memcpy(data + offset, resp_data, bytes_read);
2762 	*len = bytes_read;
2763 
2764  exit:
2765 	iwm_free_resp(sc, &cmd);
2766 	return err;
2767 }
2768 
2769 /*
2770  * Reads an NVM section completely.
2771  * NICs prior to 7000 family doesn't have a real NVM, but just read
2772  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
2773  * by uCode, we need to manually check in this case that we don't
2774  * overflow and try to read more than the EEPROM size.
2775  */
int
iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
    uint16_t *len, size_t max_len)
{
	uint16_t chunklen, seglen;
	int err = 0;

	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
	*len = 0;

	/* Read NVM chunks until exhausted (reading less than requested) */
	/* *len doubles as both the running byte count and the next offset. */
	while (seglen == chunklen && *len < max_len) {
		err = iwm_nvm_read_chunk(sc,
		    section, *len, chunklen, data, &seglen);
		if (err)
			return err;

		*len += seglen;
	}

	return err;
}
2798 
2799 uint8_t
2800 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2801 {
2802 	uint8_t tx_ant;
2803 
2804 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2805 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2806 
2807 	if (sc->sc_nvm.valid_tx_ant)
2808 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2809 
2810 	return tx_ant;
2811 }
2812 
2813 uint8_t
2814 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2815 {
2816 	uint8_t rx_ant;
2817 
2818 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2819 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2820 
2821 	if (sc->sc_nvm.valid_rx_ant)
2822 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2823 
2824 	return rx_ant;
2825 }
2826 
/*
 * Populate ic_channels from the NVM channel list and per-channel
 * flags: skip invalid channels (and all 5GHz channels if the SKU
 * disables that band), set 2GHz/5GHz mode flags, mark passive-scan
 * channels, and advertise HT where the SKU enables 11n.
 */
void
iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
    const uint8_t *nvm_channels, int nchan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	struct ieee80211_channel *channel;
	uint16_t ch_flags;
	int is_5ghz;
	int flags, hw_value;

	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);

		/* Entries past the 2GHz block are 5GHz channels. */
		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
		    !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWM_NVM_CHANNEL_VALID;

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID))
			continue;

		hw_value = nvm_channels[ch_idx];
		channel = &ic->ic_channels[hw_value];

		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
		if (!is_5ghz) {
			flags = IEEE80211_CHAN_2GHZ;
			channel->ic_flags
			    = IEEE80211_CHAN_CCK
			    | IEEE80211_CHAN_OFDM
			    | IEEE80211_CHAN_DYN
			    | IEEE80211_CHAN_2GHZ;
		} else {
			flags = IEEE80211_CHAN_5GHZ;
			channel->ic_flags =
			    IEEE80211_CHAN_A;
		}
		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);

		/* Channels not marked active may only be scanned passively. */
		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;

		if (data->sku_cap_11n_enable)
			channel->ic_flags |= IEEE80211_CHAN_HT;
	}
}
2874 
2875 int
2876 iwm_mimo_enabled(struct iwm_softc *sc)
2877 {
2878 	struct ieee80211com *ic = &sc->sc_ic;
2879 
2880 	return !sc->sc_nvm.sku_cap_mimo_disable &&
2881 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
2882 }
2883 
/*
 * Advertise supported HT MCS: MCS 0-7 always, and MCS 8-15 when MIMO
 * is enabled and at least two RX antennas are available.
 */
void
iwm_setup_ht_rates(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rx_ant;

	/* TX is supported with the same MCS as RX. */
	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;

	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */

	if (!iwm_mimo_enabled(sc))
		return;

	/* Two spatial streams require a pair of RX antennas (AB or BC). */
	rx_ant = iwm_fw_valid_rx_ant(sc);
	if ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
	    (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)
		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
}
2904 
2905 #define IWM_MAX_RX_BA_SESSIONS 16
2906 
/*
 * Start or stop an RX block-ack agreement for (node, tid) by sending
 * an ADD_STA modification to the firmware, then accept or refuse the
 * ADDBA request with net80211 according to the outcome.  At most
 * IWM_MAX_RX_BA_SESSIONS sessions are allowed concurrently.
 */
void
iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, uint16_t winsize, int start)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_add_sta_cmd cmd;
	struct iwm_node *in = (void *)ni;
	int err, s;
	uint32_t status;
	size_t cmdsize;

	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
		ieee80211_addba_req_refuse(ic, ni, tid);
		return;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.sta_id = IWM_STATION_ID;
	cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	cmd.add_modify = IWM_STA_MODE_MODIFY;

	/* NOTE(review): ssn/winsize are stored without htole16(); harmless
	 * on little-endian hosts — verify for big-endian targets. */
	if (start) {
		cmd.add_immediate_ba_tid = (uint8_t)tid;
		cmd.add_immediate_ba_ssn = ssn;
		cmd.rx_ba_window = winsize;
	} else {
		cmd.remove_immediate_ba_tid = (uint8_t)tid;
	}
	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
	    IWM_STA_MODIFY_REMOVE_BA_TID;

	status = IWM_ADD_STA_SUCCESS;
	/* Older firmware without STA_TYPE support takes the v7 layout. */
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
		cmdsize = sizeof(cmd);
	else
		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
	    &status);

	s = splnet();
	if (!err && (status & IWM_ADD_STA_STATUS_MASK) == IWM_ADD_STA_SUCCESS) {
		if (start) {
			sc->sc_rx_ba_sessions++;
			ieee80211_addba_req_accept(ic, ni, tid);
		} else if (sc->sc_rx_ba_sessions > 0)
			sc->sc_rx_ba_sessions--;
	} else if (start)
		ieee80211_addba_req_refuse(ic, ni, tid);

	splx(s);
}
2960 
/*
 * Process-context task: push the current BSS's HT protection settings
 * to the firmware via a MAC context MODIFY command.
 * Scheduled by iwm_update_htprot().
 */
void
iwm_htprot_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
	int err, s = splnet();

	/* Drop the task reference and bail if the driver is going away. */
	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;
	}

	/* This call updates HT protection based on in->in_ni.ni_htop1. */
	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
	if (err)
		printf("%s: could not change HT protection: error %d\n",
		    DEVNAME(sc), err);

	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
2984 
/*
 * This function is called by upper layer when HT protection settings in
 * beacons have changed.
 */
void
iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	struct iwm_softc *sc = ic->ic_softc;

	/* assumes that ni == ic->ic_bss */
	/* Defer to process context; firmware commands may sleep. */
	iwm_add_task(sc, systq, &sc->htprot_task);
}
2997 
/*
 * Process-context task: start or tear down the Rx block ack session
 * whose parameters were stashed in the softc by iwm_ampdu_rx_start()
 * or iwm_ampdu_rx_stop().
 */
void
iwm_ba_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int s = splnet();

	/* Drop the task reference and bail if the driver is going away. */
	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;
	}

	if (sc->ba_start)
		iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn,
		    sc->ba_winsize, 1);
	else
		iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0, 0);

	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
3021 
/*
 * This function is called by upper layer when an ADDBA request is received
 * from another STA and before the ADDBA response is sent.
 */
int
iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;

	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
		return ENOSPC;

	/*
	 * Stash the session parameters and defer the work to iwm_ba_task();
	 * firmware commands may sleep.  SSN and window size are stored in
	 * the little-endian byte order the firmware command expects.
	 */
	sc->ba_start = 1;
	sc->ba_tid = tid;
	sc->ba_ssn = htole16(ba->ba_winstart);
	sc->ba_winsize = htole16(ba->ba_winsize);
	iwm_add_task(sc, systq, &sc->ba_task);

	/*
	 * EBUSY tells net80211 to defer the ADDBA response until we call
	 * ieee80211_addba_req_accept() or _refuse() from the task.
	 */
	return EBUSY;
}
3044 
/*
 * This function is called by upper layer on teardown of an HT-immediate
 * Block Ack agreement (eg. upon receipt of a DELBA frame).
 */
void
iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;

	/* Defer teardown to iwm_ba_task(); firmware commands may sleep. */
	sc->ba_start = 0;
	sc->ba_tid = tid;
	iwm_add_task(sc, systq, &sc->ba_task);
}
3059 
/*
 * Determine the MAC address on 8000-family devices.  Prefer the address
 * from the NVM MAC-override section; fall back to the OTP address in the
 * WFMP PRPH registers when the override is the reserved placeholder or
 * otherwise invalid (broadcast, all-zero, or multicast).
 */
void
iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
    const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		/* Placeholder address indicating "no override present". */
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
		    (memcmp(etherbroadcastaddr, data->hw_addr,
		    sizeof(etherbroadcastaddr)) != 0) &&
		    (memcmp(etheranyaddr, data->hw_addr,
		    sizeof(etheranyaddr)) != 0) &&
		    !ETHER_IS_MULTICAST(data->hw_addr))
			return;
	}

	if (nvm_hw) {
		/* Read the mac address from WFMP registers. */
		uint32_t mac_addr0, mac_addr1;

		if (!iwm_nic_lock(sc))
			goto out;
		mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
		iwm_nic_unlock(sc);

		/* The register words hold the address bytes reversed. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}
out:
	/* No usable source; leave an all-zero address behind. */
	printf("%s: mac address not found\n", DEVNAME(sc));
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
3119 
/*
 * Parse raw NVM section contents into sc->sc_nvm: radio configuration,
 * SKU capabilities, MAC address, and the channel map.  The NVM layout
 * differs between the 7000 family and 8000-and-newer families, hence
 * the per-family branches throughout.  Always returns 0.
 */
int
iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
    const uint16_t *nvm_sw, const uint16_t *nvm_calib,
    const uint16_t *mac_override, const uint16_t *phy_sku,
    const uint16_t *regulatory, int n_regulatory)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[ETHER_ADDR_LEN];
	uint32_t sku;
	uint16_t lar_config;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	/* Radio config and SKU live in family-specific sections. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);

		sku = le16_to_cpup(nvm_sw + IWM_SKU);
	} else {
		uint32_t radio_cfg =
		    le32_to_cpup((uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);

		sku = le32_to_cpup((uint32_t *)(phy_sku + IWM_SKU_8000));
	}

	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;

	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
		/* The LAR config word moved in NVM version 0xE39. */
		uint16_t lar_offset = data->nvm_version < 0xE39 ?
				       IWM_NVM_LAR_OFFSET_8000_OLD :
				       IWM_NVM_LAR_OFFSET_8000;

		lar_config = le16_to_cpup(regulatory + lar_offset);
		data->lar_enabled = !!(lar_config &
				       IWM_NVM_LAR_ENABLED_8000);
		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS_8000);
	} else
		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);


	/* The byte order is little endian 16 bit, meaning 214365 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else
		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);

	/* Channel list location also depends on family and NVM type. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		if (sc->nvm_type == IWM_NVM_SDP) {
			iwm_init_channel_map(sc, regulatory, iwm_nvm_channels,
			    MIN(n_regulatory, nitems(iwm_nvm_channels)));
		} else {
			iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
			    iwm_nvm_channels, nitems(iwm_nvm_channels));
		}
	} else
		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
		    iwm_nvm_channels_8000,
		    MIN(n_regulatory, nitems(iwm_nvm_channels_8000)));

	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
3205 
/*
 * Validate that the NVM sections required for this device family were
 * read successfully, locate each section's data, and hand everything
 * to iwm_parse_nvm_data().  Returns ENOENT if a mandatory section is
 * missing, otherwise the result of iwm_parse_nvm_data().
 */
int
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
	const uint16_t *regulatory = NULL;
	int n_regulatory = 0;

	/* Checking for required sections */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
			return ENOENT;
		}

		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;

		/* SDP-type NVM additionally requires a regulatory section. */
		if (sc->nvm_type == IWM_NVM_SDP) {
			if (!sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data)
				return ENOENT;
			regulatory = (const uint16_t *)
			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data;
			n_regulatory =
			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].length;
		}
	} else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
		/* SW and REGULATORY sections are mandatory */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
			return ENOENT;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			return ENOENT;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
			return ENOENT;
		}

		regulatory = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
		n_regulatory = sections[IWM_NVM_SECTION_TYPE_REGULATORY].length;
		hw = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
		mac_override =
			(const uint16_t *)
			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
		phy_sku = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
	} else {
		panic("unknown device family %d\n", sc->sc_device_family);
	}

	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;

	/* XXX should pass in the length of every section */
	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
	    phy_sku, regulatory, n_regulatory);
}
3269 
3270 int
3271 iwm_nvm_init(struct iwm_softc *sc)
3272 {
3273 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
3274 	int i, section, err;
3275 	uint16_t len;
3276 	uint8_t *buf;
3277 	const size_t bufsz = sc->sc_nvm_max_section_size;
3278 
3279 	memset(nvm_sections, 0, sizeof(nvm_sections));
3280 
3281 	buf = malloc(bufsz, M_DEVBUF, M_WAIT);
3282 	if (buf == NULL)
3283 		return ENOMEM;
3284 
3285 	for (i = 0; i < nitems(iwm_nvm_to_read); i++) {
3286 		section = iwm_nvm_to_read[i];
3287 		KASSERT(section <= nitems(nvm_sections));
3288 
3289 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
3290 		if (err) {
3291 			err = 0;
3292 			continue;
3293 		}
3294 		nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT);
3295 		if (nvm_sections[section].data == NULL) {
3296 			err = ENOMEM;
3297 			break;
3298 		}
3299 		memcpy(nvm_sections[section].data, buf, len);
3300 		nvm_sections[section].length = len;
3301 	}
3302 	free(buf, M_DEVBUF, bufsz);
3303 	if (err == 0)
3304 		err = iwm_parse_nvm_sections(sc, nvm_sections);
3305 
3306 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
3307 		if (nvm_sections[i].data != NULL)
3308 			free(nvm_sections[i].data, M_DEVBUF,
3309 			    nvm_sections[i].length);
3310 	}
3311 
3312 	return err;
3313 }
3314 
3315 int
3316 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
3317     const uint8_t *section, uint32_t byte_cnt)
3318 {
3319 	int err = EINVAL;
3320 	uint32_t chunk_sz, offset;
3321 
3322 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
3323 
3324 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
3325 		uint32_t addr, len;
3326 		const uint8_t *data;
3327 
3328 		addr = dst_addr + offset;
3329 		len = MIN(chunk_sz, byte_cnt - offset);
3330 		data = section + offset;
3331 
3332 		err = iwm_firmware_load_chunk(sc, addr, data, len);
3333 		if (err)
3334 			break;
3335 	}
3336 
3337 	return err;
3338 }
3339 
/*
 * DMA one firmware chunk into device SRAM at dst_addr using the FH
 * service channel, then sleep until the "chunk done" interrupt fires
 * or a one-second timeout expires.  Returns 0 on success, EBUSY if
 * the NIC could not be locked, or a tsleep error.
 */
int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
    const uint8_t *chunk, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	int err;

	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, chunk, byte_cnt);
	bus_dmamap_sync(sc->sc_dmat,
	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);

	/* Destinations in the extended range need a chicken-bit set. */
	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
	    dst_addr <= IWM_FW_MEM_EXTENDED_END)
		iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Program the FH service channel: pause it, set the SRAM target
	 * address and source DMA address/length, mark the buffer valid,
	 * then re-enable the channel to start the transfer.
	 */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* Wait for this segment to load. */
	err = 0;
	while (!sc->sc_fw_chunk_done) {
		err = tsleep_nsec(&sc->sc_fw, 0, "iwmfw", SEC_TO_NSEC(1));
		if (err)
			break;
	}

	if (!sc->sc_fw_chunk_done)
		printf("%s: fw chunk addr 0x%x len %d failed to load\n",
		    DEVNAME(sc), dst_addr, byte_cnt);

	/* Undo the extended-address chicken bit if we set it above. */
	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
		iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
	}

	return err;
}
3402 
3403 int
3404 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3405 {
3406 	struct iwm_fw_sects *fws;
3407 	int err, i;
3408 	void *data;
3409 	uint32_t dlen;
3410 	uint32_t offset;
3411 
3412 	fws = &sc->sc_fw.fw_sects[ucode_type];
3413 	for (i = 0; i < fws->fw_count; i++) {
3414 		data = fws->fw_sect[i].fws_data;
3415 		dlen = fws->fw_sect[i].fws_len;
3416 		offset = fws->fw_sect[i].fws_devoff;
3417 		if (dlen > sc->sc_fwdmasegsz) {
3418 			err = EFBIG;
3419 		} else
3420 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3421 		if (err) {
3422 			printf("%s: could not load firmware chunk %u of %u\n",
3423 			    DEVNAME(sc), i, fws->fw_count);
3424 			return err;
3425 		}
3426 	}
3427 
3428 	iwm_enable_interrupts(sc);
3429 
3430 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
3431 
3432 	return 0;
3433 }
3434 
/*
 * Upload the firmware sections belonging to one on-chip CPU (1 or 2)
 * on 8000-family devices, reporting each loaded section's number to
 * the ucode via the FH_UCODE_LOAD_STATUS register.  *first_ucode_section
 * tracks progress across the two calls (CPU1 then CPU2).
 * Returns 0 or an errno value.
 */
int
iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
    int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, err = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;
	void *data;
	uint32_t dlen;
	uint32_t offset;

	/* CPU2's status bits live in the upper half of the register. */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
		last_read_idx = i;
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    offset == IWM_PAGING_SEPARATOR_SECTION)
			break;

		if (dlen > sc->sc_fwdmasegsz) {
			err = EFBIG;
		} else
			err = iwm_firmware_load_sect(sc, offset, data, dlen);
		if (err) {
			printf("%s: could not load firmware chunk %d "
			    "(error %d)\n", DEVNAME(sc), i, err);
			return err;
		}

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		} else {
			err = EBUSY;
			printf("%s: could not load firmware chunk %d "
			    "(error %d)\n", DEVNAME(sc), i, err);
			return err;
		}
	}

	*first_ucode_section = last_read_idx;

	/* Signal "all sections done" for this CPU to the ucode. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	} else {
		err = EBUSY;
		printf("%s: could not finalize firmware loading (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	return 0;
}
3512 
/*
 * Load the selected ucode image on 8000-family devices: release the
 * CPUs from reset, then upload the CPU1 and CPU2 section groups in
 * turn and enable interrupts.  Returns 0 or an errno value.
 */
int
iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_sects *fws;
	int err = 0;
	int first_ucode_section;

	fws = &sc->sc_fw.fw_sects[ucode_type];

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
		    IWM_RELEASE_CPU_RESET_BIT);
		iwm_nic_unlock(sc);
	}

	/* load to FW the binary Secured sections of CPU1 */
	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
	if (err)
		return err;

	/* load to FW the binary sections of CPU2 */
	err = iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
	if (err)
		return err;

	iwm_enable_interrupts(sc);
	return 0;
}
3543 
/*
 * Load the requested ucode image via the family-specific loader, then
 * wait up to one second (10 x 100ms) for the firmware's "alive"
 * interrupt to set sc_uc.uc_intr.  Returns 0 on success or an errno.
 */
int
iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int err, w;

	sc->sc_uc.uc_intr = 0;

	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
		err = iwm_load_firmware_8000(sc, ucode_type);
	else
		err = iwm_load_firmware_7000(sc, ucode_type);

	if (err)
		return err;

	/* wait for the firmware to load */
	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
		err = tsleep_nsec(&sc->sc_uc, 0, "iwmuc", MSEC_TO_NSEC(100));
	}
	if (err || !sc->sc_uc.uc_ok)
		printf("%s: could not load firmware\n", DEVNAME(sc));

	return err;
}
3568 
/*
 * Prepare the NIC for a firmware load: init hardware, clear the rfkill
 * handshake bits, arm the firmware-load interrupt, then hand off to
 * iwm_load_firmware().  Returns 0 or an errno value.
 */
int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int err;

	/* Acknowledge any pending interrupts before starting. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	err = iwm_nic_init(sc);
	if (err) {
		printf("%s: unable to init nic\n", DEVNAME(sc));
		return err;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable firwmare load interrupt */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_fwload_interrupt(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	return iwm_load_firmware(sc, ucode_type);
}
3598 
3599 int
3600 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3601 {
3602 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3603 		.valid = htole32(valid_tx_ant),
3604 	};
3605 
3606 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
3607 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3608 }
3609 
/*
 * Send the PHY configuration and the per-ucode-type calibration
 * triggers to the firmware.  Must run after the ucode is alive, since
 * sc_uc_current selects which calibration defaults apply.
 */
int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->sc_uc_current;

	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
3625 
3626 int
3627 iwm_send_dqa_cmd(struct iwm_softc *sc)
3628 {
3629 	struct iwm_dqa_enable_cmd dqa_cmd = {
3630 		.cmd_queue = htole32(IWM_DQA_CMD_QUEUE),
3631 	};
3632 	uint32_t cmd_id;
3633 
3634 	cmd_id = iwm_cmd_id(IWM_DQA_ENABLE_CMD, IWM_DATA_PATH_GROUP, 0);
3635 	return iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
3636 }
3637 
/*
 * Read the requested firmware image if necessary, start it, and wait
 * for the "alive" notification.  Also selects the command queue id
 * (DQA vs. legacy) and, when the image uses paging, configures the
 * firmware paging mechanism.  Returns 0 or an errno value.
 */
int
iwm_load_ucode_wait_alive(struct iwm_softc *sc,
	enum iwm_ucode_type ucode_type)
{
	enum iwm_ucode_type old_type = sc->sc_uc_current;
	struct iwm_fw_sects *fw = &sc->sc_fw.fw_sects[ucode_type];
	int err;

	err = iwm_read_firmware(sc, ucode_type);
	if (err)
		return err;

	/* Command queue id depends on whether the firmware supports DQA. */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
		sc->cmdqid = IWM_DQA_CMD_QUEUE;
	else
		sc->cmdqid = IWM_CMD_QUEUE;

	sc->sc_uc_current = ucode_type;
	err = iwm_start_fw(sc, ucode_type);
	if (err) {
		/* Restore the previously running image type on failure. */
		sc->sc_uc_current = old_type;
		return err;
	}

	err = iwm_post_alive(sc);
	if (err)
		return err;

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		err = iwm_save_fw_paging(sc, fw);
		if (err) {
			printf("%s: failed to save the FW paging image\n",
			    DEVNAME(sc));
			return err;
		}

		err = iwm_send_paging_cmd(sc, fw);
		if (err) {
			printf("%s: failed to send the paging cmd\n",
			    DEVNAME(sc));
			iwm_free_fw_paging(sc);
			return err;
		}
	}

	return 0;
}
3690 
/*
 * Boot the INIT ucode image.  With justnvm set, only read the NVM and
 * derive the MAC address; otherwise also run the firmware's internal
 * calibrations and wait for the init/calibration-complete
 * notifications.  Returns 0 or an errno value.
 */
int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	const int wait_flags = (IWM_INIT_COMPLETE | IWM_CALIB_COMPLETE);
	int err;

	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		printf("%s: radio is disabled by hardware switch\n",
		    DEVNAME(sc));
		return EPERM;
	}

	sc->sc_init_complete = 0;
	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
	if (err) {
		printf("%s: failed to load init firmware\n", DEVNAME(sc));
		return err;
	}

	/* 8000-and-newer devices configure BT coex elsewhere. */
	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
		err = iwm_send_bt_init_conf(sc);
		if (err) {
			printf("%s: could not init bt coex (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
	}

	if (justnvm) {
		err = iwm_nvm_init(sc);
		if (err) {
			printf("%s: failed to read nvm\n", DEVNAME(sc));
			return err;
		}

		/* Adopt the NVM MAC address unless one was already set. */
		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
			    sc->sc_nvm.hw_addr);

		return 0;
	}

	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
	if (err)
		return err;

	/* Send TX valid antennas before triggering calibrations */
	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
	if (err)
		return err;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	err = iwm_send_phy_cfg_cmd(sc);
	if (err)
		return err;

	/*
	 * Nothing to do but wait for the init complete and phy DB
	 * notifications from the firmware.
	 */
	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwminit",
		    SEC_TO_NSEC(2));
		if (err)
			break;
	}

	return err;
}
3763 
3764 int
3765 iwm_config_ltr(struct iwm_softc *sc)
3766 {
3767 	struct iwm_ltr_config_cmd cmd = {
3768 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
3769 	};
3770 
3771 	if (!sc->sc_ltr_enabled)
3772 		return 0;
3773 
3774 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3775 }
3776 
/*
 * Allocate and DMA-map a new receive mbuf of the given size for RX ring
 * slot idx, and point the slot's hardware descriptor at it.  Returns 0
 * on success or an errno value (ENOBUFS if no mbuf/cluster available).
 */
int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	/* Attach a cluster large enough for the requested buffer size. */
	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MCLGETL(m, M_DONTWAIT, IWM_RBUF_SIZE);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/*
	 * If the slot already had a buffer we are replacing it; a load
	 * failure would then leave the hardware without a mapped buffer,
	 * which we cannot recover from.
	 */
	if (data->m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		if (fatal)
			panic("iwm: could not load RX mbuf");
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	if (sc->sc_mqrx_supported) {
		/* Multi-queue RX uses 64-bit free-descriptor entries. */
		((uint64_t *)ring->desc)[idx] =
		    htole64(data->map->dm_segs[0].ds_addr);
		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
		    idx * sizeof(uint64_t), sizeof(uint64_t),
		    BUS_DMASYNC_PREWRITE);
	} else {
		/* Legacy RX stores the DMA address shifted right by 8. */
		((uint32_t *)ring->desc)[idx] =
		    htole32(data->map->dm_segs[0].ds_addr >> 8);
		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
		    idx * sizeof(uint32_t), sizeof(uint32_t),
		    BUS_DMASYNC_PREWRITE);
	}

	return 0;
}
3835 
3836 /*
3837  * RSSI values are reported by the FW as positive values - need to negate
3838  * to obtain their dBM.  Account for missing antennas by replacing 0
3839  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3840  */
3841 int
3842 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3843 {
3844 	int energy_a, energy_b, energy_c, max_energy;
3845 	uint32_t val;
3846 
3847 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3848 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3849 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3850 	energy_a = energy_a ? -energy_a : -256;
3851 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3852 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3853 	energy_b = energy_b ? -energy_b : -256;
3854 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3855 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3856 	energy_c = energy_c ? -energy_c : -256;
3857 	max_energy = MAX(energy_a, energy_b);
3858 	max_energy = MAX(max_energy, energy_c);
3859 
3860 	return max_energy;
3861 }
3862 
3863 int
3864 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
3865     struct iwm_rx_mpdu_desc *desc)
3866 {
3867 	int energy_a, energy_b;
3868 
3869 	energy_a = desc->v1.energy_a;
3870 	energy_b = desc->v1.energy_b;
3871 	energy_a = energy_a ? -energy_a : -256;
3872 	energy_b = energy_b ? -energy_b : -256;
3873 	return MAX(energy_a, energy_b);
3874 }
3875 
/*
 * Handle an RX_PHY notification: cache the PHY info so the subsequent
 * MPDU notification for the same frame can use it.
 */
void
iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	/* Make the DMA'd payload visible to the CPU before reading it. */
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
3887 
3888 /*
3889  * Retrieve the average noise (in dBm) among receivers.
3890  */
3891 int
3892 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3893 {
3894 	int i, total, nbant, noise;
3895 
3896 	total = nbant = noise = 0;
3897 	for (i = 0; i < 3; i++) {
3898 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
3899 		if (noise) {
3900 			total += noise;
3901 			nbant++;
3902 		}
3903 	}
3904 
3905 	/* There should be at least one antenna but check anyway. */
3906 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3907 }
3908 
/*
 * Post-process a frame that the hardware decrypted with CCMP: validate
 * the CCMP header and perform the replay check against the per-TID
 * receive sequence counter.  Returns 0 if the frame is acceptable,
 * 1 if it must be dropped.
 */
int
iwm_ccmp_decap(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_key *k = &ni->ni_pairwise_key;
	struct ieee80211_frame *wh;
	uint64_t pn, *prsc;
	uint8_t *ivp;
	uint8_t tid;
	int hdrlen, hasqos;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_get_hdrlen(wh);
	/* The CCMP header immediately follows the 802.11 header. */
	ivp = (uint8_t *)wh + hdrlen;

	/* Check that ExtIV bit is set. */
	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
		return 1;

	/* QoS frames keep a separate replay counter per TID. */
	hasqos = ieee80211_has_qos(wh);
	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
	prsc = &k->k_rsc[tid];

	/* Extract the 48-bit PN from the CCMP header. */
	pn = (uint64_t)ivp[0]       |
	     (uint64_t)ivp[1] <<  8 |
	     (uint64_t)ivp[4] << 16 |
	     (uint64_t)ivp[5] << 24 |
	     (uint64_t)ivp[6] << 32 |
	     (uint64_t)ivp[7] << 40;
	/* A PN at or below the last-seen value indicates a replay. */
	if (pn <= *prsc) {
		ic->ic_stats.is_ccmp_replays++;
		return 1;
	}
	/* Last seen packet number is updated in ieee80211_inputm(). */

	/*
	 * Some firmware versions strip the MIC, and some don't. It is not
	 * clear which of the capability flags could tell us what to expect.
	 * For now, keep things simple and just leave the MIC in place if
	 * it is present.
	 *
	 * The IV will be stripped by ieee80211_inputm().
	 */
	return 0;
}
3955 
3956 void
3957 iwm_rx_frame(struct iwm_softc *sc, struct mbuf *m, int chanidx,
3958     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
3959     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
3960     struct mbuf_list *ml)
3961 {
3962 	struct ieee80211com *ic = &sc->sc_ic;
3963 	struct ieee80211_frame *wh;
3964 	struct ieee80211_node *ni;
3965 	struct ieee80211_channel *bss_chan;
3966 	uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
3967 	struct ifnet *ifp = IC2IFP(ic);
3968 
3969 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
3970 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
3971 
3972 	wh = mtod(m, struct ieee80211_frame *);
3973 	ni = ieee80211_find_rxnode(ic, wh);
3974 	if (ni == ic->ic_bss) {
3975 		/*
3976 		 * We may switch ic_bss's channel during scans.
3977 		 * Record the current channel so we can restore it later.
3978 		 */
3979 		bss_chan = ni->ni_chan;
3980 		IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
3981 	}
3982 	ni->ni_chan = &ic->ic_channels[chanidx];
3983 
3984 	/* Handle hardware decryption. */
3985 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
3986 	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
3987 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3988 	    (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
3989 	    ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
3990 		if ((rx_pkt_status & IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
3991 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
3992 			ic->ic_stats.is_ccmp_dec_errs++;
3993 			ifp->if_ierrors++;
3994 			m_freem(m);
3995 			ieee80211_release_node(ic, ni);
3996 			return;
3997 		}
3998 		/* Check whether decryption was successful or not. */
3999 		if ((rx_pkt_status &
4000 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4001 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) !=
4002 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4003 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) {
4004 			ic->ic_stats.is_ccmp_dec_errs++;
4005 			ifp->if_ierrors++;
4006 			m_freem(m);
4007 			ieee80211_release_node(ic, ni);
4008 			return;
4009 		}
4010 		if (iwm_ccmp_decap(sc, m, ni) != 0) {
4011 			ifp->if_ierrors++;
4012 			m_freem(m);
4013 			ieee80211_release_node(ic, ni);
4014 			return;
4015 		}
4016 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
4017 	}
4018 
4019 #if NBPFILTER > 0
4020 	if (sc->sc_drvbpf != NULL) {
4021 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
4022 		uint16_t chan_flags;
4023 
4024 		tap->wr_flags = 0;
4025 		if (is_shortpre)
4026 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4027 		tap->wr_chan_freq =
4028 		    htole16(ic->ic_channels[chanidx].ic_freq);
4029 		chan_flags = ic->ic_channels[chanidx].ic_flags;
4030 		if (ic->ic_curmode != IEEE80211_MODE_11N)
4031 			chan_flags &= ~IEEE80211_CHAN_HT;
4032 		tap->wr_chan_flags = htole16(chan_flags);
4033 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4034 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4035 		tap->wr_tsft = device_timestamp;
4036 		if (rate_n_flags & IWM_RATE_MCS_HT_MSK) {
4037 			uint8_t mcs = (rate_n_flags &
4038 			    (IWM_RATE_HT_MCS_RATE_CODE_MSK |
4039 			    IWM_RATE_HT_MCS_NSS_MSK));
4040 			tap->wr_rate = (0x80 | mcs);
4041 		} else {
4042 			uint8_t rate = (rate_n_flags &
4043 			    IWM_RATE_LEGACY_RATE_MSK);
4044 			switch (rate) {
4045 			/* CCK rates. */
4046 			case  10: tap->wr_rate =   2; break;
4047 			case  20: tap->wr_rate =   4; break;
4048 			case  55: tap->wr_rate =  11; break;
4049 			case 110: tap->wr_rate =  22; break;
4050 			/* OFDM rates. */
4051 			case 0xd: tap->wr_rate =  12; break;
4052 			case 0xf: tap->wr_rate =  18; break;
4053 			case 0x5: tap->wr_rate =  24; break;
4054 			case 0x7: tap->wr_rate =  36; break;
4055 			case 0x9: tap->wr_rate =  48; break;
4056 			case 0xb: tap->wr_rate =  72; break;
4057 			case 0x1: tap->wr_rate =  96; break;
4058 			case 0x3: tap->wr_rate = 108; break;
4059 			/* Unknown rate: should not happen. */
4060 			default:  tap->wr_rate =   0;
4061 			}
4062 		}
4063 
4064 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4065 		    m, BPF_DIRECTION_IN);
4066 	}
4067 #endif
4068 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4069 	/*
4070 	 * ieee80211_inputm() might have changed our BSS.
4071 	 * Restore ic_bss's channel if we are still in the same BSS.
4072 	 */
4073 	if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
4074 		ni->ni_chan = bss_chan;
4075 	ieee80211_release_node(ic, ni);
4076 }
4077 
/*
 * Handle a received frame (RX_MPDU notification).
 *
 * PHY information for this frame was cached in sc->sc_last_phy_info by a
 * preceding RX_PHY_INFO notification (see the memcpy above). pktdata
 * points at the Rx descriptor followed by the 802.11 frame and a trailing
 * status word; maxlen bounds the valid data. On acceptance the frame is
 * handed to iwm_rx_frame(), otherwise the mbuf is freed here.
 */
void
iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
    size_t maxlen, struct mbuf_list *ml)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_rxinfo rxi;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	int device_timestamp;
	uint16_t phy_flags;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi, chanidx, rate_n_flags;

	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pktdata;
	len = le16toh(rx_res->byte_count);
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* Allow control frames in monitor mode. */
		if (len < sizeof(struct ieee80211_frame_cts)) {
			ic->ic_stats.is_rx_tooshort++;
			IC2IFP(ic)->if_ierrors++;
			m_freem(m);
			return;
		}
	} else if (len < sizeof(struct ieee80211_frame)) {
		/* Too short to be a valid 802.11 frame. */
		ic->ic_stats.is_rx_tooshort++;
		IC2IFP(ic)->if_ierrors++;
		m_freem(m);
		return;
	}
	/* The frame must fit within the Rx buffer after the descriptor. */
	if (len > maxlen - sizeof(*rx_res)) {
		IC2IFP(ic)->if_ierrors++;
		m_freem(m);
		return;
	}

	/* Sanity-check the PHY data count from the cached PHY info. */
	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		m_freem(m);
		return;
	}

	/* The Rx status word follows the frame payload. */
	rx_pkt_status = le32toh(*(uint32_t *)(pktdata + sizeof(*rx_res) + len));
	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		m_freem(m);
		return; /* drop */
	}

	/* Point the mbuf at the 802.11 frame, skipping the Rx descriptor. */
	m->m_data = pktdata + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	chanidx = letoh32(phy_info->channel);
	device_timestamp = le32toh(phy_info->system_timestamp);
	phy_flags = letoh16(phy_info->phy_flags);
	rate_n_flags = le32toh(phy_info->rate_n_flags);

	rssi = iwm_get_signal_strength(sc, phy_info);
	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */

	memset(&rxi, 0, sizeof(rxi));
	rxi.rxi_rssi = rssi;
	rxi.rxi_tstamp = device_timestamp;

	iwm_rx_frame(sc, m, chanidx, rx_pkt_status,
	    (phy_flags & IWM_PHY_INFO_FLAG_SHPREAMBLE),
	    rate_n_flags, device_timestamp, &rxi, ml);
}
4147 
/*
 * Handle a received frame on devices with the multi-queue Rx path.
 *
 * Unlike iwm_rx_mpdu(), the Rx descriptor here (struct iwm_rx_mpdu_desc)
 * carries the PHY information inline, so no cached PHY info notification
 * is needed. On acceptance the frame is handed to iwm_rx_frame(),
 * otherwise the mbuf is freed here.
 */
void
iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
    size_t maxlen, struct mbuf_list *ml)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_rxinfo rxi;
	struct iwm_rx_mpdu_desc *desc;
	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
	int rssi;
	uint8_t chanidx;
	uint16_t phy_info;

	desc = (struct iwm_rx_mpdu_desc *)pktdata;

	/* Drop frames which failed CRC or overran the Rx FIFO. */
	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
		m_freem(m);
		return; /* drop */
	}

	len = le16toh(desc->mpdu_len);
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* Allow control frames in monitor mode. */
		if (len < sizeof(struct ieee80211_frame_cts)) {
			ic->ic_stats.is_rx_tooshort++;
			IC2IFP(ic)->if_ierrors++;
			m_freem(m);
			return;
		}
	} else if (len < sizeof(struct ieee80211_frame)) {
		/* Too short to be a valid 802.11 frame. */
		ic->ic_stats.is_rx_tooshort++;
		IC2IFP(ic)->if_ierrors++;
		m_freem(m);
		return;
	}
	/* The frame must fit within the Rx buffer after the descriptor. */
	if (len > maxlen - sizeof(*desc)) {
		IC2IFP(ic)->if_ierrors++;
		m_freem(m);
		return;
	}

	/* Point the mbuf at the 802.11 frame, skipping the Rx descriptor. */
	m->m_data = pktdata + sizeof(*desc);
	m->m_pkthdr.len = m->m_len = len;

	/* Account for padding following the frame header. */
	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD) {
		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
		/* Determine the 802.11 header length to know where the pad sits. */
		if (type == IEEE80211_FC0_TYPE_CTL) {
			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
			case IEEE80211_FC0_SUBTYPE_CTS:
				hdrlen = sizeof(struct ieee80211_frame_cts);
				break;
			case IEEE80211_FC0_SUBTYPE_ACK:
				hdrlen = sizeof(struct ieee80211_frame_ack);
				break;
			default:
				hdrlen = sizeof(struct ieee80211_frame_min);
				break;
			}
		} else
			hdrlen = ieee80211_get_hdrlen(wh);

		if ((le16toh(desc->status) &
		    IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
			/* Padding is inserted after the IV. */
			hdrlen += IEEE80211_CCMP_HDRLEN;
		}

		/* Remove the 2-byte pad by shifting the header forward. */
		memmove(m->m_data + 2, m->m_data, hdrlen);
		m_adj(m, 2);
	}

	phy_info = le16toh(desc->phy_info);
	rate_n_flags = le32toh(desc->v1.rate_n_flags);
	chanidx = desc->v1.channel;
	/*
	 * NOTE(review): gp2_on_air_rise is used without a byte-swap here,
	 * unlike rate_n_flags above — harmless on little-endian hosts;
	 * confirm for big-endian.
	 */
	device_timestamp = desc->v1.gp2_on_air_rise;

	rssi = iwm_rxmq_get_signal_strength(sc, desc);
	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */

	memset(&rxi, 0, sizeof(rxi));
	rxi.rxi_rssi = rssi;
	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);

	iwm_rx_frame(sc, m, chanidx, le16toh(desc->status),
	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
	    rate_n_flags, device_timestamp, &rxi, ml);
}
4239 
/*
 * Process the firmware's Tx response for a single (non-aggregated) frame
 * and feed the outcome into the rate control algorithm: AMRR for legacy
 * rates, MiRA for HT MCS rates. txmcs/txrate are the rate indices the
 * frame was queued with.
 */
void
iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_node *in, int txmcs, int txrate)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
	int txfail;

	KASSERT(tx_resp->frame_count == 1);

	/* Anything other than success/direct-done counts as a failure. */
	txfail = (status != IWM_TX_STATUS_SUCCESS &&
	    status != IWM_TX_STATUS_DIRECT_DONE);

	/*
	 * Update rate control statistics.
	 * Only report frames which were actually queued with the currently
	 * selected Tx rate. Because Tx queues are relatively long we may
	 * encounter previously selected rates here during Tx bursts.
	 * Providing feedback based on such frames can lead to suboptimal
	 * Tx rate control decisions.
	 */
	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) {
		/* Legacy rates: feed AMRR counters. */
		if (txrate == ni->ni_txrate) {
			in->in_amn.amn_txcnt++;
			if (txfail)
				in->in_amn.amn_retrycnt++;
			if (tx_resp->failure_frame > 0)
				in->in_amn.amn_retrycnt++;
		}
	} else if (ic->ic_fixed_mcs == -1 && txmcs == ni->ni_txmcs) {
		/* HT rates: feed MiRA statistics. */
		in->in_mn.frames += tx_resp->frame_count;
		in->in_mn.ampdu_size = le16toh(tx_resp->byte_cnt);
		in->in_mn.agglen = tx_resp->frame_count;
		if (tx_resp->failure_frame > 0)
			in->in_mn.retries += tx_resp->failure_frame;
		if (txfail)
			in->in_mn.txfail += tx_resp->frame_count;
		if (ic->ic_state == IEEE80211_S_RUN) {
			int best_mcs;

			ieee80211_mira_choose(&in->in_mn, ic, &in->in_ni);
			/*
			 * If MiRA has chosen a new TX rate we must update
			 * the firmware's LQ rate table from process context.
			 * ni_txmcs may change again before the task runs so
			 * cache the chosen rate in the iwm_node structure.
			 */
			best_mcs = ieee80211_mira_get_best_mcs(&in->in_mn);
			if (best_mcs != in->chosen_txmcs) {
				in->chosen_txmcs = best_mcs;
				iwm_setrates(in, 1);
			}
		}
	}

	if (txfail)
		ifp->if_oerrors++;
}
4301 
4302 void
4303 iwm_txd_done(struct iwm_softc *sc, struct iwm_tx_data *txd)
4304 {
4305 	struct ieee80211com *ic = &sc->sc_ic;
4306 
4307 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
4308 	    BUS_DMASYNC_POSTWRITE);
4309 	bus_dmamap_unload(sc->sc_dmat, txd->map);
4310 	m_freem(txd->m);
4311 	txd->m = NULL;
4312 
4313 	KASSERT(txd->in);
4314 	ieee80211_release_node(ic, &txd->in->in_ni);
4315 	txd->in = NULL;
4316 }
4317 
/*
 * Handle a Tx completion notification from the firmware: update rate
 * control for the completed frame, reclaim its Tx slot, sweep up any
 * missed completions behind it, and restart output if the queue drains
 * below the low watermark.
 */
void
iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* A completion arrived; reset the watchdog. */
	sc->sc_tx_timer = 0;

	/* Slot already reclaimed (e.g. by the missed-completion sweep). */
	txd = &ring->data[idx];
	if (txd->m == NULL)
		return;

	iwm_rx_tx_cmd_single(sc, pkt, txd->in, txd->txmcs, txd->txrate);
	iwm_txd_done(sc, txd);

	/*
	 * XXX Sometimes we miss Tx completion interrupts.
	 * We cannot check Tx success/failure for affected frames; just free
	 * the associated mbuf and release the associated node reference.
	 */
	while (ring->tail != idx) {
		txd = &ring->data[ring->tail];
		if (txd->m != NULL) {
			DPRINTF(("%s: missed Tx completion: tail=%d idx=%d\n",
			    __func__, ring->tail, idx));
			iwm_txd_done(sc, txd);
			ring->queued--;
		}
		ring->tail = (ring->tail + 1) % IWM_TX_RING_COUNT;
	}

	/* Account for this frame and unstall output if the ring has room. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
			ifq_clr_oactive(&ifp->if_snd);
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			(*ifp->if_start)(ifp);
		}
	}
}
4371 
4372 void
4373 iwm_rx_bmiss(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4374     struct iwm_rx_data *data)
4375 {
4376 	struct ieee80211com *ic = &sc->sc_ic;
4377 	struct iwm_missed_beacons_notif *mbn = (void *)pkt->data;
4378 	uint32_t missed;
4379 
4380 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
4381 	    (ic->ic_state != IEEE80211_S_RUN))
4382 		return;
4383 
4384 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4385 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
4386 
4387 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
4388 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
4389 		if (ic->ic_if.if_flags & IFF_DEBUG)
4390 			printf("%s: receiving no beacons from %s; checking if "
4391 			    "this AP is still responding to probe requests\n",
4392 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
4393 		/*
4394 		 * Rather than go directly to scan state, try to send a
4395 		 * directed probe request first. If that fails then the
4396 		 * state machine will drop us into scanning after timing
4397 		 * out waiting for a probe response.
4398 		 */
4399 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
4400 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
4401 	}
4402 
4403 }
4404 
4405 int
4406 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
4407 {
4408 	struct iwm_binding_cmd cmd;
4409 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
4410 	uint32_t mac_id = IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
4411 	int i, err, active = (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE);
4412 	uint32_t status;
4413 
4414 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
4415 		panic("binding already added");
4416 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
4417 		panic("binding already removed");
4418 
4419 	if (phyctxt == NULL) /* XXX race with iwm_stop() */
4420 		return EINVAL;
4421 
4422 	memset(&cmd, 0, sizeof(cmd));
4423 
4424 	cmd.id_and_color
4425 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4426 	cmd.action = htole32(action);
4427 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4428 
4429 	cmd.macs[0] = htole32(mac_id);
4430 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
4431 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
4432 
4433 	status = 0;
4434 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
4435 	    sizeof(cmd), &cmd, &status);
4436 	if (err == 0 && status != 0)
4437 		err = EIO;
4438 
4439 	return err;
4440 }
4441 
4442 void
4443 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4444     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
4445 {
4446 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
4447 
4448 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
4449 	    ctxt->color));
4450 	cmd->action = htole32(action);
4451 	cmd->apply_time = htole32(apply_time);
4452 }
4453 
4454 void
4455 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
4456     struct ieee80211_channel *chan, uint8_t chains_static,
4457     uint8_t chains_dynamic)
4458 {
4459 	struct ieee80211com *ic = &sc->sc_ic;
4460 	uint8_t active_cnt, idle_cnt;
4461 
4462 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
4463 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
4464 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
4465 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
4466 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
4467 
4468 	/* Set rx the chains */
4469 	idle_cnt = chains_static;
4470 	active_cnt = chains_dynamic;
4471 
4472 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
4473 					IWM_PHY_RX_CHAIN_VALID_POS);
4474 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
4475 	cmd->rxchain_info |= htole32(active_cnt <<
4476 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
4477 
4478 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
4479 }
4480 
4481 int
4482 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4483     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
4484     uint32_t apply_time)
4485 {
4486 	struct iwm_phy_context_cmd cmd;
4487 
4488 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
4489 
4490 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
4491 	    chains_static, chains_dynamic);
4492 
4493 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
4494 	    sizeof(struct iwm_phy_context_cmd), &cmd);
4495 }
4496 
/*
 * Send a host command to the firmware via the command queue.
 *
 * Commands larger than the pre-allocated per-slot buffer are copied into
 * a freshly allocated mbuf cluster instead. If IWM_CMD_WANT_RESP is set,
 * the caller sleeps until the response arrives (iwm_cmd_done() wakes us)
 * and must later release the response buffer with iwm_free_resp().
 *
 * Returns 0 on success or an errno value.
 */
int
iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
	struct iwm_tfd *desc;
	struct iwm_tx_data *txdata;
	struct iwm_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint32_t addr_lo;
	int err = 0, i, paylen, off, s;
	int idx, code, async, group_id;
	size_t hdrlen, datasz;
	uint8_t *data;
	/* Detect device resets which occur while we sleep below. */
	int generation = sc->sc_generation;

	code = hcmd->id;
	async = hcmd->flags & IWM_CMD_ASYNC;
	idx = ring->cur;

	/* Total payload length across all command fragments. */
	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* If this command waits for a response, allocate response buffer. */
	hcmd->resp_pkt = NULL;
	if (hcmd->flags & IWM_CMD_WANT_RESP) {
		uint8_t *resp_buf;
		/* Only synchronous commands may wait for a response. */
		KASSERT(!async);
		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwm_rx_packet));
		KASSERT(hcmd->resp_pkt_len <= IWM_CMD_RESP_MAX);
		if (sc->sc_cmd_resp_pkt[idx] != NULL)
			return ENOSPC;
		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (resp_buf == NULL)
			return ENOMEM;
		sc->sc_cmd_resp_pkt[idx] = resp_buf;
		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
	} else {
		sc->sc_cmd_resp_pkt[idx] = NULL;
	}

	s = splnet();

	desc = &ring->desc[idx];
	txdata = &ring->data[idx];

	/* Wide-group commands use a larger header, leaving less data room. */
	group_id = iwm_cmd_groupid(code);
	if (group_id != 0) {
		hdrlen = sizeof(cmd->hdr_wide);
		datasz = sizeof(cmd->data_wide);
	} else {
		hdrlen = sizeof(cmd->hdr);
		datasz = sizeof(cmd->data);
	}

	if (paylen > datasz) {
		/* Command is too large to fit in pre-allocated space. */
		size_t totlen = hdrlen + paylen;
		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
			printf("%s: firmware command too long (%zd bytes)\n",
			    DEVNAME(sc), totlen);
			err = EINVAL;
			goto out;
		}
		m = MCLGETL(NULL, M_DONTWAIT, totlen);
		if (m == NULL) {
			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
			    DEVNAME(sc), totlen);
			err = ENOMEM;
			goto out;
		}
		cmd = mtod(m, struct iwm_device_cmd *);
		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
			    DEVNAME(sc), totlen);
			m_freem(m);
			goto out;
		}
		txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
		paddr = txdata->map->dm_segs[0].ds_addr;
	} else {
		/* Small command; use the pre-allocated DMA-safe slot. */
		cmd = &ring->cmd[idx];
		paddr = txdata->cmd_paddr;
	}

	if (group_id != 0) {
		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
		cmd->hdr_wide.group_id = group_id;
		cmd->hdr_wide.qid = ring->qid;
		cmd->hdr_wide.idx = idx;
		cmd->hdr_wide.length = htole16(paylen);
		cmd->hdr_wide.version = iwm_cmd_version(code);
		data = cmd->data_wide;
	} else {
		cmd->hdr.code = code;
		cmd->hdr.flags = 0;
		cmd->hdr.qid = ring->qid;
		cmd->hdr.idx = idx;
		data = cmd->data;
	}

	/* Gather all payload fragments into the command buffer. */
	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen);

	/* lo field is not aligned */
	addr_lo = htole32((uint32_t)paddr);
	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
	    | ((hdrlen + paylen) << 4));
	desc->num_tbs = 1;

	/* Flush the command buffer (mbuf or ring slot) and the TFD to memory. */
	if (paylen > datasz) {
		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

	/*
	 * Wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on 7000 family NICs.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		if (ring->queued == 0 && !iwm_nic_lock(sc)) {
			err = EBUSY;
			goto out;
		}
	}

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
#endif
	/* Kick command ring. */
	ring->queued++;
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (!async) {
		/* Sleep until iwm_cmd_done() wakes us, at most one second. */
		err = tsleep_nsec(desc, PCATCH, "iwmcmd", SEC_TO_NSEC(1));
		if (err == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				err = ENXIO;
				goto out;
			}

			/* Response buffer will be freed in iwm_free_resp(). */
			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
			sc->sc_cmd_resp_pkt[idx] = NULL;
		} else if (generation == sc->sc_generation) {
			/* Sleep failed; discard the response buffer. */
			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
			    sc->sc_cmd_resp_len[idx]);
			sc->sc_cmd_resp_pkt[idx] = NULL;
		}
	}
	/*
	 * NOTE(review): error paths taken before the command is queued
	 * (EINVAL/ENOMEM/EBUSY above) leave sc_cmd_resp_pkt[idx] allocated,
	 * causing a later command at this slot to fail with ENOSPC —
	 * confirm whether this is cleaned up elsewhere.
	 */
 out:
	splx(s);

	return err;
}
4672 
4673 int
4674 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
4675     uint16_t len, const void *data)
4676 {
4677 	struct iwm_host_cmd cmd = {
4678 		.id = id,
4679 		.len = { len, },
4680 		.data = { data, },
4681 		.flags = flags,
4682 	};
4683 
4684 	return iwm_send_cmd(sc, &cmd);
4685 }
4686 
4687 int
4688 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
4689     uint32_t *status)
4690 {
4691 	struct iwm_rx_packet *pkt;
4692 	struct iwm_cmd_response *resp;
4693 	int err, resp_len;
4694 
4695 	KASSERT((cmd->flags & IWM_CMD_WANT_RESP) == 0);
4696 	cmd->flags |= IWM_CMD_WANT_RESP;
4697 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
4698 
4699 	err = iwm_send_cmd(sc, cmd);
4700 	if (err)
4701 		return err;
4702 
4703 	pkt = cmd->resp_pkt;
4704 	if (pkt == NULL || (pkt->hdr.flags & IWM_CMD_FAILED_MSK))
4705 		return EIO;
4706 
4707 	resp_len = iwm_rx_packet_payload_len(pkt);
4708 	if (resp_len != sizeof(*resp)) {
4709 		iwm_free_resp(sc, cmd);
4710 		return EIO;
4711 	}
4712 
4713 	resp = (void *)pkt->data;
4714 	*status = le32toh(resp->status);
4715 	iwm_free_resp(sc, cmd);
4716 	return err;
4717 }
4718 
4719 int
4720 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
4721     const void *data, uint32_t *status)
4722 {
4723 	struct iwm_host_cmd cmd = {
4724 		.id = id,
4725 		.len = { len, },
4726 		.data = { data, },
4727 	};
4728 
4729 	return iwm_send_cmd_status(sc, &cmd, status);
4730 }
4731 
4732 void
4733 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4734 {
4735 	KASSERT((hcmd->flags & (IWM_CMD_WANT_RESP)) == IWM_CMD_WANT_RESP);
4736 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
4737 	hcmd->resp_pkt = NULL;
4738 }
4739 
4740 void
4741 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx, int code)
4742 {
4743 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
4744 	struct iwm_tx_data *data;
4745 
4746 	if (qid != sc->cmdqid) {
4747 		return;	/* Not a command ack. */
4748 	}
4749 
4750 	data = &ring->data[idx];
4751 
4752 	if (data->m != NULL) {
4753 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
4754 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4755 		bus_dmamap_unload(sc->sc_dmat, data->map);
4756 		m_freem(data->m);
4757 		data->m = NULL;
4758 	}
4759 	wakeup(&ring->desc[idx]);
4760 
4761 	if (ring->queued == 0) {
4762 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
4763 		    DEVNAME(sc), code));
4764 	} else if (--ring->queued == 0) {
4765 		/*
4766 		 * 7000 family NICs are locked while commands are in progress.
4767 		 * All commands are now done so we may unlock the NIC again.
4768 		 */
4769 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
4770 			iwm_nic_unlock(sc);
4771 	}
4772 }
4773 
#if 0
/*
 * Update the Tx scheduler's byte count table for a queue slot.
 * Necessary only for block ack mode.
 *
 * Note: this dead (#if 0) code previously referenced an undefined
 * variable `w` in both bus_dmamap_sync() offset computations and would
 * not have compiled if enabled; `w` is now materialized as a pointer to
 * the updated table entry.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
    uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t *w, w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	w = &scd_bc_tbl[qid].tfd_offset[idx];
	*w = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		w[IWM_TFD_QUEUE_SIZE_MAX] = w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)(w + IWM_TFD_QUEUE_SIZE_MAX) -
		    (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
4809 
4810 /*
4811  * Fill in various bit for management frames, and leave them
4812  * unfilled for data frames (firmware takes care of that).
4813  * Return the selected TX rate.
4814  */
4815 const struct iwm_rate *
4816 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
4817     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
4818 {
4819 	struct ieee80211com *ic = &sc->sc_ic;
4820 	struct ieee80211_node *ni = &in->in_ni;
4821 	const struct iwm_rate *rinfo;
4822 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4823 	int min_ridx = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
4824 	int ridx, rate_flags;
4825 
4826 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
4827 	tx->data_retry_limit = IWM_LOW_RETRY_LIMIT;
4828 
4829 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4830 	    type != IEEE80211_FC0_TYPE_DATA) {
4831 		/* for non-data, use the lowest supported rate */
4832 		ridx = min_ridx;
4833 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
4834 	} else if (ic->ic_fixed_mcs != -1) {
4835 		ridx = sc->sc_fixed_ridx;
4836 	} else if (ic->ic_fixed_rate != -1) {
4837 		ridx = sc->sc_fixed_ridx;
4838 	} else if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4839 	    ieee80211_mira_is_probing(&in->in_mn)) {
4840 		/* Keep Tx rate constant while mira is probing. */
4841 		ridx = iwm_mcs2ridx[ni->ni_txmcs];
4842  	} else {
4843 		int i;
4844 		/* Use firmware rateset retry table. */
4845 		tx->initial_rate_index = 0;
4846 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
4847 		if (ni->ni_flags & IEEE80211_NODE_HT) {
4848 			ridx = iwm_mcs2ridx[ni->ni_txmcs];
4849 			return &iwm_rates[ridx];
4850 		}
4851 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4852 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
4853 		for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
4854 			if (iwm_rates[i].rate == (ni->ni_txrate &
4855 			    IEEE80211_RATE_VAL)) {
4856 				ridx = i;
4857 				break;
4858 			}
4859 		}
4860 		return &iwm_rates[ridx];
4861 	}
4862 
4863 	rinfo = &iwm_rates[ridx];
4864 	if (iwm_is_mimo_ht_plcp(rinfo->ht_plcp))
4865 		rate_flags = IWM_RATE_MCS_ANT_AB_MSK;
4866 	else
4867 		rate_flags = IWM_RATE_MCS_ANT_A_MSK;
4868 	if (IWM_RIDX_IS_CCK(ridx))
4869 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
4870 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4871 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4872 		rate_flags |= IWM_RATE_MCS_HT_MSK;
4873 		if (ieee80211_node_supports_ht_sgi20(ni))
4874 			rate_flags |= IWM_RATE_MCS_SGI_MSK;
4875 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4876 	} else
4877 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4878 
4879 	return rinfo;
4880 }
4881 
4882 #define TB0_SIZE 16
4883 int
4884 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
4885 {
4886 	struct ieee80211com *ic = &sc->sc_ic;
4887 	struct iwm_node *in = (void *)ni;
4888 	struct iwm_tx_ring *ring;
4889 	struct iwm_tx_data *data;
4890 	struct iwm_tfd *desc;
4891 	struct iwm_device_cmd *cmd;
4892 	struct iwm_tx_cmd *tx;
4893 	struct ieee80211_frame *wh;
4894 	struct ieee80211_key *k = NULL;
4895 	const struct iwm_rate *rinfo;
4896 	uint8_t *ivp;
4897 	uint32_t flags;
4898 	u_int hdrlen;
4899 	bus_dma_segment_t *seg;
4900 	uint8_t tid, type;
4901 	int i, totlen, err, pad;
4902 	int hdrlen2, rtsthres = ic->ic_rtsthreshold;
4903 
4904 	wh = mtod(m, struct ieee80211_frame *);
4905 	hdrlen = ieee80211_get_hdrlen(wh);
4906 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4907 
4908 	hdrlen2 = (ieee80211_has_qos(wh)) ?
4909 	    sizeof (struct ieee80211_qosframe) :
4910 	    sizeof (struct ieee80211_frame);
4911 
4912 	tid = 0;
4913 
4914 	/*
4915 	 * Map EDCA categories to Tx data queues.
4916 	 *
4917 	 * We use static data queue assignments even in DQA mode. We do not
4918 	 * need to share Tx queues between stations because we only implement
4919 	 * client mode; the firmware's station table contains only one entry
4920 	 * which represents our access point.
4921 	 *
4922 	 * Tx aggregation will require additional queues (one queue per TID
4923 	 * for which aggregation is enabled) but we do not implement this yet.
4924 	 */
4925 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
4926 		ring = &sc->txq[IWM_DQA_MIN_MGMT_QUEUE + ac];
4927 	else
4928 		ring = &sc->txq[ac];
4929 	desc = &ring->desc[ring->cur];
4930 	memset(desc, 0, sizeof(*desc));
4931 	data = &ring->data[ring->cur];
4932 
4933 	cmd = &ring->cmd[ring->cur];
4934 	cmd->hdr.code = IWM_TX_CMD;
4935 	cmd->hdr.flags = 0;
4936 	cmd->hdr.qid = ring->qid;
4937 	cmd->hdr.idx = ring->cur;
4938 
4939 	tx = (void *)cmd->data;
4940 	memset(tx, 0, sizeof(*tx));
4941 
4942 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
4943 
4944 #if NBPFILTER > 0
4945 	if (sc->sc_drvbpf != NULL) {
4946 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
4947 		uint16_t chan_flags;
4948 
4949 		tap->wt_flags = 0;
4950 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
4951 		chan_flags = ni->ni_chan->ic_flags;
4952 		if (ic->ic_curmode != IEEE80211_MODE_11N)
4953 			chan_flags &= ~IEEE80211_CHAN_HT;
4954 		tap->wt_chan_flags = htole16(chan_flags);
4955 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4956 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4957 		    type == IEEE80211_FC0_TYPE_DATA &&
4958 		    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4959 			tap->wt_rate = (0x80 | rinfo->ht_plcp);
4960 		} else
4961 			tap->wt_rate = rinfo->rate;
4962 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
4963 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
4964 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4965 
4966 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
4967 		    m, BPF_DIRECTION_OUT);
4968 	}
4969 #endif
4970 	totlen = m->m_pkthdr.len;
4971 
4972 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
4973 		k = ieee80211_get_txkey(ic, wh, ni);
4974 		if ((k->k_flags & IEEE80211_KEY_GROUP) ||
4975 		    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
4976 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
4977 				return ENOBUFS;
4978 			/* 802.11 header may have moved. */
4979 			wh = mtod(m, struct ieee80211_frame *);
4980 			totlen = m->m_pkthdr.len;
4981 			k = NULL; /* skip hardware crypto below */
4982 		} else {
4983 			/* HW appends CCMP MIC */
4984 			totlen += IEEE80211_CCMP_HDRLEN;
4985 		}
4986 	}
4987 
4988 	flags = 0;
4989 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4990 		flags |= IWM_TX_CMD_FLG_ACK;
4991 	}
4992 
4993 	if (ni->ni_flags & IEEE80211_NODE_HT)
4994 		rtsthres = ieee80211_mira_get_rts_threshold(&in->in_mn, ic, ni,
4995 		    totlen + IEEE80211_CRC_LEN);
4996 
4997 	if (type == IEEE80211_FC0_TYPE_DATA &&
4998 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4999 	    (totlen + IEEE80211_CRC_LEN > rtsthres ||
5000 	    (ic->ic_flags & IEEE80211_F_USEPROT)))
5001 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
5002 
5003 	tx->sta_id = IWM_STATION_ID;
5004 
5005 	if (type == IEEE80211_FC0_TYPE_MGT) {
5006 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5007 
5008 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
5009 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
5010 			tx->pm_frame_timeout = htole16(3);
5011 		else
5012 			tx->pm_frame_timeout = htole16(2);
5013 	} else {
5014 		tx->pm_frame_timeout = htole16(0);
5015 	}
5016 
5017 	if (hdrlen & 3) {
5018 		/* First segment length must be a multiple of 4. */
5019 		flags |= IWM_TX_CMD_FLG_MH_PAD;
5020 		pad = 4 - (hdrlen & 3);
5021 	} else
5022 		pad = 0;
5023 
5024 	tx->driver_txop = 0;
5025 	tx->next_frame_len = 0;
5026 
5027 	tx->len = htole16(totlen);
5028 	tx->tid_tspec = tid;
5029 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
5030 
5031 	/* Set physical address of "scratch area". */
5032 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
5033 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
5034 
5035 	/* Copy 802.11 header in TX command. */
5036 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
5037 
5038 	if  (k != NULL && k->k_cipher == IEEE80211_CIPHER_CCMP) {
5039 		/* Trim 802.11 header and prepend CCMP IV. */
5040 		m_adj(m, hdrlen - IEEE80211_CCMP_HDRLEN);
5041 		ivp = mtod(m, u_int8_t *);
5042 		k->k_tsc++;	/* increment the 48-bit PN */
5043 		ivp[0] = k->k_tsc; /* PN0 */
5044 		ivp[1] = k->k_tsc >> 8; /* PN1 */
5045 		ivp[2] = 0;        /* Rsvd */
5046 		ivp[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV;
5047 		ivp[4] = k->k_tsc >> 16; /* PN2 */
5048 		ivp[5] = k->k_tsc >> 24; /* PN3 */
5049 		ivp[6] = k->k_tsc >> 32; /* PN4 */
5050 		ivp[7] = k->k_tsc >> 40; /* PN5 */
5051 
5052 		tx->sec_ctl = IWM_TX_CMD_SEC_CCM;
5053 		memcpy(tx->key, k->k_key, MIN(sizeof(tx->key), k->k_len));
5054 	} else {
5055 		/* Trim 802.11 header. */
5056 		m_adj(m, hdrlen);
5057 		tx->sec_ctl = 0;
5058 	}
5059 
5060 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
5061 
5062 	tx->tx_flags |= htole32(flags);
5063 
5064 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
5065 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5066 	if (err && err != EFBIG) {
5067 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
5068 		m_freem(m);
5069 		return err;
5070 	}
5071 	if (err) {
5072 		/* Too many DMA segments, linearize mbuf. */
5073 		if (m_defrag(m, M_DONTWAIT)) {
5074 			m_freem(m);
5075 			return ENOBUFS;
5076 		}
5077 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
5078 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5079 		if (err) {
5080 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
5081 			    err);
5082 			m_freem(m);
5083 			return err;
5084 		}
5085 	}
5086 	data->m = m;
5087 	data->in = in;
5088 	data->txmcs = ni->ni_txmcs;
5089 	data->txrate = ni->ni_txrate;
5090 
5091 	/* Fill TX descriptor. */
5092 	desc->num_tbs = 2 + data->map->dm_nsegs;
5093 
5094 	desc->tbs[0].lo = htole32(data->cmd_paddr);
5095 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
5096 	    (TB0_SIZE << 4));
5097 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
5098 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
5099 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
5100 	      + hdrlen + pad - TB0_SIZE) << 4));
5101 
5102 	/* Other DMA segments are for data payload. */
5103 	seg = data->map->dm_segs;
5104 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
5105 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
5106 		desc->tbs[i+2].hi_n_len = \
5107 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr)
5108 		    | ((seg->ds_len) << 4));
5109 	}
5110 
5111 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
5112 	    BUS_DMASYNC_PREWRITE);
5113 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5114 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5115 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
5116 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5117 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5118 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
5119 
5120 #if 0
5121 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
5122 #endif
5123 
5124 	/* Kick TX ring. */
5125 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
5126 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
5127 
5128 	/* Mark TX ring as full if we reach a certain threshold. */
5129 	if (++ring->queued > IWM_TX_RING_HIMARK) {
5130 		sc->qfullmsk |= 1 << ring->qid;
5131 	}
5132 
5133 	return 0;
5134 }
5135 
5136 int
5137 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_queue_msk)
5138 {
5139 	struct iwm_tx_path_flush_cmd flush_cmd = {
5140 		.queues_ctl = htole32(tfd_queue_msk),
5141 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
5142 	};
5143 	int err;
5144 
5145 	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, 0,
5146 	    sizeof(flush_cmd), &flush_cmd);
5147 	if (err)
5148                 printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
5149 	return err;
5150 }
5151 
/* Turn the device LED on. */
void
iwm_led_enable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
}
5157 
/* Turn the device LED off. */
void
iwm_led_disable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
}
5163 
/* Return non-zero if the LED register currently reads back as "on". */
int
iwm_led_is_enabled(struct iwm_softc *sc)
{
	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
}
5169 
#define IWM_LED_BLINK_TIMEOUT_MSEC    200

/*
 * Blink timeout handler: toggle the LED and re-arm the timeout, so the
 * LED blinks with a half-period of IWM_LED_BLINK_TIMEOUT_MSEC.
 */
void
iwm_led_blink_timeout(void *arg)
{
	struct iwm_softc *sc = arg;

	if (iwm_led_is_enabled(sc))
		iwm_led_disable(sc);
	else
		iwm_led_enable(sc);

	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
}
5184 
/* Start LED blinking: arm the blink timeout and turn the LED on. */
void
iwm_led_blink_start(struct iwm_softc *sc)
{
	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
	iwm_led_enable(sc);
}
5191 
/* Stop LED blinking: cancel the blink timeout and turn the LED off. */
void
iwm_led_blink_stop(struct iwm_softc *sc)
{
	timeout_del(&sc->sc_led_blink_to);
	iwm_led_disable(sc);
}
5198 
/* Minimum firmware keep-alive period, in seconds (see iwm_power_build_cmd). */
#define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25

/* Send a beacon filtering command to the firmware. */
int
iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
    struct iwm_beacon_filter_cmd *cmd)
{
	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
}
5208 
/*
 * Fill connection-quality-monitoring parameters of a beacon filter
 * command from driver state.  Only the beacon abort enable flag is
 * set here; 'in' is currently unused but kept for API symmetry with
 * other command-building helpers.
 */
void
iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
5215 
/*
 * Enable or disable firmware beacon abort.  Does nothing (and returns
 * success) while beacon filtering itself is disabled; otherwise records
 * the new state in sc_bf and pushes an updated beacon filter command.
 */
int
iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(enable),
	};

	if (!sc->sc_bf.bf_enabled)
		return 0;

	sc->sc_bf.ba_enabled = enable;
	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
	return iwm_beacon_filter_send_cmd(sc, &cmd);
}
5232 
/*
 * Fill a MAC power command for the given node: MAC id/color, the
 * keep-alive period (rounded up to whole seconds), and the power-save
 * flag (not set in monitor mode).
 */
void
iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_power_cmd *cmd)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	int dtim_period, dtim_msec, keep_alive;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	/* Fall back to a DTIM period of 1 if the AP did not provide one. */
	if (ni->ni_dtimperiod)
		dtim_period = ni->ni_dtimperiod;
	else
		dtim_period = 1;

	/*
	 * Regardless of power management state the driver must set
	 * keep alive period. FW will use it for sending keep alive NDPs
	 * immediately after association. Check that keep alive period
	 * is at least 3 * DTIM.
	 */
	dtim_msec = dtim_period * ni->ni_intval;
	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
	keep_alive = roundup(keep_alive, 1000) / 1000; /* msec -> whole sec */
	cmd->keep_alive_seconds = htole16(keep_alive);

	if (ic->ic_opmode != IEEE80211_M_MONITOR)
		cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
}
5262 
5263 int
5264 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
5265 {
5266 	int err;
5267 	int ba_enable;
5268 	struct iwm_mac_power_cmd cmd;
5269 
5270 	memset(&cmd, 0, sizeof(cmd));
5271 
5272 	iwm_power_build_cmd(sc, in, &cmd);
5273 
5274 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
5275 	    sizeof(cmd), &cmd);
5276 	if (err != 0)
5277 		return err;
5278 
5279 	ba_enable = !!(cmd.flags &
5280 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
5281 	return iwm_update_beacon_abort(sc, in, ba_enable);
5282 }
5283 
/*
 * Send the device-wide power table command.  Power save is enabled
 * except in monitor mode.
 */
int
iwm_power_update_device(struct iwm_softc *sc)
{
	struct iwm_device_power_cmd cmd = { };
	struct ieee80211com *ic = &sc->sc_ic;

	if (ic->ic_opmode != IEEE80211_M_MONITOR)
		cmd.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);

	return iwm_send_cmd_pdu(sc,
	    IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
}
5296 
/*
 * Enable firmware beacon filtering for the given node and record the
 * new state in sc_bf on success.
 */
int
iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
	};
	int err;

	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
	err = iwm_beacon_filter_send_cmd(sc, &cmd);

	if (err == 0)
		sc->sc_bf.bf_enabled = 1;

	return err;
}
5314 
5315 int
5316 iwm_disable_beacon_filter(struct iwm_softc *sc)
5317 {
5318 	struct iwm_beacon_filter_cmd cmd;
5319 	int err;
5320 
5321 	memset(&cmd, 0, sizeof(cmd));
5322 
5323 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
5324 	if (err == 0)
5325 		sc->sc_bf.bf_enabled = 0;
5326 
5327 	return err;
5328 }
5329 
/*
 * Add (update == 0) or update (update != 0) the firmware's station
 * table entry for our peer.  In monitor mode a wildcard-address entry
 * bound to the injection/monitor queue is used; otherwise the entry
 * represents our AP and is bound to the EDCA data queues.  HT settings
 * (MIMO streams, aggregation size and MPDU density) are derived from
 * the node's negotiated capabilities.  Returns 0 on success, EIO if
 * the firmware rejected the command, or a command-submission errno.
 */
int
iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
{
	struct iwm_add_sta_cmd add_sta_cmd;
	int err;
	uint32_t status;
	size_t cmdsize;
	struct ieee80211com *ic = &sc->sc_ic;

	if (!update && (sc->sc_flags & IWM_FLAG_STA_ACTIVE))
		panic("STA already added");

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		add_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
	else
		add_sta_cmd.sta_id = IWM_STATION_ID;
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)) {
		if (ic->ic_opmode == IEEE80211_M_MONITOR)
			add_sta_cmd.station_type = IWM_STA_GENERAL_PURPOSE;
		else
			add_sta_cmd.station_type = IWM_STA_LINK;
	}
	add_sta_cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		int qid;
		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, etheranyaddr);
		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
		else
			qid = IWM_AUX_QUEUE;
		add_sta_cmd.tfd_queue_msk |= htole32(1 << qid);
	} else if (!update) {
		int ac;
		/* Bind one Tx queue per EDCA category to this station. */
		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
			int qid = ac;
			if (isset(sc->sc_enabled_capa,
			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
				qid += IWM_DQA_MIN_MGMT_QUEUE;
			add_sta_cmd.tfd_queue_msk |= htole32(1 << qid);
		}
		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
	}
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
	/* Tx aggregation is not implemented; disable it for all TIDs. */
	add_sta_cmd.tid_disable_tx = htole16(0xffff);
	if (update)
		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);

	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		add_sta_cmd.station_flags_msk
		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);

		if (iwm_mimo_enabled(sc)) {
			/* Peer MCS sets reveal how many Rx streams it has. */
			if (in->in_ni.ni_rxmcs[1] != 0) {
				add_sta_cmd.station_flags |=
				    htole32(IWM_STA_FLG_MIMO_EN_MIMO2);
			}
			if (in->in_ni.ni_rxmcs[2] != 0) {
				add_sta_cmd.station_flags |=
				    htole32(IWM_STA_FLG_MIMO_EN_MIMO3);
			}
		}

		add_sta_cmd.station_flags
		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
		/* Map the negotiated minimum MPDU start spacing. */
		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
		case IEEE80211_AMPDU_PARAM_SS_2:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_4:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_8:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_16:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
			break;
		default:
			break;
		}
	}

	status = IWM_ADD_STA_SUCCESS;
	/* Older firmware without STA_TYPE expects the shorter v7 layout. */
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
		cmdsize = sizeof(add_sta_cmd);
	else
		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize,
	    &add_sta_cmd, &status);
	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
5434 
/*
 * Add the firmware's auxiliary station, used e.g. for scanning.
 * Enables the appropriate aux Tx queue first, then submits the
 * IWM_ADD_STA command.  Returns 0 on success, EIO if the firmware
 * rejected the command, or a queue-setup/submission errno.
 */
int
iwm_add_aux_sta(struct iwm_softc *sc)
{
	struct iwm_add_sta_cmd cmd;
	int err, qid;
	uint32_t status;
	size_t cmdsize;

	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
		qid = IWM_DQA_AUX_QUEUE;
		err = iwm_enable_txq(sc, IWM_AUX_STA_ID, qid,
		    IWM_TX_FIFO_MCAST);
	} else {
		qid = IWM_AUX_QUEUE;
		err = iwm_enable_ac_txq(sc, qid, IWM_TX_FIFO_MCAST);
	}
	if (err)
		return err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = IWM_AUX_STA_ID;
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = IWM_STA_AUX_ACTIVITY;
	cmd.mac_id_n_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
	cmd.tfd_queue_msk = htole32(1 << qid);
	/* No Tx aggregation on the aux station. */
	cmd.tid_disable_tx = htole16(0xffff);

	status = IWM_ADD_STA_SUCCESS;
	/* Older firmware without STA_TYPE expects the shorter v7 layout. */
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
		cmdsize = sizeof(cmd);
	else
		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
	    &status);
	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
5475 
5476 int
5477 iwm_rm_sta_cmd(struct iwm_softc *sc, struct iwm_node *in)
5478 {
5479 	struct ieee80211com *ic = &sc->sc_ic;
5480 	struct iwm_rm_sta_cmd rm_sta_cmd;
5481 	int err;
5482 
5483 	if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
5484 		panic("sta already removed");
5485 
5486 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
5487 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
5488 		rm_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
5489 	else
5490 		rm_sta_cmd.sta_id = IWM_STATION_ID;
5491 
5492 	err = iwm_send_cmd_pdu(sc, IWM_REMOVE_STA, 0, sizeof(rm_sta_cmd),
5493 	    &rm_sta_cmd);
5494 
5495 	return err;
5496 }
5497 
5498 uint16_t
5499 iwm_scan_rx_chain(struct iwm_softc *sc)
5500 {
5501 	uint16_t rx_chain;
5502 	uint8_t rx_ant;
5503 
5504 	rx_ant = iwm_fw_valid_rx_ant(sc);
5505 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
5506 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
5507 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
5508 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
5509 	return htole16(rx_chain);
5510 }
5511 
/*
 * Compute the rate_n_flags word for scan probe transmissions.
 * Rotates through the valid Tx antennas round-robin (remembering the
 * last one used in sc_scan_last_antenna) and selects 1 Mbit/s CCK on
 * 2GHz channels (unless 'no_cck') or 6 Mbit/s OFDM otherwise.
 * Returned in little-endian byte order.
 */
uint32_t
iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
{
	uint32_t tx_ant;
	int i, ind;

	/* Pick the next valid antenna after the one used last time. */
	for (i = 0, ind = sc->sc_scan_last_antenna;
	    i < IWM_RATE_MCS_ANT_NUM; i++) {
		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
			sc->sc_scan_last_antenna = ind;
			break;
		}
	}
	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;

	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
				   tx_ant);
	else
		return htole32(IWM_RATE_6M_PLCP | tx_ant);
}
5534 
/*
 * Fill the channel configuration array of an LMAC scan request with
 * all configured channels, up to the firmware's channel limit.
 * Returns the number of channels written.  Note: unlike the UMAC
 * variant, channel_num and iter_count are little-endian here.
 */
uint8_t
iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_lmac *chan, int n_ssids, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	/* Index 0 of the channel array is unused — TODO confirm. */
	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		/* Skip channels which are not configured. */
		if (c->ic_flags == 0)
			continue;

		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
		chan->iter_count = htole16(1);
		chan->iter_interval = 0;
		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
		if (n_ssids != 0 && !bgscan)
			chan->flags |= htole32(1 << 1); /* select SSID 0 */
		chan++;
		nchan++;
	}

	return nchan;
}
5562 
/*
 * Fill the channel configuration array of a UMAC scan request with
 * all configured channels, up to the firmware's channel limit.
 * Returns the number of channels written.  Note: unlike the LMAC
 * variant, channel_num and iter_count are single bytes here.
 */
uint8_t
iwm_umac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	/* Index 0 of the channel array is unused — TODO confirm. */
	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		/* Skip channels which are not configured. */
		if (c->ic_flags == 0)
			continue;

		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
		chan->iter_count = 1;
		chan->iter_interval = htole16(0);
		if (n_ssids != 0 && !bgscan)
			chan->flags = htole32(1 << 0); /* select SSID 0 */
		chan++;
		nchan++;
	}

	return nchan;
}
5589 
/*
 * Build a probe request in the old (v1) firmware layout by first
 * building the current-format probe request and then copying its
 * fields into the v1 structure.  Returns 0 or an errno from
 * iwm_fill_probe_req().
 */
int
iwm_fill_probe_req_v1(struct iwm_softc *sc, struct iwm_scan_probe_req_v1 *preq1)
{
	struct iwm_scan_probe_req preq2;
	int err, i;

	err = iwm_fill_probe_req(sc, &preq2);
	if (err)
		return err;

	preq1->mac_header = preq2.mac_header;
	for (i = 0; i < nitems(preq1->band_data); i++)
		preq1->band_data[i] = preq2.band_data[i];
	preq1->common_data = preq2.common_data;
	memcpy(preq1->buf, preq2.buf, sizeof(preq1->buf));
	return 0;
}
5607 
/*
 * Build the probe request template sent to the firmware for scanning:
 * an 802.11 probe request frame in preq->buf plus offset/length
 * descriptors telling the firmware where the MAC header, the per-band
 * IEs (2GHz, 5GHz) and the common (11n) IEs are located.  'remain'
 * tracks the free space left in preq->buf; returns ENOBUFS if the
 * IEs do not fit, 0 on success.
 */
int
iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
	struct ieee80211_rateset *rs;
	size_t remain = sizeof(preq->buf);
	uint8_t *frm, *pos;

	memset(preq, 0, sizeof(*preq));

	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
		return ENOBUFS;

	/*
	 * Build a probe request frame.  Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);

	/* Tell the firmware where the MAC header is. */
	preq->mac_header.offset = 0;
	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
	remain -= frm - (uint8_t *)wh;

	/* Fill in 2GHz IEs and tell firmware where they are. */
	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
		/* Rates don't fit in one IE; an extended rates IE follows. */
		if (remain < 4 + rs->rs_nrates)
			return ENOBUFS;
	} else if (remain < 2 + rs->rs_nrates)
		return ENOBUFS;
	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	preq->band_data[0].len = htole16(frm - pos);
	remain -= frm - pos;

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
		if (remain < 3)
			return ENOBUFS;
		/* Empty DS params IE; channel is filled in by firmware. */
		*frm++ = IEEE80211_ELEMID_DSPARMS;
		*frm++ = 1;
		*frm++ = 0;
		remain -= 3;
	}

	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
		/* Fill in 5GHz IEs. */
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
			if (remain < 4 + rs->rs_nrates)
				return ENOBUFS;
		} else if (remain < 2 + rs->rs_nrates)
			return ENOBUFS;
		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
		pos = frm;
		frm = ieee80211_add_rates(frm, rs);
		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
			frm = ieee80211_add_xrates(frm, rs);
		preq->band_data[1].len = htole16(frm - pos);
		remain -= frm - pos;
	}

	/* Send 11n IEs on both 2GHz and 5GHz bands. */
	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	if (ic->ic_flags & IEEE80211_F_HTON) {
		if (remain < 28)
			return ENOBUFS;
		frm = ieee80211_add_htcaps(frm, ic);
		/* XXX add WME info? */
	}
	preq->common_data.len = htole16(frm - pos);

	return 0;
}
5698 
/*
 * Start an LMAC (older firmware) scan.  Builds an
 * IWM_SCAN_OFFLOAD_REQUEST_CMD containing dwell timings, Tx parameters
 * for both bands, the directed-scan SSID (if any), the channel list
 * and the probe request template, then submits it.  Background scans
 * are sent asynchronously.  Returns 0 or an errno.
 */
int
iwm_lmac_scan(struct iwm_softc *sc, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_lmac *req;
	struct iwm_scan_probe_req_v1 *preq;
	size_t req_len;
	int err, async = bgscan;

	req_len = sizeof(struct iwm_scan_req_lmac) +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req_v1);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	/* Must not sleep when a background scan was triggered from softint. */
	req = malloc(req_len, M_DEVBUF,
	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;
	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	if (bgscan) {
		req->max_out_time = htole32(120);
		req->suspend_time = htole32(120);
	} else {
		req->max_out_time = htole32(0);
		req->suspend_time = htole32(0);
	}
	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->rx_chain_select = iwm_scan_rx_chain(sc);
	req->iter_num = htole32(1);
	req->delay = 0;

	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
	/* Passive scan unless we have an SSID to probe for. */
	if (ic->ic_des_esslen == 0)
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
	else
		req->scan_flags |=
		    htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);

	req->flags = htole32(IWM_PHY_BAND_24);
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		req->flags |= htole32(IWM_PHY_BAND_5);
	req->filter_flags =
	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);

	/* Tx flags 2 GHz. */
	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[0].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;

	/* Tx flags 5 GHz. */
	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[1].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		req->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
	}

	req->n_channels = iwm_lmac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_lmac *)req->data,
	    ic->ic_des_esslen != 0, bgscan);

	/* The probe request template follows the channel array. */
	preq = (struct iwm_scan_probe_req_v1 *)(req->data +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->sc_capa_n_scan_channels));
	err = iwm_fill_probe_req_v1(sc, preq);
	if (err) {
		free(req, M_DEVBUF, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	req->schedule[0].iterations = 1;
	req->schedule[0].full_scan_mul = 1;

	/* Disable EBS. */
	req->channel_opt[0].non_ebs_ratio = 1;
	req->channel_opt[1].non_ebs_ratio = 1;

	err = iwm_send_cmd(sc, &hcmd);
	free(req, M_DEVBUF, req_len);
	return err;
}
5810 
/*
 * Send the one-time UMAC scan configuration command: antenna chains,
 * legacy rates, dwell timings, our MAC address and the list of all
 * configured channels.  Returns 0 or an errno.
 */
int
iwm_config_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_scan_config *scan_config;
	int err, nchan;
	size_t cmd_size;
	struct ieee80211_channel *c;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_LONG_GROUP, 0),
		.flags = 0,
	};
	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
	    IWM_SCAN_CONFIG_RATE_54M);

	/* One byte per channel follows the fixed-size structure. */
	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;

	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
	if (scan_config == NULL)
		return ENOMEM;

	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
	scan_config->legacy_rates = htole32(rates |
	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	scan_config->dwell_active = 10;
	scan_config->dwell_passive = 110;
	scan_config->dwell_fragmented = 44;
	scan_config->dwell_extended = 90;
	scan_config->out_of_channel_time = htole32(0);
	scan_config->suspend_time = htole32(0);

	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);

	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
	scan_config->channel_flags = 0;

	/* Index 0 of the channel array is unused — TODO confirm. */
	for (c = &ic->ic_channels[1], nchan = 0;
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels; c++) {
		if (c->ic_flags == 0)
			continue;
		scan_config->channel_array[nchan++] =
		    ieee80211_mhz2ieee(c->ic_freq, 0);
	}

	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);

	hcmd.data[0] = scan_config;
	hcmd.len[0] = cmd_size;

	err = iwm_send_cmd(sc, &hcmd);
	free(scan_config, M_DEVBUF, cmd_size);
	return err;
}
5883 
/*
 * Compute the total byte size of a UMAC scan request command.
 * The request header size depends on which adaptive-dwell API version
 * the loaded firmware advertises, and the tail size depends on whether
 * the extended-channel scan API is available.
 */
int
iwm_umac_scan_size(struct iwm_softc *sc)
{
	int base_size = IWM_SCAN_REQ_UMAC_SIZE_V1;
	int tail_size;

	/* Newer adaptive-dwell firmware uses a larger request header. */
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
		base_size = IWM_SCAN_REQ_UMAC_SIZE_V8;
	else if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
		base_size = IWM_SCAN_REQ_UMAC_SIZE_V7;
#ifdef notyet
	else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_22000)
		base_size = IWM_SCAN_REQ_UMAC_SIZE_V6;
#endif
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
		tail_size = sizeof(struct iwm_scan_req_umac_tail_v2);
	else
		tail_size = sizeof(struct iwm_scan_req_umac_tail_v1);

	/* Header + per-channel config array + tail (probe request, SSIDs). */
	return base_size + sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels + tail_size;
}
5906 
/*
 * Return a pointer to the channel-parameter header inside a UMAC scan
 * request.  Its offset within the request varies with the firmware's
 * adaptive-dwell API version, mirroring iwm_umac_scan_size().
 */
struct iwm_scan_umac_chan_param *
iwm_get_scan_req_umac_chan_param(struct iwm_softc *sc,
    struct iwm_scan_req_umac *req)
{
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
		return &req->v8.channel;

	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
		return &req->v7.channel;
#ifdef notyet
	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_22000)
		return &req->v6.channel;
#endif
	return &req->v1.channel;
}
5922 
/*
 * Return a pointer to the variable-length data area (channel configs
 * followed by the tail) inside a UMAC scan request.  Like the channel
 * parameter header, its offset depends on the firmware API version.
 */
void *
iwm_get_scan_req_umac_data(struct iwm_softc *sc, struct iwm_scan_req_umac *req)
{
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
		return (void *)&req->v8.data;

	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
		return (void *)&req->v7.data;
#ifdef notyet
	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_22000)
		return (void *)&req->v6.data;
#endif
	return (void *)&req->v1.data;

}
5938 
5939 /* adaptive dwell max budget time [TU] for full scan */
5940 #define IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
5941 /* adaptive dwell max budget time [TU] for directed scan */
5942 #define IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
5943 /* adaptive dwell default high band APs number */
5944 #define IWM_SCAN_ADWELL_DEFAULT_HB_N_APS 8
5945 /* adaptive dwell default low band APs number */
5946 #define IWM_SCAN_ADWELL_DEFAULT_LB_N_APS 2
5947 /* adaptive dwell default APs number in social channels (1, 6, 11) */
5948 #define IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
5949 
/*
 * Build and send a UMAC scan request to the firmware.
 * If 'bgscan' is nonzero this is a background scan and the command is
 * sent asynchronously (and allocation must not sleep).
 * Returns 0 on success or an errno value.
 */
int
iwm_umac_scan(struct iwm_softc *sc, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_umac *req;
	void *cmd_data, *tail_data;
	struct iwm_scan_req_umac_tail_v2 *tail;
	struct iwm_scan_req_umac_tail_v1 *tailv1;
	struct iwm_scan_umac_chan_param *chanparam;
	size_t req_len;
	int err, async = bgscan;

	/* Sanity-check the computed request size against firmware limits. */
	req_len = iwm_umac_scan_size(sc);
	if ((req_len < IWM_SCAN_REQ_UMAC_SIZE_V1 +
	    sizeof(struct iwm_scan_req_umac_tail_v1)) ||
	    req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ERANGE;
	req = malloc(req_len, M_DEVBUF,
	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;
	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;

	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
		/* v7/v8 request layout with adaptive dwell parameters. */
		req->v7.adwell_default_n_aps_social =
			IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
		req->v7.adwell_default_n_aps =
			IWM_SCAN_ADWELL_DEFAULT_LB_N_APS;

		/* Directed scans get a smaller dwell-time budget. */
		if (ic->ic_des_esslen != 0)
			req->v7.adwell_max_budget =
			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
		else
			req->v7.adwell_max_budget =
			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);

		req->v7.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
		req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = 0;
		req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = 0;

		if (isset(sc->sc_ucode_api,
		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
			req->v8.active_dwell[IWM_SCAN_LB_LMAC_IDX] = 10;
			req->v8.passive_dwell[IWM_SCAN_LB_LMAC_IDX] = 110;
		} else {
			req->v7.active_dwell = 10;
			req->v7.passive_dwell = 110;
			req->v7.fragmented_dwell = 44;
		}
	} else {
		/* These timings correspond to iwlwifi's UNASSOC scan. */
		req->v1.active_dwell = 10;
		req->v1.passive_dwell = 110;
		req->v1.fragmented_dwell = 44;
		req->v1.extended_dwell = 90;
	}

	if (bgscan) {
		/* Limit off-channel time so we don't starve the AP link. */
		const uint32_t timeout = htole32(120);
		if (isset(sc->sc_ucode_api,
		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
			req->v8.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
			req->v8.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
		} else if (isset(sc->sc_ucode_api,
		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
			req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
			req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
		} else {
			req->v1.max_out_time = timeout;
			req->v1.suspend_time = timeout;
		}
	}

	/* scan_priority is at the same offset in all request versions. */
	req->v1.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);

	cmd_data = iwm_get_scan_req_umac_data(sc, req);
	chanparam = iwm_get_scan_req_umac_chan_param(sc, req);
	chanparam->count = iwm_umac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_umac *)cmd_data,
	    ic->ic_des_esslen != 0, bgscan);
	chanparam->flags = 0;

	/* The tail follows the per-channel configuration array. */
	tail_data = cmd_data + sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels;
	tail = tail_data;
	/* tail v1 layout differs in preq and direct_scan member fields. */
	tailv1 = tail_data;

	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
			tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
			tail->direct_scan[0].len = ic->ic_des_esslen;
			memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
			    ic->ic_des_esslen);
		} else {
			tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
			tailv1->direct_scan[0].len = ic->ic_des_esslen;
			memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
			    ic->ic_des_esslen);
		}
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
	} else
		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);

	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL);
	} else {
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
	}

	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
		err = iwm_fill_probe_req(sc, &tail->preq);
	else
		err = iwm_fill_probe_req_v1(sc, &tailv1->preq);
	if (err) {
		free(req, M_DEVBUF, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	tail->schedule[0].interval = 0;
	tail->schedule[0].iter_count = 1;

	err = iwm_send_cmd(sc, &hcmd);
	free(req, M_DEVBUF, req_len);
	return err;
}
6099 
6100 uint8_t
6101 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
6102 {
6103 	int i;
6104 	uint8_t rval;
6105 
6106 	for (i = 0; i < rs->rs_nrates; i++) {
6107 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
6108 		if (rval == iwm_rates[ridx].rate)
6109 			return rs->rs_rates[i];
6110 	}
6111 
6112 	return 0;
6113 }
6114 
6115 int
6116 iwm_rval2ridx(int rval)
6117 {
6118 	int ridx;
6119 
6120 	for (ridx = 0; ridx < nitems(iwm_rates); ridx++) {
6121 		if (iwm_rates[ridx].plcp == IWM_RATE_INVM_PLCP)
6122 			continue;
6123 		if (rval == iwm_rates[ridx].rate)
6124 			break;
6125 	}
6126 
6127        return ridx;
6128 }
6129 
/*
 * Compute the CCK and OFDM basic-rate bitmaps used by the firmware to
 * choose ACK/control-response rates, based on the node's basic rate
 * set.  Mandatory rates below the lowest basic rate are added, as
 * required by 802.11-2007 9.6 (see the long comment below).
 */
void
iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;
	int lowest_present_cck = -1;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK rates exist only on 2GHz channels. */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
6215 
/*
 * Fill in the fields of a MAC context command which are common to all
 * MAC context operations (add/modify/remove): MAC type, addresses,
 * ACK rates, preamble/slot flags, EDCA parameters and HT protection.
 * For monitor mode only the addresses and type are set.
 */
void
iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_ctx_cmd *cmd, uint32_t action)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	/* Only monitor and station modes are supported by this driver. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_LISTENER);
	else if (ic->ic_opmode == IEEE80211_M_STA)
		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	else
		panic("unsupported operating mode %d\n", ic->ic_opmode);
	cmd->tsf_id = htole32(IWM_TSF_ID_A);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* No BSS in monitor mode; remaining fields stay zeroed. */
		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
		return;
	}

	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/* Map net80211 EDCA parameters onto the firmware's Tx FIFOs. */
	for (i = 0; i < EDCA_NUM_AC; i++) {
		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
		int txf = iwm_ac_to_tx_fifo[i];

		cmd->ac[txf].cw_min = htole16(IWM_EXP2(ac->ac_ecwmin));
		cmd->ac[txf].cw_max = htole16(IWM_EXP2(ac->ac_ecwmax));
		cmd->ac[txf].aifsn = ac->ac_aifsn;
		cmd->ac[txf].fifos_mask = (1 << txf);
		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
	}
	if (ni->ni_flags & IEEE80211_NODE_QOS)
		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);

	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* Translate the AP's HT operation protection mode. */
		enum ieee80211_htprot htprot =
		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
		switch (htprot) {
		case IEEE80211_HTPROT_NONE:
			break;
		case IEEE80211_HTPROT_NONMEMBER:
		case IEEE80211_HTPROT_NONHT_MIXED:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
				cmd->protection_flags |=
				    htole32(IWM_MAC_PROT_FLG_SELF_CTS_EN);
			break;
		case IEEE80211_HTPROT_20MHZ:
			if (ic->ic_htcaps & IEEE80211_HTCAP_CBW20_40) {
				/* XXX ... and if our channel is 40 MHz ... */
				cmd->protection_flags |=
				    htole32(IWM_MAC_PROT_FLG_HT_PROT |
				    IWM_MAC_PROT_FLG_FAT_PROT);
				if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
					cmd->protection_flags |= htole32(
					    IWM_MAC_PROT_FLG_SELF_CTS_EN);
			}
			break;
		default:
			break;
		}

		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
	}
	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
#undef IWM_EXP2
}
6306 
/*
 * Fill the station-specific portion of a MAC context command with
 * association state and beacon/DTIM timing derived from the node.
 */
void
iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_data_sta *sta, int assoc)
{
	struct ieee80211_node *ni = &in->in_ni;
	uint32_t dtim_off;
	uint64_t tsf;

	/* Offset from the last beacon to the next DTIM, in usec (TU units). */
	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
	/* ni_tstamp is the little-endian TSF from the last beacon. */
	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
	tsf = letoh64(tsf);

	sta->is_assoc = htole32(assoc);
	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
	sta->dtim_tsf = htole64(tsf + dtim_off);
	sta->bi = htole32(ni->ni_intval);
	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
	sta->listen_interval = htole32(10);
	sta->assoc_id = htole32(ni->ni_associd);
	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
}
6330 
/*
 * Add, modify or remove the firmware MAC context, depending on
 * 'action'.  Asserts against double-add and double-remove using the
 * IWM_FLAG_MAC_ACTIVE driver flag (the flag itself is toggled by the
 * callers).  Returns 0 or an errno from command submission.
 */
int
iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
    int assoc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_mac_ctx_cmd cmd;
	int active = (sc->sc_flags & IWM_FLAG_MAC_ACTIVE);

	if (action == IWM_FW_CTXT_ACTION_ADD && active)
		panic("MAC already added");
	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
		panic("MAC already removed");

	memset(&cmd, 0, sizeof(cmd));

	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action);

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* Monitor mode: accept everything the firmware can pass up. */
		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_PROMISC |
		    IWM_MAC_FILTER_IN_CONTROL_AND_MGMT |
		    IWM_MAC_FILTER_ACCEPT_GRP |
		    IWM_MAC_FILTER_IN_BEACON |
		    IWM_MAC_FILTER_IN_PROBE_REQUEST |
		    IWM_MAC_FILTER_IN_CRC32);
	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
		/*
		 * Allow beacons to pass through as long as we are not
		 * associated or we do not have dtim period information.
		 */
		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
	else
		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);

	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
6367 
/*
 * Send a time-quota command dividing the firmware's scheduling session
 * among active bindings.  With a single interface this driver has at
 * most one active binding; 'running' controls whether it gets quota.
 */
int
iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in, int running)
{
	struct iwm_time_quota_cmd cmd;
	int i, idx, num_active_macs, quota, quota_rem;
	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
	int n_ifs[IWM_MAX_BINDINGS] = {0, };
	uint16_t id;

	memset(&cmd, 0, sizeof(cmd));

	/* currently, PHY ID == binding ID */
	if (in && in->in_phyctxt) {
		id = in->in_phyctxt->id;
		KASSERT(id < IWM_MAX_BINDINGS);
		colors[id] = in->in_phyctxt->color;
		if (running)
			n_ifs[id] = 1;
	}

	/*
	 * The FW's scheduling session consists of
	 * IWM_MAX_QUOTA fragments. Divide these fragments
	 * equally between all the bindings that require quota
	 */
	num_active_macs = 0;
	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
		num_active_macs += n_ifs[i];
	}

	quota = 0;
	quota_rem = 0;
	if (num_active_macs) {
		quota = IWM_MAX_QUOTA / num_active_macs;
		quota_rem = IWM_MAX_QUOTA % num_active_macs;
	}

	/* Compact valid bindings into the front of the quota array. */
	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
		if (colors[i] < 0)
			continue;

		cmd.quotas[idx].id_and_color =
			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));

		if (n_ifs[i] <= 0) {
			cmd.quotas[idx].quota = htole32(0);
			cmd.quotas[idx].max_duration = htole32(0);
		} else {
			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
			cmd.quotas[idx].max_duration = htole32(0);
		}
		idx++;
	}

	/* Give the remainder of the session to the first binding */
	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);

	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
	    sizeof(cmd), &cmd);
}
6429 
/*
 * Queue a task on 'taskq', taking a reference on sc->task_refs for
 * the task's lifetime.  The reference is dropped immediately if the
 * task was already queued, and no task is queued while the driver is
 * shutting down.
 */
void
iwm_add_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
{
	int s = splnet();

	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
		splx(s);
		return;
	}

	/* Take the reference before task_add() to avoid a 0->1 race. */
	refcnt_take(&sc->task_refs);
	if (!task_add(taskq, task))
		refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
6445 
6446 void
6447 iwm_del_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
6448 {
6449 	if (task_del(taskq, task))
6450 		refcnt_rele(&sc->task_refs);
6451 }
6452 
/*
 * Start a foreground scan, aborting any background scan first.
 * On success the interface transitions to SCAN state and the
 * link is torn down (unless a background scan was in progress).
 * Returns 0 or an errno value.
 */
int
iwm_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int err;

	if (sc->sc_flags & IWM_FLAG_BGSCAN) {
		err = iwm_scan_abort(sc);
		if (err) {
			printf("%s: could not abort background scan\n",
			    DEVNAME(sc));
			return err;
		}
	}

	/* Prefer the UMAC scan API when the firmware supports it. */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
		err = iwm_umac_scan(sc, 0);
	else
		err = iwm_lmac_scan(sc, 0);
	if (err) {
		printf("%s: could not initiate scan\n", DEVNAME(sc));
		return err;
	}

	/*
	 * The current mode might have been fixed during association.
	 * Ensure all channels get scanned.
	 */
	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);

	sc->sc_flags |= IWM_FLAG_SCANNING;
	if (ifp->if_flags & IFF_DEBUG)
		printf("%s: %s -> %s\n", ifp->if_xname,
		    ieee80211_state_name[ic->ic_state],
		    ieee80211_state_name[IEEE80211_S_SCAN]);
	if ((sc->sc_flags & IWM_FLAG_BGSCAN) == 0) {
		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
		ieee80211_node_cleanup(ic, ic->ic_bss);
	}
	ic->ic_state = IEEE80211_S_SCAN;
	iwm_led_blink_start(sc);
	wakeup(&ic->ic_state); /* wake iwm_init() */

	return 0;
}
6500 
6501 int
6502 iwm_bgscan(struct ieee80211com *ic)
6503 {
6504 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
6505 	int err;
6506 
6507 	if (sc->sc_flags & IWM_FLAG_SCANNING)
6508 		return 0;
6509 
6510 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6511 		err = iwm_umac_scan(sc, 1);
6512 	else
6513 		err = iwm_lmac_scan(sc, 1);
6514 	if (err) {
6515 		printf("%s: could not initiate scan\n", DEVNAME(sc));
6516 		return err;
6517 	}
6518 
6519 	sc->sc_flags |= IWM_FLAG_BGSCAN;
6520 	return 0;
6521 }
6522 
6523 int
6524 iwm_umac_scan_abort(struct iwm_softc *sc)
6525 {
6526 	struct iwm_umac_scan_abort cmd = { 0 };
6527 
6528 	return iwm_send_cmd_pdu(sc,
6529 	    IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC),
6530 	    0, sizeof(cmd), &cmd);
6531 }
6532 
/*
 * Ask the firmware to abort an LMAC (scan-offload) scan.
 * Returns 0 if the abort was accepted, EBUSY if the firmware reported
 * that no scan could be aborted, or an errno from command submission.
 */
int
iwm_lmac_scan_abort(struct iwm_softc *sc)
{
	struct iwm_host_cmd cmd = {
		.id = IWM_SCAN_OFFLOAD_ABORT_CMD,
	};
	int err, status;

	err = iwm_send_cmd_status(sc, &cmd, &status);
	if (err)
		return err;

	if (status != IWM_CAN_ABORT_STATUS) {
		/*
		 * The scan abort will return 1 for success or
		 * 2 for "failure".  A failure condition can be
		 * due to simply not being in an active scan which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is completed.
		 */
		return EBUSY;
	}

	return 0;
}
6558 
6559 int
6560 iwm_scan_abort(struct iwm_softc *sc)
6561 {
6562 	int err;
6563 
6564 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6565 		err = iwm_umac_scan_abort(sc);
6566 	else
6567 		err = iwm_lmac_scan_abort(sc);
6568 
6569 	if (err == 0)
6570 		sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
6571 	return err;
6572 }
6573 
6574 int
6575 iwm_auth(struct iwm_softc *sc)
6576 {
6577 	struct ieee80211com *ic = &sc->sc_ic;
6578 	struct iwm_node *in = (void *)ic->ic_bss;
6579 	uint32_t duration;
6580 	int generation = sc->sc_generation, err;
6581 
6582 	splassert(IPL_NET);
6583 
6584 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6585 		sc->sc_phyctxt[0].channel = ic->ic_ibss_chan;
6586 	else
6587 		sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
6588 	err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
6589 	    IWM_FW_CTXT_ACTION_MODIFY, 0);
6590 	if (err) {
6591 		printf("%s: could not update PHY context (error %d)\n",
6592 		    DEVNAME(sc), err);
6593 		return err;
6594 	}
6595 	in->in_phyctxt = &sc->sc_phyctxt[0];
6596 
6597 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
6598 	if (err) {
6599 		printf("%s: could not add MAC context (error %d)\n",
6600 		    DEVNAME(sc), err);
6601 		return err;
6602  	}
6603 	sc->sc_flags |= IWM_FLAG_MAC_ACTIVE;
6604 
6605 	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
6606 	if (err) {
6607 		printf("%s: could not add binding (error %d)\n",
6608 		    DEVNAME(sc), err);
6609 		goto rm_mac_ctxt;
6610 	}
6611 	sc->sc_flags |= IWM_FLAG_BINDING_ACTIVE;
6612 
6613 	err = iwm_add_sta_cmd(sc, in, 0);
6614 	if (err) {
6615 		printf("%s: could not add sta (error %d)\n",
6616 		    DEVNAME(sc), err);
6617 		goto rm_binding;
6618 	}
6619 	sc->sc_flags |= IWM_FLAG_STA_ACTIVE;
6620 
6621 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6622 		return 0;
6623 
6624 	/*
6625 	 * Prevent the FW from wandering off channel during association
6626 	 * by "protecting" the session with a time event.
6627 	 */
6628 	if (in->in_ni.ni_intval)
6629 		duration = in->in_ni.ni_intval * 2;
6630 	else
6631 		duration = IEEE80211_DUR_TU;
6632 	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
6633 
6634 	return 0;
6635 
6636 rm_binding:
6637 	if (generation == sc->sc_generation) {
6638 		iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
6639 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
6640 	}
6641 rm_mac_ctxt:
6642 	if (generation == sc->sc_generation) {
6643 		iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
6644 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
6645 	}
6646 	return err;
6647 }
6648 
/*
 * Undo iwm_auth(): cancel session protection, remove the station,
 * flush the Tx path, and remove the binding and MAC context, in
 * reverse order of their creation.  Returns 0 or an errno value.
 */
int
iwm_deauth(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
	int ac, tfd_queue_msk, err;

	splassert(IPL_NET);

	iwm_unprotect_session(sc, in);

	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
		err = iwm_rm_sta_cmd(sc, in);
		if (err) {
			printf("%s: could not remove STA (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
		sc->sc_rx_ba_sessions = 0;
	}

	/* Build a mask of the queues used for this station's EDCA ACs. */
	tfd_queue_msk = 0;
	for (ac = 0; ac < EDCA_NUM_AC; ac++) {
		int qid = ac;
		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
			qid += IWM_DQA_MIN_MGMT_QUEUE;
		/*
		 * NOTE(review): htole32() is applied to each single-bit
		 * value before OR-ing into a host-order int mask; on
		 * big-endian this would scatter bits.  Presumably
		 * iwm_flush_tx_path() expects a host-order mask --
		 * verify whether the byte swap belongs here at all.
		 */
		tfd_queue_msk |= htole32(1 << qid);
	}

	err = iwm_flush_tx_path(sc, tfd_queue_msk);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	if (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE) {
		err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
		if (err) {
			printf("%s: could not remove binding (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
	}

	if (sc->sc_flags & IWM_FLAG_MAC_ACTIVE) {
		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
		if (err) {
			printf("%s: could not remove MAC context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
	}

	return 0;
}
6708 
6709 int
6710 iwm_assoc(struct iwm_softc *sc)
6711 {
6712 	struct ieee80211com *ic = &sc->sc_ic;
6713 	struct iwm_node *in = (void *)ic->ic_bss;
6714 	int update_sta = (sc->sc_flags & IWM_FLAG_STA_ACTIVE);
6715 	int err;
6716 
6717 	splassert(IPL_NET);
6718 
6719 	err = iwm_add_sta_cmd(sc, in, update_sta);
6720 	if (err) {
6721 		printf("%s: could not %s STA (error %d)\n",
6722 		    DEVNAME(sc), update_sta ? "update" : "add", err);
6723 		return err;
6724 	}
6725 
6726 	return 0;
6727 }
6728 
6729 int
6730 iwm_disassoc(struct iwm_softc *sc)
6731 {
6732 	struct ieee80211com *ic = &sc->sc_ic;
6733 	struct iwm_node *in = (void *)ic->ic_bss;
6734 	int err;
6735 
6736 	splassert(IPL_NET);
6737 
6738 	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
6739 		err = iwm_rm_sta_cmd(sc, in);
6740 		if (err) {
6741 			printf("%s: could not remove STA (error %d)\n",
6742 			    DEVNAME(sc), err);
6743 			return err;
6744 		}
6745 		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
6746 		sc->sc_rx_ba_sessions = 0;
6747 	}
6748 
6749 	return 0;
6750 }
6751 
/*
 * Transition to RUN state: reconfigure PHY/STA/MAC contexts for the
 * now-associated (or monitoring) state, enable power management and
 * quotas, and initialize rate adaptation.  Returns 0 or an errno.
 */
int
iwm_run(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
	int err;

	splassert(IPL_NET);

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* Add a MAC context and a sniffing STA. */
		err = iwm_auth(sc);
		if (err)
			return err;
	}

	/* Configure Rx chains for MIMO. */
	if ((ic->ic_opmode == IEEE80211_M_MONITOR ||
	    (in->in_ni.ni_flags & IEEE80211_NODE_HT)) &&
	    iwm_mimo_enabled(sc)) {
		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0],
		    2, 2, IWM_FW_CTXT_ACTION_MODIFY, 0);
		if (err) {
			printf("%s: failed to update PHY\n",
			    DEVNAME(sc));
			return err;
		}
	}

	/* Update STA again, for HT-related settings such as MIMO. */
	err = iwm_add_sta_cmd(sc, in, 1);
	if (err) {
		printf("%s: could not update STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* We have now been assigned an associd by the AP. */
	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
	if (err) {
		printf("%s: could not set sf full on (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwm_allow_mcast(sc);
	if (err) {
		printf("%s: could not allow mcast (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwm_power_update_device(sc);
	if (err) {
		printf("%s: could not send power command (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
#ifdef notyet
	/*
	 * Disabled for now. Default beacon filter settings
	 * prevent net80211 from getting ERP and HT protection
	 * updates from beacons.
	 */
	err = iwm_enable_beacon_filter(sc, in);
	if (err) {
		printf("%s: could not enable beacon filter\n",
		    DEVNAME(sc));
		return err;
	}
#endif
	err = iwm_power_mac_update_mode(sc, in);
	if (err) {
		printf("%s: could not update MAC power (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwm_update_quotas(sc, in, 1);
	if (err) {
		printf("%s: could not update quotas (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Initialize rate control state (AMRR for legacy, MiRA for HT). */
	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
	ieee80211_mira_node_init(&in->in_mn);

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		iwm_led_blink_start(sc);
		return 0;
	}

	/* Start at lowest available bit-rate, AMRR will raise. */
	in->in_ni.ni_txrate = 0;
	in->in_ni.ni_txmcs = 0;
	in->chosen_txrate = 0;
	in->chosen_txmcs = 0;
	iwm_setrates(in, 0);

	/* Periodic calibration while in RUN state. */
	timeout_add_msec(&sc->sc_calib_to, 500);
	iwm_led_enable(sc);

	return 0;
}
6863 
/*
 * Leave RUN state: disable smart-FIFO and beacon filtering, revoke
 * quotas, mark the MAC as unassociated, and reset the Tx chain config
 * in case MIMO was enabled.  Returns 0 or an errno value.
 */
int
iwm_run_stop(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
	int err;

	splassert(IPL_NET);

	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		iwm_led_blink_stop(sc);

	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
	if (err)
		return err;

	iwm_disable_beacon_filter(sc);

	err = iwm_update_quotas(sc, in, 0);
	if (err) {
		printf("%s: could not update quotas (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* assoc=0 marks the MAC context as unassociated. */
	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	/* Reset Tx chains in case MIMO was enabled. */
	if ((in->in_ni.ni_flags & IEEE80211_NODE_HT) &&
	    iwm_mimo_enabled(sc)) {
		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
		    IWM_FW_CTXT_ACTION_MODIFY, 0);
		if (err) {
			printf("%s: failed to update PHY\n", DEVNAME(sc));
			return err;
		}
	}

	return 0;
}
6908 
6909 struct ieee80211_node *
6910 iwm_node_alloc(struct ieee80211com *ic)
6911 {
6912 	return malloc(sizeof (struct iwm_node), M_DEVBUF, M_NOWAIT | M_ZERO);
6913 }
6914 
6915 int
6916 iwm_set_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
6917     struct ieee80211_key *k)
6918 {
6919 	struct iwm_softc *sc = ic->ic_softc;
6920 	struct iwm_add_sta_key_cmd_v1 cmd;
6921 
6922 	memset(&cmd, 0, sizeof(cmd));
6923 
6924 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
6925 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
6926 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
6927 	    IWM_STA_KEY_FLG_KEYID_MSK));
6928 	if (k->k_flags & IEEE80211_KEY_GROUP)
6929 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
6930 
6931 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
6932 	cmd.common.key_offset = 0;
6933 	cmd.common.sta_id = IWM_STATION_ID;
6934 
6935 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
6936 	    sizeof(cmd), &cmd);
6937 }
6938 
6939 int
6940 iwm_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
6941     struct ieee80211_key *k)
6942 {
6943 	struct iwm_softc *sc = ic->ic_softc;
6944 	struct iwm_add_sta_key_cmd cmd;
6945 
6946 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
6947 	    k->k_cipher != IEEE80211_CIPHER_CCMP)  {
6948 		/* Fallback to software crypto for other ciphers. */
6949 		return (ieee80211_set_key(ic, ni, k));
6950 	}
6951 
6952 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
6953 		return iwm_set_key_v1(ic, ni, k);
6954 
6955 	memset(&cmd, 0, sizeof(cmd));
6956 
6957 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
6958 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
6959 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
6960 	    IWM_STA_KEY_FLG_KEYID_MSK));
6961 	if (k->k_flags & IEEE80211_KEY_GROUP)
6962 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
6963 
6964 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
6965 	cmd.common.key_offset = 0;
6966 	cmd.common.sta_id = IWM_STATION_ID;
6967 
6968 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
6969 
6970 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
6971 	    sizeof(cmd), &cmd);
6972 }
6973 
6974 void
6975 iwm_delete_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
6976     struct ieee80211_key *k)
6977 {
6978 	struct iwm_softc *sc = ic->ic_softc;
6979 	struct iwm_add_sta_key_cmd_v1 cmd;
6980 
6981 	memset(&cmd, 0, sizeof(cmd));
6982 
6983 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
6984 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
6985 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
6986 	    IWM_STA_KEY_FLG_KEYID_MSK));
6987 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
6988 	cmd.common.key_offset = 0;
6989 	cmd.common.sta_id = IWM_STATION_ID;
6990 
6991 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
6992 }
6993 
6994 void
6995 iwm_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
6996     struct ieee80211_key *k)
6997 {
6998 	struct iwm_softc *sc = ic->ic_softc;
6999 	struct iwm_add_sta_key_cmd cmd;
7000 
7001 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
7002 	    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
7003 		/* Fallback to software crypto for other ciphers. */
7004                 ieee80211_delete_key(ic, ni, k);
7005 		return;
7006 	}
7007 
7008 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
7009 		return iwm_delete_key_v1(ic, ni, k);
7010 
7011 	memset(&cmd, 0, sizeof(cmd));
7012 
7013 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
7014 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
7015 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
7016 	    IWM_STA_KEY_FLG_KEYID_MSK));
7017 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
7018 	cmd.common.key_offset = 0;
7019 	cmd.common.sta_id = IWM_STATION_ID;
7020 
7021 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
7022 }
7023 
void
iwm_calib_timeout(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
	struct ieee80211_node *ni = &in->in_ni;
	int s;

	/*
	 * Periodic (500ms) timer driving AMRR rate adaptation for
	 * non-HT station mode.  HT rates are adapted elsewhere (MiRA),
	 * and fixed-rate configurations are left alone.
	 */
	s = splnet();
	if ((ic->ic_fixed_rate == -1 || ic->ic_fixed_mcs == -1) &&
	    (ni->ni_flags & IEEE80211_NODE_HT) == 0 &&
	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
		/*
		 * If AMRR has chosen a new TX rate we must update
		 * the firmware's LQ rate table from process context.
		 * ni_txrate may change again before the task runs so
		 * cache the chosen rate in the iwm_node structure.
		 */
		if (ni->ni_txrate != in->chosen_txrate) {
			in->chosen_txrate = ni->ni_txrate;
			iwm_setrates(in, 1);
		}
	}

	splx(s);

	/* Re-arm; the timer is stopped via timeout_del() in iwm_newstate(). */
	timeout_add_msec(&sc->sc_calib_to, 500);
}
7054 
void
iwm_setrates(struct iwm_node *in, int async)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd lqcmd;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int i, ridx, ridx_min, ridx_max, j, sgi_ok = 0, mimo, tab = 0;
	struct iwm_host_cmd cmd = {
		.id = IWM_LQ_CMD,
		.len = { sizeof(lqcmd), },
	};

	/*
	 * Build and send the firmware's link-quality (LQ) command,
	 * which contains the rate table used for transmissions to
	 * this node.  'async' selects whether the command may sleep
	 * waiting for completion (async == 0) or not.
	 */
	cmd.flags = async ? IWM_CMD_ASYNC : 0;

	memset(&lqcmd, 0, sizeof(lqcmd));
	lqcmd.sta_id = IWM_STATION_ID;

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		lqcmd.flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/* Short guard interval is usable only if both sides support it. */
	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
	    ieee80211_node_supports_ht_sgi20(ni)) {
		ni->ni_flags |= IEEE80211_NODE_HT_SGI20;
		sgi_ok = 1;
	}

	/*
	 * Fill the LQ rate selection table with legacy and/or HT rates
	 * in descending order, i.e. with the node's current TX rate first.
	 * In cases where throughput of an HT rate corresponds to a legacy
	 * rate it makes no sense to add both. We rely on the fact that
	 * iwm_rates is laid out such that equivalent HT/legacy rates share
	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
	 */
	j = 0;
	ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
	mimo = iwm_is_mimo_mcs(in->chosen_txmcs);
	ridx_max = (mimo ? IWM_RIDX_MAX : IWM_LAST_HT_SISO_RATE);
	for (ridx = ridx_max; ridx >= ridx_min; ridx--) {
		uint8_t plcp = iwm_rates[ridx].plcp;
		uint8_t ht_plcp = iwm_rates[ridx].ht_plcp;

		if (j >= nitems(lqcmd.rs_table))
			break;
		tab = 0;
		if (ni->ni_flags & IEEE80211_NODE_HT) {
		    	if (ht_plcp == IWM_RATE_HT_SISO_MCS_INV_PLCP)
				continue;
			/* Do not mix SISO and MIMO HT rates. */
			if ((mimo && !iwm_is_mimo_ht_plcp(ht_plcp)) ||
			    (!mimo && iwm_is_mimo_ht_plcp(ht_plcp)))
				continue;
			/* Include only MCS the peer can receive. */
			for (i = in->chosen_txmcs; i >= 0; i--) {
				if (isclr(ni->ni_rxmcs, i))
					continue;
				if (ridx == iwm_mcs2ridx[i]) {
					tab = ht_plcp;
					tab |= IWM_RATE_MCS_HT_MSK;
					if (sgi_ok)
						tab |= IWM_RATE_MCS_SGI_MSK;
					break;
				}
			}
		} else if (plcp != IWM_RATE_INVM_PLCP) {
			for (i = in->chosen_txrate; i >= 0; i--) {
				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
				    IEEE80211_RATE_VAL)) {
					tab = plcp;
					break;
				}
			}
		}

		if (tab == 0)
			continue;

		/* MIMO rates use both antennas; SISO/legacy use antenna A. */
		if (iwm_is_mimo_ht_plcp(ht_plcp))
			tab |= IWM_RATE_MCS_ANT_AB_MSK;
		else
			tab |= IWM_RATE_MCS_ANT_A_MSK;

		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		lqcmd.rs_table[j++] = htole32(tab);
	}

	lqcmd.mimo_delim = (mimo ? j : 0);

	/* Fill the rest with the lowest possible rate */
	while (j < nitems(lqcmd.rs_table)) {
		tab = iwm_rates[ridx_min].plcp;
		if (IWM_RIDX_IS_CCK(ridx_min))
			tab |= IWM_RATE_MCS_CCK_MSK;
		tab |= IWM_RATE_MCS_ANT_A_MSK;
		lqcmd.rs_table[j++] = htole32(tab);
	}

	lqcmd.single_stream_ant_msk = IWM_ANT_A;
	lqcmd.dual_stream_ant_msk = IWM_ANT_AB;

	lqcmd.agg_time_limit = htole16(4000);	/* 4ms */
	lqcmd.agg_disable_start_th = 3;
#ifdef notyet
	lqcmd.agg_frame_cnt_limit = 0x3f;
#else
	lqcmd.agg_frame_cnt_limit = 1; /* tx agg disabled */
#endif

	cmd.data[0] = &lqcmd;
	iwm_send_cmd(sc, &cmd);
}
7169 
int
iwm_media_change(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rate, ridx;
	int err;

	/*
	 * ifmedia change callback: translate a user-selected fixed
	 * rate or MCS into a hardware rate index, then restart the
	 * interface (if running) so the setting takes effect.
	 */
	err = ieee80211_media_change(ifp);
	if (err != ENETRESET)
		return err;

	if (ic->ic_fixed_mcs != -1)
		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
	else if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		/*
		 * NOTE(review): if no iwm_rates entry matches, ridx is
		 * IWM_RIDX_MAX + 1 here; presumably every supported
		 * rate has a table entry -- verify.
		 */
		sc->sc_fixed_ridx = ridx;
	}

	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
	    (IFF_UP | IFF_RUNNING)) {
		iwm_stop(ifp);
		err = iwm_init(ifp);
	}
	return err;
}
7201 
void
iwm_newstate_task(void *psc)
{
	struct iwm_softc *sc = (struct iwm_softc *)psc;
	struct ieee80211com *ic = &sc->sc_ic;
	enum ieee80211_state nstate = sc->ns_nstate;
	enum ieee80211_state ostate = ic->ic_state;
	int arg = sc->ns_arg;
	int err = 0, s = splnet();

	/*
	 * Process-context handler for 802.11 state transitions queued
	 * by iwm_newstate().  Walks the firmware down from the old
	 * state if needed, then up to the requested one, and finally
	 * invokes net80211's own newstate handler.  Holds a task
	 * reference which must be released on every exit path.
	 */
	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
		/* iwm_stop() is waiting for us. */
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;
	}

	if (ostate == IEEE80211_S_SCAN) {
		if (nstate == ostate) {
			if (sc->sc_flags & IWM_FLAG_SCANNING) {
				refcnt_rele_wake(&sc->task_refs);
				splx(s);
				return;
			}
			/* Firmware is no longer scanning. Do another scan. */
			goto next_scan;
		} else
			iwm_led_blink_stop(sc);
	}

	if (nstate <= ostate) {
		/*
		 * Tear down firmware state step by step until we are at
		 * or below the target state.  Cases deliberately fall
		 * through from RUN down to INIT.
		 */
		switch (ostate) {
		case IEEE80211_S_RUN:
			err = iwm_run_stop(sc);
			if (err)
				goto out;
			/* FALLTHROUGH */
		case IEEE80211_S_ASSOC:
			if (nstate <= IEEE80211_S_ASSOC) {
				err = iwm_disassoc(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_AUTH:
			if (nstate <= IEEE80211_S_AUTH) {
				err = iwm_deauth(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_SCAN:
		case IEEE80211_S_INIT:
			break;
		}

		/* Die now if iwm_stop() was called while we were sleeping. */
		if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
			refcnt_rele_wake(&sc->task_refs);
			splx(s);
			return;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
next_scan:
		err = iwm_scan(sc);
		if (err)
			break;
		/* The scan completion path continues the transition. */
		refcnt_rele_wake(&sc->task_refs);
		splx(s);
		return;

	case IEEE80211_S_AUTH:
		err = iwm_auth(sc);
		break;

	case IEEE80211_S_ASSOC:
		err = iwm_assoc(sc);
		break;

	case IEEE80211_S_RUN:
		err = iwm_run(sc);
		break;
	}

out:
	/* On error, schedule a full device re-init instead of advancing. */
	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
		if (err)
			task_add(systq, &sc->init_task);
		else
			sc->sc_newstate(ic, nstate, arg);
	}
	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}
7302 
int
iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_softc *sc = ifp->if_softc;
	struct iwm_node *in = (void *)ic->ic_bss;

	/*
	 * net80211 state-change hook.  Firmware commands may sleep, so
	 * the actual transition is deferred to iwm_newstate_task();
	 * here we only record the request and cancel RUN-state
	 * housekeeping (calibration timer, MiRA, aggregation tasks).
	 */
	if (ic->ic_state == IEEE80211_S_RUN) {
		timeout_del(&sc->sc_calib_to);
		ieee80211_mira_cancel_timeouts(&in->in_mn);
		iwm_del_task(sc, systq, &sc->ba_task);
		iwm_del_task(sc, systq, &sc->htprot_task);
	}

	/* Stash the request for the task; it reads these fields. */
	sc->ns_nstate = nstate;
	sc->ns_arg = arg;

	iwm_add_task(sc, sc->sc_nswq, &sc->newstate_task);

	return 0;
}
7324 
7325 void
7326 iwm_endscan(struct iwm_softc *sc)
7327 {
7328 	struct ieee80211com *ic = &sc->sc_ic;
7329 
7330 	if ((sc->sc_flags & (IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN)) == 0)
7331 		return;
7332 
7333 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
7334 	ieee80211_end_scan(&ic->ic_if);
7335 }
7336 
7337 /*
7338  * Aging and idle timeouts for the different possible scenarios
7339  * in default configuration
7340  */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast flow: { aging, idle } */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{	/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{	/* Tx re-attempt */
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
7364 
7365 /*
7366  * Aging and idle timeouts for the different possible scenarios
7367  * in single BSS MAC configuration.
7368  */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast flow: { aging, idle } */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{	/* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{	/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{	/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{	/* Tx re-attempt */
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
7392 
7393 void
7394 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
7395     struct ieee80211_node *ni)
7396 {
7397 	int i, j, watermark;
7398 
7399 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
7400 
7401 	/*
7402 	 * If we are in association flow - check antenna configuration
7403 	 * capabilities of the AP station, and choose the watermark accordingly.
7404 	 */
7405 	if (ni) {
7406 		if (ni->ni_flags & IEEE80211_NODE_HT) {
7407 			if (ni->ni_rxmcs[1] != 0)
7408 				watermark = IWM_SF_W_MARK_MIMO2;
7409 			else
7410 				watermark = IWM_SF_W_MARK_SISO;
7411 		} else {
7412 			watermark = IWM_SF_W_MARK_LEGACY;
7413 		}
7414 	/* default watermark value for unassociated mode. */
7415 	} else {
7416 		watermark = IWM_SF_W_MARK_MIMO2;
7417 	}
7418 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
7419 
7420 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
7421 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
7422 			sf_cmd->long_delay_timeouts[i][j] =
7423 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
7424 		}
7425 	}
7426 
7427 	if (ni) {
7428 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
7429 		       sizeof(iwm_sf_full_timeout));
7430 	} else {
7431 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
7432 		       sizeof(iwm_sf_full_timeout_def));
7433 	}
7434 
7435 }
7436 
7437 int
7438 iwm_sf_config(struct iwm_softc *sc, int new_state)
7439 {
7440 	struct ieee80211com *ic = &sc->sc_ic;
7441 	struct iwm_sf_cfg_cmd sf_cmd = {
7442 		.state = htole32(new_state),
7443 	};
7444 	int err = 0;
7445 
7446 #if 0	/* only used for models with sdio interface, in iwlwifi */
7447 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
7448 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
7449 #endif
7450 
7451 	switch (new_state) {
7452 	case IWM_SF_UNINIT:
7453 	case IWM_SF_INIT_OFF:
7454 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
7455 		break;
7456 	case IWM_SF_FULL_ON:
7457 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
7458 		break;
7459 	default:
7460 		return EINVAL;
7461 	}
7462 
7463 	err = iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
7464 				   sizeof(sf_cmd), &sf_cmd);
7465 	return err;
7466 }
7467 
7468 int
7469 iwm_send_bt_init_conf(struct iwm_softc *sc)
7470 {
7471 	struct iwm_bt_coex_cmd bt_cmd;
7472 
7473 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
7474 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
7475 
7476 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
7477 	    &bt_cmd);
7478 }
7479 
int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = IWM_CMD_WANT_RESP,
		.data = { &mcc_cmd },
	};
	int err;
	int resp_v2 = isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	/*
	 * Send a "mobile country code" update to the firmware for
	 * location-aware regulatory (LAR).  'alpha2' is the two-letter
	 * country code (e.g. "ZZ" for the world default).  The command
	 * and response sizes depend on the firmware's LAR API version.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000 &&
	    !sc->sc_nvm.lar_enabled) {
		return 0;
	}

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	if (resp_v2) {
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
		hcmd.resp_pkt_len = sizeof(struct iwm_rx_packet) +
		    sizeof(struct iwm_mcc_update_resp);
	} else {
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
		hcmd.resp_pkt_len = sizeof(struct iwm_rx_packet) +
		    sizeof(struct iwm_mcc_update_resp_v1);
	}

	err = iwm_send_cmd(sc, &hcmd);
	if (err)
		return err;

	/* The response contents are not used; just release the buffer. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
7524 
7525 void
7526 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
7527 {
7528 	struct iwm_host_cmd cmd = {
7529 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
7530 		.len = { sizeof(uint32_t), },
7531 		.data = { &backoff, },
7532 	};
7533 
7534 	iwm_send_cmd(sc, &cmd);
7535 }
7536 
7537 void
7538 iwm_free_fw_paging(struct iwm_softc *sc)
7539 {
7540 	int i;
7541 
7542 	if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
7543 		return;
7544 
7545 	for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
7546 		iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
7547 	}
7548 
7549 	memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
7550 }
7551 
int
iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
{
	int sec_idx, idx;
	uint32_t offset = 0;

	/*
	 * Copy the paging sections of the firmware image into the DMA
	 * blocks previously allocated by iwm_alloc_fw_paging_mem().
	 * Returns 0 on success or EINVAL (after freeing the paging
	 * memory) when the image lacks the expected sections.
	 *
	 * find where is the paging image start point:
	 * if CPU2 exist and it's in paging format, then the image looks like:
	 * CPU1 sections (2 or more)
	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
	 * CPU2 sections (not paged)
	 * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
	 * non paged to CPU2 paging sec
	 * CPU2 paging CSS
	 * CPU2 paging image (including instruction and data)
	 */
	for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
		if (image->fw_sect[sec_idx].fws_devoff ==
		    IWM_PAGING_SEPARATOR_SECTION) {
			sec_idx++;
			break;
		}
	}

	/*
	 * If paging is enabled there should be at least 2 more sections left
	 * (one for CSS and one for Paging data)
	 */
	if (sec_idx >= nitems(image->fw_sect) - 1) {
		printf("%s: Paging: Missing CSS and/or paging sections\n",
		    DEVNAME(sc));
		iwm_free_fw_paging(sc);
		return EINVAL;
	}

	/* copy the CSS block to the dram */
	DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n",
	    DEVNAME(sc), sec_idx));

	memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
	    image->fw_sect[sec_idx].fws_data,
	    sc->fw_paging_db[0].fw_paging_size);

	DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
	    DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));

	/* All remaining paging data lives in the next section. */
	sec_idx++;

	/*
	 * copy the paging blocks to the dram
	 * loop index starts from 1 since the CSS block was already copied to
	 * dram and CSS index is 0.
	 * loop stops at num_of_paging_blk since the last block is not full.
	 */
	for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
		    sc->fw_paging_db[idx].fw_paging_size);

		DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
		    DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));

		offset += sc->fw_paging_db[idx].fw_paging_size;
	}

	/* copy the last paging block */
	if (sc->num_of_pages_in_last_blk > 0) {
		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
		    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);

		DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
		    DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
	}

	return 0;
}
7630 
int
iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
{
	int blk_idx = 0;
	int error, num_of_pages;

	/*
	 * Allocate DMA memory for firmware paging: one 4K block for
	 * the CSS, followed by up to IWM_NUM_OF_FW_PAGING_BLOCKS 32K
	 * blocks sized from the image's paging_mem_size.  Returns 0 on
	 * success or ENOMEM (after freeing partial allocations).
	 */
	if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
		int i;
		/* Device got reset, and we setup firmware paging again */
		bus_dmamap_sync(sc->sc_dmat,
		    sc->fw_paging_db[0].fw_paging_block.map,
		    0, IWM_FW_PAGING_SIZE,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
		for (i = 1; i < sc->num_of_paging_blk + 1; i++) {
			bus_dmamap_sync(sc->sc_dmat,
			    sc->fw_paging_db[i].fw_paging_block.map,
			    0, IWM_PAGING_BLOCK_SIZE,
			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
		}
		return 0;
	}

	/* ensure IWM_BLOCK_2_EXP_SIZE is power of 2 of IWM_PAGING_BLOCK_SIZE */
#if (1 << IWM_BLOCK_2_EXP_SIZE) != IWM_PAGING_BLOCK_SIZE
#error IWM_BLOCK_2_EXP_SIZE must be power of 2 of IWM_PAGING_BLOCK_SIZE
#endif

	num_of_pages = image->paging_mem_size / IWM_FW_PAGING_SIZE;
	sc->num_of_paging_blk =
	    ((num_of_pages - 1) / IWM_NUM_OF_PAGE_PER_GROUP) + 1;

	/* Pages left over after filling all full blocks. */
	sc->num_of_pages_in_last_blk =
		num_of_pages -
		IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);

	DPRINTF(("%s: Paging: allocating mem for %d paging blocks, each block"
	    " holds 8 pages, last block holds %d pages\n", DEVNAME(sc),
	    sc->num_of_paging_blk,
	    sc->num_of_pages_in_last_blk));

	/* allocate block of 4Kbytes for paging CSS */
	error = iwm_dma_contig_alloc(sc->sc_dmat,
	    &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
	    4096);
	if (error) {
		/* free all the previous pages since we failed */
		iwm_free_fw_paging(sc);
		return ENOMEM;
	}

	sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;

	DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
	    DEVNAME(sc)));

	/*
	 * allocate blocks in dram.
	 * since that CSS allocated in fw_paging_db[0] loop start from index 1
	 */
	for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
		/* allocate block of IWM_PAGING_BLOCK_SIZE (32K) */
		/* XXX Use iwm_dma_contig_alloc for allocating */
		error = iwm_dma_contig_alloc(sc->sc_dmat,
		     &sc->fw_paging_db[blk_idx].fw_paging_block,
		    IWM_PAGING_BLOCK_SIZE, 4096);
		if (error) {
			/* free all the previous pages since we failed */
			iwm_free_fw_paging(sc);
			return ENOMEM;
		}

		sc->fw_paging_db[blk_idx].fw_paging_size =
		    IWM_PAGING_BLOCK_SIZE;

		DPRINTF((
		    "%s: Paging: allocated 32K bytes for firmware paging.\n",
		    DEVNAME(sc)));
	}

	return 0;
}
7712 
int
iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
{
	int err;

	/*
	 * Ensure DMA memory for firmware paging exists, then copy the
	 * paging sections of the firmware image into it.
	 */
	err = iwm_alloc_fw_paging_mem(sc, fw);
	if (err)
		return err;

	return iwm_fill_paging_mem(sc, fw);
}
7724 
/* send paging cmd to FW in case CPU2 has paging image */
int
iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
{
	int blk_idx;
	uint32_t dev_phy_addr;
	struct iwm_fw_paging_cmd fw_paging_cmd = {
		.flags =
			htole32(IWM_PAGING_CMD_IS_SECURED |
				IWM_PAGING_CMD_IS_ENABLED |
				(sc->num_of_pages_in_last_blk <<
				IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
		.block_num = htole32(sc->num_of_paging_blk),
	};

	/*
	 * Loop over all paging blocks + CSS block: hand each block's
	 * physical address (as a page number) to the firmware and make
	 * the CPU's view of the memory visible to the device.
	 */
	for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
		dev_phy_addr = htole32(
		    sc->fw_paging_db[blk_idx].fw_paging_block.paddr >>
		    IWM_PAGE_2_EXP_SIZE);
		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
		bus_dmamap_sync(sc->sc_dmat,
		    sc->fw_paging_db[blk_idx].fw_paging_block.map, 0,
		    blk_idx == 0 ? IWM_FW_PAGING_SIZE : IWM_PAGING_BLOCK_SIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}

	return iwm_send_cmd_pdu(sc, iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD,
					       IWM_LONG_GROUP, 0),
	    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
}
7757 
7758 int
7759 iwm_init_hw(struct iwm_softc *sc)
7760 {
7761 	struct ieee80211com *ic = &sc->sc_ic;
7762 	int err, i, ac, qid;
7763 
7764 	err = iwm_preinit(sc);
7765 	if (err)
7766 		return err;
7767 
7768 	err = iwm_start_hw(sc);
7769 	if (err) {
7770 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
7771 		return err;
7772 	}
7773 
7774 	err = iwm_run_init_mvm_ucode(sc, 0);
7775 	if (err)
7776 		return err;
7777 
7778 	/* Should stop and start HW since INIT image just loaded. */
7779 	iwm_stop_device(sc);
7780 	err = iwm_start_hw(sc);
7781 	if (err) {
7782 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
7783 		return err;
7784 	}
7785 
7786 	/* Restart, this time with the regular firmware */
7787 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
7788 	if (err) {
7789 		printf("%s: could not load firmware\n", DEVNAME(sc));
7790 		goto err;
7791 	}
7792 
7793 	if (!iwm_nic_lock(sc))
7794 		return EBUSY;
7795 
7796 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
7797 	if (err) {
7798 		printf("%s: could not init tx ant config (error %d)\n",
7799 		    DEVNAME(sc), err);
7800 		goto err;
7801 	}
7802 
7803 	err = iwm_send_phy_db_data(sc);
7804 	if (err) {
7805 		printf("%s: could not init phy db (error %d)\n",
7806 		    DEVNAME(sc), err);
7807 		goto err;
7808 	}
7809 
7810 	err = iwm_send_phy_cfg_cmd(sc);
7811 	if (err) {
7812 		printf("%s: could not send phy config (error %d)\n",
7813 		    DEVNAME(sc), err);
7814 		goto err;
7815 	}
7816 
7817 	err = iwm_send_bt_init_conf(sc);
7818 	if (err) {
7819 		printf("%s: could not init bt coex (error %d)\n",
7820 		    DEVNAME(sc), err);
7821 		return err;
7822 	}
7823 
7824 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
7825 		err = iwm_send_dqa_cmd(sc);
7826 		if (err)
7827 			return err;
7828 	}
7829 
7830 	/* Add auxiliary station for scanning */
7831 	err = iwm_add_aux_sta(sc);
7832 	if (err) {
7833 		printf("%s: could not add aux station (error %d)\n",
7834 		    DEVNAME(sc), err);
7835 		goto err;
7836 	}
7837 
7838 	for (i = 0; i < 1; i++) {
7839 		/*
7840 		 * The channel used here isn't relevant as it's
7841 		 * going to be overwritten in the other flows.
7842 		 * For now use the first channel we have.
7843 		 */
7844 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
7845 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
7846 		    IWM_FW_CTXT_ACTION_ADD, 0);
7847 		if (err) {
7848 			printf("%s: could not add phy context %d (error %d)\n",
7849 			    DEVNAME(sc), i, err);
7850 			goto err;
7851 		}
7852 	}
7853 
7854 	/* Initialize tx backoffs to the minimum. */
7855 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
7856 		iwm_tt_tx_backoff(sc, 0);
7857 
7858 
7859 	err = iwm_config_ltr(sc);
7860 	if (err) {
7861 		printf("%s: PCIe LTR configuration failed (error %d)\n",
7862 		    DEVNAME(sc), err);
7863 	}
7864 
7865 	err = iwm_power_update_device(sc);
7866 	if (err) {
7867 		printf("%s: could not send power command (error %d)\n",
7868 		    DEVNAME(sc), err);
7869 		goto err;
7870 	}
7871 
7872 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
7873 		err = iwm_send_update_mcc_cmd(sc, "ZZ");
7874 		if (err) {
7875 			printf("%s: could not init LAR (error %d)\n",
7876 			    DEVNAME(sc), err);
7877 			goto err;
7878 		}
7879 	}
7880 
7881 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
7882 		err = iwm_config_umac_scan(sc);
7883 		if (err) {
7884 			printf("%s: could not configure scan (error %d)\n",
7885 			    DEVNAME(sc), err);
7886 			goto err;
7887 		}
7888 	}
7889 
7890 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7891 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7892 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
7893 		else
7894 			qid = IWM_AUX_QUEUE;
7895 		err = iwm_enable_txq(sc, IWM_MONITOR_STA_ID, qid,
7896 		    iwm_ac_to_tx_fifo[EDCA_AC_BE]);
7897 		if (err) {
7898 			printf("%s: could not enable monitor inject Tx queue "
7899 			    "(error %d)\n", DEVNAME(sc), err);
7900 			goto err;
7901 		}
7902 	} else {
7903 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
7904 			if (isset(sc->sc_enabled_capa,
7905 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7906 				qid = ac + IWM_DQA_MIN_MGMT_QUEUE;
7907 			else
7908 				qid = ac;
7909 			err = iwm_enable_txq(sc, IWM_STATION_ID, qid,
7910 			    iwm_ac_to_tx_fifo[ac]);
7911 			if (err) {
7912 				printf("%s: could not enable Tx queue %d "
7913 				    "(error %d)\n", DEVNAME(sc), ac, err);
7914 				goto err;
7915 			}
7916 		}
7917 	}
7918 
7919 	err = iwm_disable_beacon_filter(sc);
7920 	if (err) {
7921 		printf("%s: could not disable beacon filter (error %d)\n",
7922 		    DEVNAME(sc), err);
7923 		goto err;
7924 	}
7925 
7926 err:
7927 	iwm_nic_unlock(sc);
7928 	return err;
7929 }
7930 
7931 /* Allow multicast from our BSSID. */
7932 int
7933 iwm_allow_mcast(struct iwm_softc *sc)
7934 {
7935 	struct ieee80211com *ic = &sc->sc_ic;
7936 	struct ieee80211_node *ni = ic->ic_bss;
7937 	struct iwm_mcast_filter_cmd *cmd;
7938 	size_t size;
7939 	int err;
7940 
7941 	size = roundup(sizeof(*cmd), 4);
7942 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
7943 	if (cmd == NULL)
7944 		return ENOMEM;
7945 	cmd->filter_own = 1;
7946 	cmd->port_id = 0;
7947 	cmd->count = 0;
7948 	cmd->pass_all = 1;
7949 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
7950 
7951 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
7952 	    0, size, cmd);
7953 	free(cmd, M_DEVBUF, size);
7954 	return err;
7955 }
7956 
/*
 * Bring the interface up: load firmware via iwm_init_hw(), mark the
 * interface RUNNING, and (unless in monitor mode) kick off a scan,
 * sleeping until the SCAN state transition has completed.
 * Called with the ioctl rwlock held; returns 0 or an errno.
 */
int
iwm_init(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	int err, generation;

	rw_assert_wrlock(&sc->ioctl_rwl);

	/*
	 * Bump the generation counter. If it changes while we sleep
	 * below, another init/stop cycle has begun and this one is stale.
	 */
	generation = ++sc->sc_generation;

	KASSERT(sc->task_refs.refs == 0);
	refcnt_init(&sc->task_refs);

	err = iwm_init_hw(sc);
	if (err) {
		/* Only tear down if no newer cycle has superseded us. */
		if (generation == sc->sc_generation)
			iwm_stop(ifp);
		return err;
	}

	if (sc->sc_nvm.sku_cap_11n_enable)
		iwm_setup_ht_rates(sc);

	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_flags |= IFF_RUNNING;

	/* Monitor mode goes straight to RUN; no scan needed. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
		return 0;
	}

	ieee80211_begin_scan(ifp);

	/*
	 * ieee80211_begin_scan() ends up scheduling iwm_newstate_task().
	 * Wait until the transition to SCAN state has completed.
	 */
	do {
		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwminit",
		    SEC_TO_NSEC(1));
		if (generation != sc->sc_generation)
			return ENXIO;
		if (err)
			return err;
	} while (ic->ic_state != IEEE80211_S_SCAN);

	return 0;
}
8007 
/*
 * ifp->if_start handler: drain management frames and, when associated,
 * data frames from the send queue and hand them to iwm_tx().
 */
void
iwm_start(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac = EDCA_AC_BE; /* XXX */

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* need to send management frames even if we're not RUNning */
		m = mq_dequeue(&ic->ic_mgtq);
		if (m) {
			/* Node reference travels in the pkthdr cookie. */
			ni = m->m_pkthdr.ph_cookie;
			goto sendit;
		}

		if (ic->ic_state != IEEE80211_S_RUN ||
		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
			break;

		m = ifq_dequeue(&ifp->if_snd);
		if (!m)
			break;
		/* Need a contiguous Ethernet header for encapsulation. */
		if (m->m_len < sizeof (*eh) &&
		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
			ifp->if_oerrors++;
			continue;
		}

 sendit:
#if NBPFILTER > 0
		if (ic->ic_rawbpf != NULL)
			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
#endif
		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_release_node(ic, ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the watchdog for this transmission. */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}

	return;
}
8075 
/*
 * Stop the interface: block and drain pending tasks, shut the device
 * down, and reset all driver soft state back to INIT.
 * Called with the ioctl rwlock held.
 */
void
iwm_stop(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
	int i, s = splnet();

	rw_assert_wrlock(&sc->ioctl_rwl);

	sc->sc_flags |= IWM_FLAG_SHUTDOWN; /* Disallow new tasks. */

	/* Cancel scheduled tasks and let any stale tasks finish up. */
	task_del(systq, &sc->init_task);
	iwm_del_task(sc, sc->sc_nswq, &sc->newstate_task);
	iwm_del_task(sc, systq, &sc->ba_task);
	iwm_del_task(sc, systq, &sc->htprot_task);
	KASSERT(sc->task_refs.refs >= 1);
	refcnt_finalize(&sc->task_refs, "iwmstop");

	iwm_stop_device(sc);

	/* Reset soft state. */

	/* Invalidate sleepers in iwm_init()/iwm_ioctl(); they check this. */
	sc->sc_generation++;
	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
		sc->sc_cmd_resp_pkt[i] = NULL;
		sc->sc_cmd_resp_len[i] = 0;
	}
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	in->in_phyctxt = NULL;
	if (ic->ic_state == IEEE80211_S_RUN)
		ieee80211_mira_cancel_timeouts(&in->in_mn); /* XXX refcount? */

	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
	sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
	sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
	sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
	sc->sc_flags &= ~IWM_FLAG_HW_ERR;
	sc->sc_flags &= ~IWM_FLAG_SHUTDOWN;

	sc->sc_rx_ba_sessions = 0;

	/* Move net80211 back to INIT state. */
	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);

	timeout_del(&sc->sc_calib_to); /* XXX refcount? */
	iwm_led_blink_stop(sc);
	ifp->if_timer = sc->sc_tx_timer = 0;

	splx(s);
}
8131 
8132 void
8133 iwm_watchdog(struct ifnet *ifp)
8134 {
8135 	struct iwm_softc *sc = ifp->if_softc;
8136 
8137 	ifp->if_timer = 0;
8138 	if (sc->sc_tx_timer > 0) {
8139 		if (--sc->sc_tx_timer == 0) {
8140 			printf("%s: device timeout\n", DEVNAME(sc));
8141 #ifdef IWM_DEBUG
8142 			iwm_nic_error(sc);
8143 #endif
8144 			if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
8145 				task_add(systq, &sc->init_task);
8146 			ifp->if_oerrors++;
8147 			return;
8148 		}
8149 		ifp->if_timer = 1;
8150 	}
8151 
8152 	ieee80211_watchdog(ifp);
8153 }
8154 
/*
 * Interface ioctl handler. Serialized via sc->ioctl_rwl because
 * iwm_init() may sleep waiting for the SCAN transition.
 */
int
iwm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct iwm_softc *sc = ifp->if_softc;
	int s, err = 0, generation = sc->sc_generation;

	/*
	 * Prevent processes from entering this function while another
	 * process is tsleep'ing in it.
	 */
	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
	/* If the device was stopped/restarted while we slept, bail out. */
	if (err == 0 && generation != sc->sc_generation) {
		rw_exit(&sc->ioctl_rwl);
		return ENXIO;
	}
	if (err)
		return err;
	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				err = iwm_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				iwm_stop(ifp);
		}
		break;

	default:
		err = ieee80211_ioctl(ifp, cmd, data);
	}

	/* net80211 asks for a restart via ENETRESET. */
	if (err == ENETRESET) {
		err = 0;
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			iwm_stop(ifp);
			err = iwm_init(ifp);
		}
	}

	splx(s);
	rw_exit(&sc->ioctl_rwl);

	return err;
}
8207 
8208 #ifdef IWM_DEBUG
8209 /*
8210  * Note: This structure is read from the device with IO accesses,
8211  * and the reading already does the endian conversion. As it is
8212  * read with uint32_t-sized accesses, any members with a different size
8213  * need to be ordered correctly though!
8214  */
8215 struct iwm_error_event_table {
8216 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
8217 	uint32_t error_id;		/* type of error */
8218 	uint32_t trm_hw_status0;	/* TRM HW status */
8219 	uint32_t trm_hw_status1;	/* TRM HW status */
8220 	uint32_t blink2;		/* branch link */
8221 	uint32_t ilink1;		/* interrupt link */
8222 	uint32_t ilink2;		/* interrupt link */
8223 	uint32_t data1;		/* error-specific data */
8224 	uint32_t data2;		/* error-specific data */
8225 	uint32_t data3;		/* error-specific data */
8226 	uint32_t bcon_time;		/* beacon timer */
8227 	uint32_t tsf_low;		/* network timestamp function timer */
8228 	uint32_t tsf_hi;		/* network timestamp function timer */
8229 	uint32_t gp1;		/* GP1 timer register */
8230 	uint32_t gp2;		/* GP2 timer register */
8231 	uint32_t fw_rev_type;	/* firmware revision type */
8232 	uint32_t major;		/* uCode version major */
8233 	uint32_t minor;		/* uCode version minor */
8234 	uint32_t hw_ver;		/* HW Silicon version */
8235 	uint32_t brd_ver;		/* HW board version */
8236 	uint32_t log_pc;		/* log program counter */
8237 	uint32_t frame_ptr;		/* frame pointer */
8238 	uint32_t stack_ptr;		/* stack pointer */
8239 	uint32_t hcmd;		/* last host command header */
8240 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
8241 				 * rxtx_flag */
8242 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
8243 				 * host_flag */
8244 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
8245 				 * enc_flag */
8246 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
8247 				 * time_flag */
8248 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
8249 				 * wico interrupt */
8250 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
8251 	uint32_t wait_event;		/* wait event() caller address */
8252 	uint32_t l2p_control;	/* L2pControlField */
8253 	uint32_t l2p_duration;	/* L2pDurationField */
8254 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
8255 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
8256 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
8257 				 * (LMPM_PMG_SEL) */
8258 	uint32_t u_timestamp;	/* indicate when the date and time of the
8259 				 * compilation */
8260 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
8261 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
8262 
8263 /*
8264  * UMAC error struct - relevant starting from family 8000 chip.
8265  * Note: This structure is read from the device with IO accesses,
8266  * and the reading already does the endian conversion. As it is
8267  * read with u32-sized accesses, any members with a different size
8268  * need to be ordered correctly though!
8269  */
8270 struct iwm_umac_error_event_table {
8271 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
8272 	uint32_t error_id;	/* type of error */
8273 	uint32_t blink1;	/* branch link */
8274 	uint32_t blink2;	/* branch link */
8275 	uint32_t ilink1;	/* interrupt link */
8276 	uint32_t ilink2;	/* interrupt link */
8277 	uint32_t data1;		/* error-specific data */
8278 	uint32_t data2;		/* error-specific data */
8279 	uint32_t data3;		/* error-specific data */
8280 	uint32_t umac_major;
8281 	uint32_t umac_minor;
8282 	uint32_t frame_pointer;	/* core register 27*/
8283 	uint32_t stack_pointer;	/* core register 28 */
8284 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
8285 	uint32_t nic_isr_pref;	/* ISR status register */
8286 } __packed;
8287 
/* Byte offset of the first log element and per-element size. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
8290 
/*
 * Dump the UMAC error event table from device memory to the console.
 * Only meaningful when the firmware has provided a UMAC table pointer.
 */
void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* Sanity-check the table pointer reported by the firmware. */
	if (base < 0x800000) {
		printf("%s: Invalid error log pointer 0x%08x\n",
		    DEVNAME(sc), base);
		return;
	}

	/* iwm_read_mem() counts in 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		printf("%s: reading errlog failed\n", DEVNAME(sc));
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
			sc->sc_flags, table.valid);
	}

	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
		iwm_desc_lookup(table.error_id));
	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
	    table.frame_pointer);
	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
	    table.stack_pointer);
	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
	    table.nic_isr_pref);
}
8335 
/* Upper nibble of the error id encodes which CPU asserted. */
#define IWM_FW_SYSASSERT_CPU_MASK 0xf0000000
/*
 * Map firmware assert codes to human-readable names.
 * The final entry acts as the catch-all for unknown codes;
 * iwm_desc_lookup() relies on it being last.
 */
static struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "BAD_COMMAND", 0x39 },	/* two firmware codes share this name */
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
8362 
8363 const char *
8364 iwm_desc_lookup(uint32_t num)
8365 {
8366 	int i;
8367 
8368 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
8369 		if (advanced_lookup[i].num ==
8370 		    (num & ~IWM_FW_SYSASSERT_CPU_MASK))
8371 			return advanced_lookup[i].name;
8372 
8373 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
8374 	return advanced_lookup[i].name;
8375 }
8376 
8377 /*
8378  * Support for dumping the error log seemed like a good idea ...
8379  * but it's mostly hex junk and the only sensible thing is the
8380  * hw/ucode revision (which we know anyway).  Since it's here,
8381  * I'll just leave it in, just in case e.g. the Intel guys want to
8382  * help us decipher some "ADVANCED_SYSASSERT" later.
8383  */
8384 void
8385 iwm_nic_error(struct iwm_softc *sc)
8386 {
8387 	struct iwm_error_event_table table;
8388 	uint32_t base;
8389 
8390 	printf("%s: dumping device error log\n", DEVNAME(sc));
8391 	base = sc->sc_uc.uc_error_event_table;
8392 	if (base < 0x800000) {
8393 		printf("%s: Invalid error log pointer 0x%08x\n",
8394 		    DEVNAME(sc), base);
8395 		return;
8396 	}
8397 
8398 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8399 		printf("%s: reading errlog failed\n", DEVNAME(sc));
8400 		return;
8401 	}
8402 
8403 	if (!table.valid) {
8404 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
8405 		return;
8406 	}
8407 
8408 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8409 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
8410 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8411 		    sc->sc_flags, table.valid);
8412 	}
8413 
8414 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
8415 	    iwm_desc_lookup(table.error_id));
8416 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
8417 	    table.trm_hw_status0);
8418 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
8419 	    table.trm_hw_status1);
8420 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
8421 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
8422 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
8423 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
8424 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
8425 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
8426 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
8427 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
8428 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
8429 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
8430 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
8431 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
8432 	    table.fw_rev_type);
8433 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
8434 	    table.major);
8435 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
8436 	    table.minor);
8437 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
8438 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
8439 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
8440 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
8441 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
8442 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
8443 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
8444 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
8445 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
8446 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
8447 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
8448 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
8449 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
8450 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
8451 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
8452 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
8453 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
8454 
8455 	if (sc->sc_uc.uc_umac_error_event_table)
8456 		iwm_nic_umac_error(sc);
8457 }
8458 #endif
8459 
/*
 * Sync the DMA region just past the Rx packet header, then point the
 * given variable at the typed response payload which follows it.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/*
 * As above, but for variable-length payloads.
 * Fix: sync _len_ bytes as requested by the caller; the old code
 * passed sizeof(len), i.e. the size of the caller's 'len' variable
 * (typically 4 bytes), leaving most of the payload unsynced.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Advance the Rx ring index; 'count' is the ring size in caller scope. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % count);
8475 
8476 int
8477 iwm_rx_pkt_valid(struct iwm_rx_packet *pkt)
8478 {
8479 	int qid, idx, code;
8480 
8481 	qid = pkt->hdr.qid & ~0x80;
8482 	idx = pkt->hdr.idx;
8483 	code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8484 
8485 	return (!(qid == 0 && idx == 0 && code == 0) &&
8486 	    pkt->len_n_flags != htole32(IWM_FH_RSCSR_FRAME_INVALID));
8487 }
8488 
/*
 * Process one Rx buffer, which may contain several packed packets
 * (frames and firmware notifications). Received MPDUs are queued on
 * 'ml' for delivery to the net80211 stack by the caller.
 */
void
iwm_rx_pkt(struct iwm_softc *sc, struct iwm_rx_data *data, struct mbuf_list *ml)
{
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	struct iwm_rx_packet *pkt, *nextpkt;
	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
	struct mbuf *m0, *m;
	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
	int qid, idx, code, handled = 1;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* Walk all packets packed into this Rx buffer. */
	m0 = data->m;
	while (m0 && offset + minsz < IWM_RBUF_SIZE) {
		pkt = (struct iwm_rx_packet *)(m0->m_data + offset);
		qid = pkt->hdr.qid;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		if (!iwm_rx_pkt_valid(pkt))
			break;

		len = sizeof(pkt->len_n_flags) + iwm_rx_packet_len(pkt);
		if (len < sizeof(pkt->hdr) ||
		    len > (IWM_RBUF_SIZE - offset - minsz))
			break;

		if (code == IWM_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
			/* Take mbuf m0 off the RX ring. */
			if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur)) {
				ifp->if_ierrors++;
				break;
			}
			KASSERT(data->m != m0);
		}

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD: {
			size_t maxlen = IWM_RBUF_SIZE - offset - minsz;
			nextoff = offset +
			    roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
			nextpkt = (struct iwm_rx_packet *)
			    (m0->m_data + nextoff);
			if (nextoff + minsz >= IWM_RBUF_SIZE ||
			    !iwm_rx_pkt_valid(nextpkt)) {
				/* No need to copy last frame in buffer. */
				if (offset > 0)
					m_adj(m0, offset);
				if (sc->sc_mqrx_supported)
					iwm_rx_mpdu_mq(sc, m0, pkt->data,
					    maxlen, ml);
				else
					iwm_rx_mpdu(sc, m0, pkt->data,
					    maxlen, ml);
				m0 = NULL; /* stack owns m0 now; abort loop */
			} else {
				/*
				 * Create an mbuf which points to the current
				 * packet. Always copy from offset zero to
				 * preserve m_pkthdr.
				 */
				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
				if (m == NULL) {
					ifp->if_ierrors++;
					m_freem(m0);
					m0 = NULL;
					break;
				}
				m_adj(m, offset);
				if (sc->sc_mqrx_supported)
					iwm_rx_mpdu_mq(sc, m, pkt->data,
					    maxlen, ml);
				else
					iwm_rx_mpdu(sc, m, pkt->data,
					    maxlen, ml);
			}
 			break;
		}

		case IWM_TX_CMD:
			iwm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION:
			iwm_rx_bmiss(sc, pkt, data);
			break;

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		case IWM_ALIVE: {
			/*
			 * The payload size identifies which firmware
			 * "alive" response version we received.
			 */
			struct iwm_alive_resp_v1 *resp1;
			struct iwm_alive_resp_v2 *resp2;
			struct iwm_alive_resp_v3 *resp3;

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
				SYNC_RESP_STRUCT(resp1, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp1->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp1->log_event_table_ptr);
				sc->sched_base = le32toh(resp1->scd_base_ptr);
				if (resp1->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
				SYNC_RESP_STRUCT(resp2, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp2->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp2->log_event_table_ptr);
				sc->sched_base = le32toh(resp2->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp2->error_info_addr);
				if (resp2->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
				SYNC_RESP_STRUCT(resp3, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp3->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp3->log_event_table_ptr);
				sc->sched_base = le32toh(resp3->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp3->error_info_addr);
				if (resp3->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break;
		}

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);
			iwm_phy_db_set_section(sc, phy_db_notif);
			sc->sc_init_complete |= IWM_CALIB_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;
		}

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break;
		}

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Record the country code chosen by the firmware. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
		}
		/*
		 * NOTE(review): no 'break' here, so this falls through to
		 * the DTS cases below, which only break. Harmless today,
		 * but confirm whether the fallthrough is intentional.
		 */

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
				 IWM_DTS_MEASUREMENT_NOTIF_WIDE):
			break;

		/*
		 * These commands may have a synchronous waiter; copy the
		 * response packet out for iwm_send_cmd() to pick up.
		 */
		case IWM_ADD_STA_KEY:
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_LTR_CONFIG:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_SCAN_OFFLOAD_ABORT_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_WIDE_ID(IWM_LONG_GROUP,
				 IWM_FW_PAGING_BLOCK_CMD):
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
		case IWM_TIME_EVENT_CMD: {
			size_t pkt_len;

			/* No waiter for this response; nothing to do. */
			if (sc->sc_cmd_resp_pkt[idx] == NULL)
				break;

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);

			pkt_len = sizeof(pkt->len_n_flags) +
			    iwm_rx_packet_len(pkt);

			/* Discard failed or improperly sized responses. */
			if ((pkt->hdr.flags & IWM_CMD_FAILED_MSK) ||
			    pkt_len < sizeof(*pkt) ||
			    pkt_len > sc->sc_cmd_resp_len[idx]) {
				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
				    sc->sc_cmd_resp_len[idx]);
				sc->sc_cmd_resp_pkt[idx] = NULL;
				break;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
			break;
		}

		/* ignore */
		case IWM_PHY_DB_CMD:
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete |= IWM_INIT_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwm_endscan(sc);
			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwm_endscan(sc);
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwm_endscan(sc);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			printf("%s: firmware error 0x%x, cmd 0x%x\n",
				DEVNAME(sc), le32toh(resp->error_type),
				resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			uint32_t action;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Only care about the time event we last scheduled. */
			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
				break;
			action = le32toh(notif->action);
			if (action & IWM_TE_V2_NOTIF_HOST_EVENT_END)
				sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
			break;
		}

		case IWM_WIDE_ID(IWM_SYSTEM_GROUP,
		    IWM_FSEQ_VER_MISMATCH_NOTIFICATION):
		    break;

		/*
		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
		 * messages. Just ignore them for now.
		 */
		case IWM_DEBUG_LOG_MSG:
			break;

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			SYNC_RESP_STRUCT(rsp, pkt);

			break;
		}

		case IWM_WIDE_ID(IWM_DATA_PATH_GROUP, IWM_DQA_ENABLE_CMD):
			break;

		default:
			handled = 0;
			printf("%s: unhandled firmware response 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    DEVNAME(sc), code, pkt->len_n_flags,
			    (qid & ~0x80), idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWM_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (handled && !(qid & (1 << 7))) {
			iwm_cmd_done(sc, qid, idx, code);
		}

		offset += roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
	}

	/* Free the off-ring mbuf unless the stack took ownership above. */
	if (m0 && m0 != data->m)
		m_freem(m0);
}
8831 
/*
 * Rx interrupt handler: process all Rx buffers the firmware has
 * closed since our last visit, then update the hardware write pointer.
 */
void
iwm_notif_intr(struct iwm_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint32_t wreg;
	uint16_t hw;
	int count;

	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);

	/* Ring size and write-pointer register differ for multi-queue Rx. */
	if (sc->sc_mqrx_supported) {
		count = IWM_RX_MQ_RING_COUNT;
		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
	} else {
		count = IWM_RX_RING_COUNT;
		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
	}

	/* Index of the last Rx descriptor closed by the firmware. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	hw &= (count - 1);
	while (sc->rxq.cur != hw) {
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		iwm_rx_pkt(sc, data, &ml);
		ADVANCE_RXQ(sc);
	}
	if_input(&sc->sc_ic.ic_if, &ml);

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
	hw = (hw == 0) ? count - 1 : hw - 1;
	IWM_WRITE(sc, wreg, hw & ~7);
}
8867 
8868 int
8869 iwm_intr(void *arg)
8870 {
8871 	struct iwm_softc *sc = arg;
8872 	int handled = 0;
8873 	int rv = 0;
8874 	uint32_t r1, r2;
8875 
8876 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
8877 
8878 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
8879 		uint32_t *ict = sc->ict_dma.vaddr;
8880 		int tmp;
8881 
8882 		tmp = htole32(ict[sc->ict_cur]);
8883 		if (!tmp)
8884 			goto out_ena;
8885 
8886 		/*
8887 		 * ok, there was something.  keep plowing until we have all.
8888 		 */
8889 		r1 = r2 = 0;
8890 		while (tmp) {
8891 			r1 |= tmp;
8892 			ict[sc->ict_cur] = 0;
8893 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
8894 			tmp = htole32(ict[sc->ict_cur]);
8895 		}
8896 
8897 		/* this is where the fun begins.  don't ask */
8898 		if (r1 == 0xffffffff)
8899 			r1 = 0;
8900 
8901 		/*
8902 		 * Workaround for hardware bug where bits are falsely cleared
8903 		 * when using interrupt coalescing.  Bit 15 should be set if
8904 		 * bits 18 and 19 are set.
8905 		 */
8906 		if (r1 & 0xc0000)
8907 			r1 |= 0x8000;
8908 
8909 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
8910 	} else {
8911 		r1 = IWM_READ(sc, IWM_CSR_INT);
8912 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
8913 	}
8914 	if (r1 == 0 && r2 == 0) {
8915 		goto out_ena;
8916 	}
8917 	if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
8918 		goto out;
8919 
8920 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
8921 
8922 	/* ignored */
8923 	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
8924 
8925 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
8926 		handled |= IWM_CSR_INT_BIT_RF_KILL;
8927 		iwm_check_rfkill(sc);
8928 		task_add(systq, &sc->init_task);
8929 		rv = 1;
8930 		goto out_ena;
8931 	}
8932 
8933 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
8934 #ifdef IWM_DEBUG
8935 		int i;
8936 
8937 		iwm_nic_error(sc);
8938 
8939 		/* Dump driver status (TX and RX rings) while we're here. */
8940 		DPRINTF(("driver status:\n"));
8941 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
8942 			struct iwm_tx_ring *ring = &sc->txq[i];
8943 			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
8944 			    "queued=%-3d\n",
8945 			    i, ring->qid, ring->cur, ring->queued));
8946 		}
8947 		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
8948 		DPRINTF(("  802.11 state %s\n",
8949 		    ieee80211_state_name[sc->sc_ic.ic_state]));
8950 #endif
8951 
8952 		printf("%s: fatal firmware error\n", DEVNAME(sc));
8953 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
8954 			task_add(systq, &sc->init_task);
8955 		rv = 1;
8956 		goto out;
8957 
8958 	}
8959 
8960 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
8961 		handled |= IWM_CSR_INT_BIT_HW_ERR;
8962 		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
8963 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
8964 			sc->sc_flags |= IWM_FLAG_HW_ERR;
8965 			task_add(systq, &sc->init_task);
8966 		}
8967 		rv = 1;
8968 		goto out;
8969 	}
8970 
8971 	/* firmware chunk loaded */
8972 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
8973 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
8974 		handled |= IWM_CSR_INT_BIT_FH_TX;
8975 
8976 		sc->sc_fw_chunk_done = 1;
8977 		wakeup(&sc->sc_fw);
8978 	}
8979 
8980 	if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX |
8981 	    IWM_CSR_INT_BIT_RX_PERIODIC)) {
8982 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) {
8983 			handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
8984 			IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
8985 		}
8986 		if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
8987 			handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
8988 			IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
8989 		}
8990 
8991 		/* Disable periodic interrupt; we use it as just a one-shot. */
8992 		IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
8993 
8994 		/*
8995 		 * Enable periodic interrupt in 8 msec only if we received
8996 		 * real RX interrupt (instead of just periodic int), to catch
8997 		 * any dangling Rx interrupt.  If it was just the periodic
8998 		 * interrupt, there was no dangling Rx activity, and no need
8999 		 * to extend the periodic interrupt; one-shot is enough.
9000 		 */
9001 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX))
9002 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
9003 			    IWM_CSR_INT_PERIODIC_ENA);
9004 
9005 		iwm_notif_intr(sc);
9006 	}
9007 
9008 	rv = 1;
9009 
9010  out_ena:
9011 	iwm_restore_interrupts(sc);
9012  out:
9013 	return rv;
9014 }
9015 
9016 int
9017 iwm_intr_msix(void *arg)
9018 {
9019 	struct iwm_softc *sc = arg;
9020 	uint32_t inta_fh, inta_hw;
9021 	int vector = 0;
9022 
9023 	inta_fh = IWM_READ(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD);
9024 	inta_hw = IWM_READ(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD);
9025 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
9026 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
9027 	inta_fh &= sc->sc_fh_mask;
9028 	inta_hw &= sc->sc_hw_mask;
9029 
9030 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_Q0 ||
9031 	    inta_fh & IWM_MSIX_FH_INT_CAUSES_Q1) {
9032 		iwm_notif_intr(sc);
9033 	}
9034 
9035 	/* firmware chunk loaded */
9036 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
9037 		sc->sc_fw_chunk_done = 1;
9038 		wakeup(&sc->sc_fw);
9039 	}
9040 
9041 	if ((inta_fh & IWM_MSIX_FH_INT_CAUSES_FH_ERR) ||
9042 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
9043 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
9044 #ifdef IWM_DEBUG
9045 		int i;
9046 
9047 		iwm_nic_error(sc);
9048 
9049 		/* Dump driver status (TX and RX rings) while we're here. */
9050 		DPRINTF(("driver status:\n"));
9051 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
9052 			struct iwm_tx_ring *ring = &sc->txq[i];
9053 			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
9054 			    "queued=%-3d\n",
9055 			    i, ring->qid, ring->cur, ring->queued));
9056 		}
9057 		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
9058 		DPRINTF(("  802.11 state %s\n",
9059 		    ieee80211_state_name[sc->sc_ic.ic_state]));
9060 #endif
9061 
9062 		printf("%s: fatal firmware error\n", DEVNAME(sc));
9063 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
9064 			task_add(systq, &sc->init_task);
9065 		return 1;
9066 	}
9067 
9068 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
9069 		iwm_check_rfkill(sc);
9070 		task_add(systq, &sc->init_task);
9071 	}
9072 
9073 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
9074 		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
9075 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
9076 			sc->sc_flags |= IWM_FLAG_HW_ERR;
9077 			task_add(systq, &sc->init_task);
9078 		}
9079 		return 1;
9080 	}
9081 
9082 	/*
9083 	 * Before sending the interrupt the HW disables it to prevent
9084 	 * a nested interrupt. This is done by writing 1 to the corresponding
9085 	 * bit in the mask register. After handling the interrupt, it should be
9086 	 * re-enabled by clearing this bit. This register is defined as
9087 	 * write 1 clear (W1C) register, meaning that it's being clear
9088 	 * by writing 1 to the bit.
9089 	 */
9090 	IWM_WRITE(sc, IWM_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
9091 	return 1;
9092 }
9093 
/* Opaque autoconf match argument type (unused by iwm_match()). */
typedef void *iwm_match_t;
9095 
/* PCI vendor/product IDs of adapters supported by this driver. */
static const struct pci_matchid iwm_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3168_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8265_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9260_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_2 },
};
9113 
9114 int
9115 iwm_match(struct device *parent, iwm_match_t match __unused, void *aux)
9116 {
9117 	return pci_matchbyid((struct pci_attach_args *)aux, iwm_devices,
9118 	    nitems(iwm_devices));
9119 }
9120 
/*
 * Firmware-dependent initialization, run after root is mounted (see
 * iwm_attach_hook).  Loads the init ucode once to obtain NVM contents,
 * then configures channels, rates and the MAC address in net80211.
 * On repeat invocations (the static 'attached' flag is set) it only
 * refreshes our copy of the MAC address.
 * Returns 0 on success or an error number.
 */
int
iwm_preinit(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int err;
	static int attached;	/* set after the first successful fw load */

	err = iwm_prepare_card_hw(sc);
	if (err) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return err;
	}

	if (attached) {
		/* Update MAC in case the upper layers changed it. */
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
		    ((struct arpcom *)ifp)->ac_enaddr);
		return 0;
	}

	err = iwm_start_hw(sc);
	if (err) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return err;
	}

	/* Run the init firmware (reads NVM), then power the device down. */
	err = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (err)
		return err;

	/* Print version info and MAC address on first successful fw load. */
	attached = 1;
	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
	    DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));

	if (sc->sc_nvm.sku_cap_11n_enable)
		iwm_setup_ht_rates(sc);

	/* not all hardware can do 5GHz band */
	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));

	/* Configure channel information obtained from firmware. */
	ieee80211_channel_init(ifp);

	/* Configure MAC address. */
	err = if_setlladdr(ifp, ic->ic_myaddr);
	if (err)
		printf("%s: could not set MAC address (error %d)\n",
		    DEVNAME(sc), err);

	/* Re-init media now that channel/rate data is known. */
	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);

	return 0;
}
9180 
9181 void
9182 iwm_attach_hook(struct device *self)
9183 {
9184 	struct iwm_softc *sc = (void *)self;
9185 
9186 	KASSERT(!cold);
9187 
9188 	iwm_preinit(sc);
9189 }
9190 
/*
 * Autoconf attach: map PCI resources, establish the interrupt handler,
 * select firmware image and device-family parameters from the PCI
 * product ID, allocate DMA memory and TX/RX rings, and initialize
 * net80211 state.  Firmware-dependent setup is deferred to
 * iwm_attach_hook(), which runs after root is mounted.
 */
void
iwm_attach(struct device *parent, struct device *self, void *aux)
{
	struct iwm_softc *sc = (void *)self;
	struct pci_attach_args *pa = aux;
	pci_intr_handle_t ih;
	pcireg_t reg, memtype;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	const char *intrstr;
	int err;
	int txq_i, i;

	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	rw_init(&sc->ioctl_rwl, "iwmioctl");

	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
	if (err == 0) {
		printf("%s: PCIe capability structure not found!\n",
		    DEVNAME(sc));
		return;
	}

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	/* Enable bus-mastering and hardware bug workaround. */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	/* if !MSI */
	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
	}
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);

	/* Map device registers (BAR 0). */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
	if (err) {
		printf("%s: can't map mem space\n", DEVNAME(sc));
		return;
	}

	/* Prefer MSI-X, then MSI, then legacy INTx. */
	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
		sc->sc_msix = 1;
	} else if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
		printf("%s: can't map interrupt\n", DEVNAME(sc));
		return;
	}

	intrstr = pci_intr_string(sc->sc_pct, ih);
	if (sc->sc_msix)
		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
		    iwm_intr_msix, sc, DEVNAME(sc));
	else
		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
		    iwm_intr, sc, DEVNAME(sc));

	if (sc->sc_ih == NULL) {
		printf("\n");
		printf("%s: can't establish interrupt", DEVNAME(sc));
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(", %s\n", intrstr);

	/* Select firmware image and per-family parameters by product ID. */
	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_INTEL_WL_3160_1:
	case PCI_PRODUCT_INTEL_WL_3160_2:
		sc->sc_fwname = "iwm-3160-17";
		sc->host_interrupt_operation_mode = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		sc->sc_nvm_max_section_size = 16384;
		sc->nvm_type = IWM_NVM;
		break;
	case PCI_PRODUCT_INTEL_WL_3165_1:
	case PCI_PRODUCT_INTEL_WL_3165_2:
		sc->sc_fwname = "iwm-7265-17";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		sc->sc_nvm_max_section_size = 16384;
		sc->nvm_type = IWM_NVM;
		break;
	case PCI_PRODUCT_INTEL_WL_3168_1:
		sc->sc_fwname = "iwm-3168-29";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		sc->sc_nvm_max_section_size = 16384;
		sc->nvm_type = IWM_NVM_SDP;
		break;
	case PCI_PRODUCT_INTEL_WL_7260_1:
	case PCI_PRODUCT_INTEL_WL_7260_2:
		sc->sc_fwname = "iwm-7260-17";
		sc->host_interrupt_operation_mode = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		sc->sc_nvm_max_section_size = 16384;
		sc->nvm_type = IWM_NVM;
		break;
	case PCI_PRODUCT_INTEL_WL_7265_1:
	case PCI_PRODUCT_INTEL_WL_7265_2:
		sc->sc_fwname = "iwm-7265-17";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		sc->sc_nvm_max_section_size = 16384;
		sc->nvm_type = IWM_NVM;
		break;
	case PCI_PRODUCT_INTEL_WL_8260_1:
	case PCI_PRODUCT_INTEL_WL_8260_2:
		sc->sc_fwname = "iwm-8000C-34";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
		sc->sc_nvm_max_section_size = 32768;
		sc->nvm_type = IWM_NVM_EXT;
		break;
	case PCI_PRODUCT_INTEL_WL_8265_1:
		sc->sc_fwname = "iwm-8265-34";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
		sc->sc_nvm_max_section_size = 32768;
		sc->nvm_type = IWM_NVM_EXT;
		break;
	case PCI_PRODUCT_INTEL_WL_9260_1:
		sc->sc_fwname = "iwm-9260-34";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
		sc->sc_nvm_max_section_size = 32768;
		sc->sc_mqrx_supported = 1;
		break;
	case PCI_PRODUCT_INTEL_WL_9560_1:
	case PCI_PRODUCT_INTEL_WL_9560_2:
		sc->sc_fwname = "iwm-9000-34";
		sc->host_interrupt_operation_mode = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
		sc->sc_nvm_max_section_size = 32768;
		sc->sc_mqrx_supported = 1;
		sc->sc_integrated = 1;
		break;
	default:
		printf("%s: unknown adapter type\n", DEVNAME(sc));
		return;
	}

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
		uint32_t hw_step;

		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

		if (iwm_prepare_card_hw(sc) != 0) {
			printf("%s: could not initialize hardware\n",
			    DEVNAME(sc));
			return;
		}

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!err) {
			printf("%s: Failed to wake up the nic\n", DEVNAME(sc));
			return;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			printf("%s: Failed to lock the nic\n", DEVNAME(sc));
			return;
		}
	}

	/*
	 * Allocate DMA memory for firmware transfers.
	 * Must be aligned on a 16-byte boundary.
	 */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
	if (err) {
		printf("%s: could not allocate memory for firmware\n",
		    DEVNAME(sc));
		return;
	}

	/* Allocate "Keep Warm" page, used internally by the card. */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
	if (err) {
		printf("%s: could not allocate keep warm page\n", DEVNAME(sc));
		goto fail1;
	}

	/* Allocate interrupt cause table (ICT).*/
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
	if (err) {
		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
		goto fail2;
	}

	/* TX scheduler rings must be aligned on a 1KB boundary. */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
	if (err) {
		printf("%s: could not allocate TX scheduler rings\n",
		    DEVNAME(sc));
		goto fail3;
	}

	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
		if (err) {
			printf("%s: could not allocate TX ring %d\n",
			    DEVNAME(sc), txq_i);
			goto fail4;
		}
	}

	err = iwm_alloc_rx_ring(sc, &sc->rxq);
	if (err) {
		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
		goto fail4;
	}

	/* Task queue for newstate processing. */
	sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET, 0);
	if (sc->sc_nswq == NULL)
		goto fail4;

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_RSN |		/* WPA/RSN */
	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
	    IEEE80211_C_MONITOR |	/* monitor mode supported */
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */

	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
	ic->ic_htcaps |=
	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
	ic->ic_htxcaps = 0;
	ic->ic_txbfcaps = 0;
	ic->ic_aselcaps = 0;
	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);

	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
	}

	sc->sc_amrr.amrr_min_success_threshold =  1;
	sc->sc_amrr.amrr_max_success_threshold = 15;

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[1];

	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = iwm_ioctl;
	ifp->if_start = iwm_start;
	ifp->if_watchdog = iwm_watchdog;
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	if_attach(ifp);
	ieee80211_ifattach(ifp);
	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);

#if NBPFILTER > 0
	iwm_radiotap_attach(sc);
#endif
	timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
	timeout_set(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
	task_set(&sc->init_task, iwm_init_task, sc);
	task_set(&sc->newstate_task, iwm_newstate_task, sc);
	task_set(&sc->ba_task, iwm_ba_task, sc);
	task_set(&sc->htprot_task, iwm_htprot_task, sc);

	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_bgscan_start = iwm_bgscan;
	ic->ic_set_key = iwm_set_key;
	ic->ic_delete_key = iwm_delete_key;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;
	ic->ic_update_htprot = iwm_update_htprot;
	ic->ic_ampdu_rx_start = iwm_ampdu_rx_start;
	ic->ic_ampdu_rx_stop = iwm_ampdu_rx_stop;
#ifdef notyet
	ic->ic_ampdu_tx_start = iwm_ampdu_tx_start;
	ic->ic_ampdu_tx_stop = iwm_ampdu_tx_stop;
#endif
	/*
	 * We cannot read the MAC address without loading the
	 * firmware from disk. Postpone until mountroot is done.
	 */
	config_mountroot(self, iwm_attach_hook);

	return;

	/* Unwind allocations in reverse order on failure. */
fail4:	while (--txq_i >= 0)
		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
	iwm_free_rx_ring(sc, &sc->rxq);
	iwm_dma_contig_free(&sc->sched_dma);
fail3:	if (sc->ict_dma.vaddr != NULL)
		iwm_dma_contig_free(&sc->ict_dma);

fail2:	iwm_dma_contig_free(&sc->kw_dma);
fail1:	iwm_dma_contig_free(&sc->fw_dma);
	return;
}
9552 
9553 #if NBPFILTER > 0
9554 void
9555 iwm_radiotap_attach(struct iwm_softc *sc)
9556 {
9557 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
9558 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
9559 
9560 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
9561 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
9562 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
9563 
9564 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
9565 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
9566 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
9567 }
9568 #endif
9569 
/*
 * systq task to stop and (conditionally) restart the device.
 * Scheduled by the interrupt handlers on fatal firmware errors,
 * hardware errors and RF-kill changes, and from the wakeup path.
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s = splnet();
	int generation = sc->sc_generation;
	/* A fatal condition means we must not bring the interface back up. */
	int fatal = (sc->sc_flags & (IWM_FLAG_HW_ERR | IWM_FLAG_RFKILL));

	rw_enter_write(&sc->ioctl_rwl);
	/* Bail out if another stop/init cycle ran while we waited for the lock. */
	if (generation != sc->sc_generation) {
		rw_exit(&sc->ioctl_rwl);
		splx(s);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING)
		iwm_stop(ifp);
	else
		sc->sc_flags &= ~IWM_FLAG_HW_ERR;

	/* Restart only if the interface is up but not running, and no fatal error. */
	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	rw_exit(&sc->ioctl_rwl);
	splx(s);
}
9597 
/*
 * Restore PCI configuration and interrupt routing after system resume.
 * Returns 0 on success or an error from iwm_prepare_card_hw().
 */
int
iwm_resume(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
	iwm_conf_msix_hw(sc, 0);

	/* Re-arm RF-kill notification and sample the current switch state. */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return iwm_prepare_card_hw(sc);
}
9615 
9616 int
9617 iwm_activate(struct device *self, int act)
9618 {
9619 	struct iwm_softc *sc = (struct iwm_softc *)self;
9620 	struct ifnet *ifp = &sc->sc_ic.ic_if;
9621 	int err = 0;
9622 
9623 	switch (act) {
9624 	case DVACT_QUIESCE:
9625 		if (ifp->if_flags & IFF_RUNNING) {
9626 			rw_enter_write(&sc->ioctl_rwl);
9627 			iwm_stop(ifp);
9628 			rw_exit(&sc->ioctl_rwl);
9629 		}
9630 		break;
9631 	case DVACT_RESUME:
9632 		err = iwm_resume(sc);
9633 		if (err)
9634 			printf("%s: could not initialize hardware\n",
9635 			    DEVNAME(sc));
9636 		break;
9637 	case DVACT_WAKEUP:
9638 		/* Hardware should be up at this point. */
9639 		if (iwm_set_hw_ready(sc))
9640 			task_add(systq, &sc->init_task);
9641 		break;
9642 	}
9643 
9644 	return 0;
9645 }
9646 
/* Autoconf glue: driver descriptor. */
struct cfdriver iwm_cd = {
	NULL, "iwm", DV_IFNET
};

/* Autoconf glue: attachment descriptor (no detach method). */
struct cfattach iwm_ca = {
	sizeof(struct iwm_softc), iwm_match, iwm_attach,
	NULL, iwm_activate
};
9655