/*	$OpenBSD: if_iwm.c,v 1.313 2020/07/10 13:22:20 patrick Exp $	*/

/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 *   Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 * Copyright (c) 2017 Stefan Sperling <stsp@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/endian.h>

#include <sys/refcnt.h>
#include <sys/task.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_mira.h>
#include <net80211/ieee80211_radiotap.h>

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

#define IC2IFP(_ic_) (&(_ic_)->ic_if)

#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))

#ifdef IWM_DEBUG
#define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
int iwm_debug = 1;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif
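
/*
 * Usage sketch (debug kernels only): with IWM_DEBUG defined,
 * DPRINTFN(2, ("%s: parsed tlv type %u\n", DEVNAME(sc), tlv_type))
 * prints only when iwm_debug >= 2, while DPRINTF() prints whenever
 * iwm_debug > 0. Both compile to nothing in non-debug kernels.
 */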

#include <dev/pci/if_iwmreg.h>
#include <dev/pci/if_iwmvar.h>

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

#define IWM_NUM_2GHZ_CHANNELS	14

const struct iwm_rate {
	uint16_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_15_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
#define IWM_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
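
/*
 * Illustrative note: the `rate' field above is in 500 kbit/s units,
 * so 2 means 1 Mbit/s CCK and 12 means 6 Mbit/s, the lowest OFDM rate.
 * Hence IWM_RVAL_IS_OFDM(12) holds while IWM_RVAL_IS_OFDM(22)
 * (11 Mbit/s CCK) does not.
 */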

/* Convert an MCS index into an iwm_rates[] index. */
const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
	IWM_RATE_MCS_8_INDEX,
	IWM_RATE_MCS_9_INDEX,
	IWM_RATE_MCS_10_INDEX,
	IWM_RATE_MCS_11_INDEX,
	IWM_RATE_MCS_12_INDEX,
	IWM_RATE_MCS_13_INDEX,
	IWM_RATE_MCS_14_INDEX,
	IWM_RATE_MCS_15_INDEX,
};

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

int	iwm_is_mimo_ht_plcp(uint8_t);
int	iwm_is_mimo_mcs(int);
int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
int	iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
	    uint8_t *, size_t);
int	iwm_set_default_calib(struct iwm_softc *, const void *);
void	iwm_fw_info_free(struct iwm_fw_info *);
int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
int	iwm_nic_lock(struct iwm_softc *);
void	iwm_nic_assert_locked(struct iwm_softc *);
void	iwm_nic_unlock(struct iwm_softc *);
void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
	    uint32_t);
void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, bus_size_t,
	    bus_size_t);
void	iwm_dma_contig_free(struct iwm_dma_info *);
int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
void	iwm_disable_rx_dma(struct iwm_softc *);
void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
void	iwm_enable_rfkill_int(struct iwm_softc *);
int	iwm_check_rfkill(struct iwm_softc *);
void	iwm_enable_interrupts(struct iwm_softc *);
void	iwm_enable_fwload_interrupt(struct iwm_softc *);
void	iwm_restore_interrupts(struct iwm_softc *);
void	iwm_disable_interrupts(struct iwm_softc *);
void	iwm_ict_reset(struct iwm_softc *);
int	iwm_set_hw_ready(struct iwm_softc *);
int	iwm_prepare_card_hw(struct iwm_softc *);
void	iwm_apm_config(struct iwm_softc *);
int	iwm_apm_init(struct iwm_softc *);
void	iwm_apm_stop(struct iwm_softc *);
int	iwm_allow_mcast(struct iwm_softc *);
void	iwm_init_msix_hw(struct iwm_softc *);
void	iwm_conf_msix_hw(struct iwm_softc *, int);
int	iwm_start_hw(struct iwm_softc *);
void	iwm_stop_device(struct iwm_softc *);
void	iwm_nic_config(struct iwm_softc *);
int	iwm_nic_rx_init(struct iwm_softc *);
int	iwm_nic_rx_legacy_init(struct iwm_softc *);
int	iwm_nic_rx_mq_init(struct iwm_softc *);
int	iwm_nic_tx_init(struct iwm_softc *);
int	iwm_nic_init(struct iwm_softc *);
int	iwm_enable_ac_txq(struct iwm_softc *, int, int);
int	iwm_enable_txq(struct iwm_softc *, int, int, int);
int	iwm_post_alive(struct iwm_softc *);
struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *, uint16_t,
	    uint16_t);
int	iwm_phy_db_set_section(struct iwm_softc *,
	    struct iwm_calib_res_notif_phy_db *);
int	iwm_is_valid_channel(uint16_t);
uint8_t	iwm_ch_id_to_ch_index(uint16_t);
uint16_t iwm_channel_id_to_papd(uint16_t);
uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
	    uint16_t *, uint16_t);
int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *, uint16_t,
	    uint8_t);
int	iwm_send_phy_db_data(struct iwm_softc *);
void	iwm_protect_session(struct iwm_softc *, struct iwm_node *, uint32_t,
	    uint32_t);
void	iwm_unprotect_session(struct iwm_softc *, struct iwm_node *);
int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
	    uint8_t *, uint16_t *);
int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
	    uint16_t *, size_t);
void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
	    const uint8_t *nvm_channels, int nchan);
int	iwm_mimo_enabled(struct iwm_softc *);
void	iwm_setup_ht_rates(struct iwm_softc *);
void	iwm_htprot_task(void *);
void	iwm_update_htprot(struct ieee80211com *, struct ieee80211_node *);
int	iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t,
	    uint16_t, uint16_t, int);
#ifdef notyet
int	iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwm_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
#endif
void	iwm_ba_task(void *);

int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
	    const uint16_t *, const uint16_t *,
	    const uint16_t *, const uint16_t *,
	    const uint16_t *, int);
void	iwm_set_hw_address_8000(struct iwm_softc *, struct iwm_nvm_data *,
	    const uint16_t *, const uint16_t *);
int	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
int	iwm_nvm_init(struct iwm_softc *);
int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t, const uint8_t *,
	    uint32_t);
int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
	    uint32_t);
int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_load_cpu_sections_8000(struct iwm_softc *, struct iwm_fw_sects *,
	    int, int *);
int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
int	iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_send_dqa_cmd(struct iwm_softc *);
int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
int	iwm_config_ltr(struct iwm_softc *);
int	iwm_rx_addbuf(struct iwm_softc *, int, int);
int	iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *);
int	iwm_rxmq_get_signal_strength(struct iwm_softc *,
	    struct iwm_rx_mpdu_desc *);
void	iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_rx_data *);
int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
int	iwm_ccmp_decap(struct iwm_softc *, struct mbuf *,
	    struct ieee80211_node *);
void	iwm_rx_frame(struct iwm_softc *, struct mbuf *, int, uint32_t, int, int,
	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_node *, int, int);
void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_rx_data *);
void	iwm_rx_bmiss(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_rx_data *);
int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
	    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
void	iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *,
	    struct ieee80211_channel *, uint8_t, uint8_t);
int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint32_t);
int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t, uint16_t,
	    const void *);
int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
	    uint32_t *);
int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
	    const void *, uint32_t *);
void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
void	iwm_cmd_done(struct iwm_softc *, int, int, int);
void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
	    struct ieee80211_frame *, struct iwm_tx_cmd *);
int	iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
int	iwm_flush_tx_path(struct iwm_softc *, int);
void	iwm_led_enable(struct iwm_softc *);
void	iwm_led_disable(struct iwm_softc *);
int	iwm_led_is_enabled(struct iwm_softc *);
void	iwm_led_blink_timeout(void *);
void	iwm_led_blink_start(struct iwm_softc *);
void	iwm_led_blink_stop(struct iwm_softc *);
int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
	    struct iwm_beacon_filter_cmd *);
void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *, struct iwm_node *,
	    struct iwm_beacon_filter_cmd *);
int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
	    struct iwm_mac_power_cmd *);
int	iwm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
int	iwm_power_update_device(struct iwm_softc *);
int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
int	iwm_disable_beacon_filter(struct iwm_softc *);
int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
int	iwm_add_aux_sta(struct iwm_softc *);
int	iwm_rm_sta_cmd(struct iwm_softc *, struct iwm_node *);
uint16_t iwm_scan_rx_chain(struct iwm_softc *);
uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
	    struct iwm_scan_channel_cfg_lmac *, int, int);
int	iwm_fill_probe_req(struct iwm_softc *, struct iwm_scan_probe_req *);
int	iwm_lmac_scan(struct iwm_softc *, int);
int	iwm_config_umac_scan(struct iwm_softc *);
int	iwm_umac_scan(struct iwm_softc *, int);
uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
int	iwm_rval2ridx(int);
void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
	    struct iwm_mac_ctx_cmd *, uint32_t);
void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
	    struct iwm_mac_data_sta *, int);
int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *, uint32_t, int);
int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *, int);
void	iwm_add_task(struct iwm_softc *, struct taskq *, struct task *);
void	iwm_del_task(struct iwm_softc *, struct taskq *, struct task *);
int	iwm_scan(struct iwm_softc *);
int	iwm_bgscan(struct ieee80211com *);
int	iwm_umac_scan_abort(struct iwm_softc *);
int	iwm_lmac_scan_abort(struct iwm_softc *);
int	iwm_scan_abort(struct iwm_softc *);
int	iwm_auth(struct iwm_softc *);
int	iwm_deauth(struct iwm_softc *);
int	iwm_assoc(struct iwm_softc *);
int	iwm_disassoc(struct iwm_softc *);
int	iwm_run(struct iwm_softc *);
int	iwm_run_stop(struct iwm_softc *);
struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
int	iwm_set_key_v1(struct ieee80211com *, struct ieee80211_node *,
	    struct ieee80211_key *);
int	iwm_set_key(struct ieee80211com *, struct ieee80211_node *,
	    struct ieee80211_key *);
void	iwm_delete_key_v1(struct ieee80211com *,
	    struct ieee80211_node *, struct ieee80211_key *);
void	iwm_delete_key(struct ieee80211com *,
	    struct ieee80211_node *, struct ieee80211_key *);
void	iwm_calib_timeout(void *);
void	iwm_setrates(struct iwm_node *, int);
int	iwm_media_change(struct ifnet *);
void	iwm_newstate_task(void *);
int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
void	iwm_endscan(struct iwm_softc *);
void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
	    struct ieee80211_node *);
int	iwm_sf_config(struct iwm_softc *, int);
int	iwm_send_bt_init_conf(struct iwm_softc *);
int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
void	iwm_free_fw_paging(struct iwm_softc *);
int	iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *);
int	iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_sects *);
int	iwm_init_hw(struct iwm_softc *);
int	iwm_init(struct ifnet *);
void	iwm_start(struct ifnet *);
void	iwm_stop(struct ifnet *);
void	iwm_watchdog(struct ifnet *);
int	iwm_ioctl(struct ifnet *, u_long, caddr_t);
#ifdef IWM_DEBUG
const char *iwm_desc_lookup(uint32_t);
void	iwm_nic_error(struct iwm_softc *);
void	iwm_nic_umac_error(struct iwm_softc *);
#endif
void	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *, void *, size_t,
	    struct mbuf_list *);
int	iwm_rx_pkt_valid(struct iwm_rx_packet *);
void	iwm_rx_pkt(struct iwm_softc *, struct iwm_rx_data *,
	    struct mbuf_list *);
void	iwm_notif_intr(struct iwm_softc *);
int	iwm_intr(void *);
int	iwm_intr_msix(void *);
int	iwm_match(struct device *, void *, void *);
int	iwm_preinit(struct iwm_softc *);
void	iwm_attach_hook(struct device *);
void	iwm_attach(struct device *, struct device *, void *);
void	iwm_init_task(void *);
int	iwm_activate(struct device *, int);
int	iwm_resume(struct iwm_softc *);

#if NBPFILTER > 0
void	iwm_radiotap_attach(struct iwm_softc *);
#endif
int
iwm_is_mimo_ht_plcp(uint8_t ht_plcp)
{
	return (ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP &&
	    (ht_plcp & IWM_RATE_HT_MCS_NSS_MSK));
}

int
iwm_is_mimo_mcs(int mcs)
{
	int ridx = iwm_mcs2ridx[mcs];
	return iwm_is_mimo_ht_plcp(iwm_rates[ridx].ht_plcp);
}

int
iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwm_fw_cscheme_list *l = (void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

int
iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_onesect *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* The first 32 bits are the device load offset. */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* The rest is image data. */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}
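
/*
 * For reference, a firmware section TLV payload has the layout that
 * iwm_firmware_store_section() parses above:
 *
 *	offset 0:	uint32_t device load offset (little-endian)
 *	offset 4:	(dlen - 4) bytes of image data
 */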

#define IWM_DEFAULT_SCAN_CHANNELS	40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWM_MAX_SCAN_CHANNELS		52 /* as of 8265-34 firmware image */

struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
	fw->fw_rawdata = NULL;
	fw->fw_rawsize = 0;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	uint32_t tlv_type;
	uint8_t *data;
	uint32_t usniffer_img;
	uint32_t paging_mem_size;
	int err;
	size_t len;

	if (fw->fw_status == IWM_FW_STATUS_DONE &&
	    ucode_type != IWM_UCODE_TYPE_INIT)
		return 0;

	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
		tsleep_nsec(&sc->sc_fw, 0, "iwmfwp", INFSLP);
	fw->fw_status = IWM_FW_STATUS_INPROGRESS;

	if (fw->fw_rawdata != NULL)
		iwm_fw_info_free(fw);

	err = loadfirmware(sc->sc_fwname,
	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
	if (err) {
		printf("%s: could not read firmware %s (error %d)\n",
		    DEVNAME(sc), sc->sc_fwname, err);
		goto out;
	}

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but the Linux
			 * driver parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwm_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			struct iwm_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwm_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwm_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case 48: /* undocumented TLV */
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32toh(*(const uint32_t *)tlv_data);

			DPRINTF(("%s: Paging: paging enabled (size = %u bytes)\n",
			    DEVNAME(sc), paging_mem_size));
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				printf("%s: Driver only supports up to %u"
				    " bytes for paging image (%u requested)\n",
				    DEVNAME(sc), IWM_MAX_PAGING_IMAGE_SIZE,
				    paging_mem_size);
				err = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				printf("%s: Paging: image isn't a multiple "
				    "of %u\n", DEVNAME(sc),
				    IWM_FW_PAGING_SIZE);
				err = EINVAL;
				goto out;
			}

			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_TYPE_REGULAR_USNIFFER;
			fw->fw_sects[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			  le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_n_scan_channels > IWM_MAX_SCAN_CHANNELS) {
				err = ERANGE;
				goto parse_out;
			}
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%u.%u.%u",
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_DBG_DEST:
		case IWM_UCODE_TLV_FW_DBG_CONF:
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			err = EINVAL;
			goto parse_out;
		}

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

 parse_out:
	if (err) {
		printf("%s: firmware parse error %d, "
		    "section type %d\n", DEVNAME(sc), err, tlv_type);
	}

 out:
	if (err) {
		fw->fw_status = IWM_FW_STATUS_NONE;
		if (fw->fw_rawdata != NULL)
			iwm_fw_info_free(fw);
	} else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	return err;
}
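
/*
 * A quick sketch of the TLV stream iwm_read_firmware() walks: each
 * record is a struct iwm_ucode_tlv header (little-endian 32-bit type
 * and length) followed by `length' payload bytes, with the next
 * record aligned to a 4-byte boundary -- hence the roundup(tlv_len, 4)
 * advance at the bottom of the parse loop.
 */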

uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	iwm_nic_assert_locked(sc);
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}

void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	iwm_nic_assert_locked(sc);
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}

void
iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val)
{
	iwm_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
	iwm_write_prph(sc, (uint32_t)addr + 4, val >> 32);
}

int
iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
{
	int offs, err = 0;
	uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
		iwm_nic_unlock(sc);
	} else {
		err = EBUSY;
	}
	return err;
}

int
iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
{
	int offs;
	const uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
		/* WADDR auto-increments */
		for (offs = 0; offs < dwords; offs++) {
			uint32_t val = vals ? vals[offs] : 0;
			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
		}
		iwm_nic_unlock(sc);
	} else {
		return EBUSY;
	}
	return 0;
}

int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}

int
iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	for (;;) {
		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
			return 1;
		}
		if (timo < 10) {
			return 0;
		}
		timo -= 10;
		DELAY(10);
	}
}
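
/*
 * Note: iwm_poll_bit() polls in 10usec steps, so `timo' is effectively
 * a microsecond budget; e.g. the 150000 passed by iwm_nic_lock() below
 * allows roughly 150ms for the MAC to wake up.
 */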

int
iwm_nic_lock(struct iwm_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		iwm_nic_assert_locked(sc);
		sc->sc_nic_locks++;
		return 1; /* already locked */
	}

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		sc->sc_nic_locks++;
		return 1;
	}

	printf("%s: acquiring device failed\n", DEVNAME(sc));
	return 0;
}

void
iwm_nic_assert_locked(struct iwm_softc *sc)
{
	uint32_t reg = IWM_READ(sc, IWM_CSR_GP_CNTRL);
	if ((reg & IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) == 0)
		panic("%s: mac clock not ready", DEVNAME(sc));
	if (reg & IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)
		panic("%s: mac gone to sleep", DEVNAME(sc));
	if (sc->sc_nic_locks <= 0)
		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
}

void
iwm_nic_unlock(struct iwm_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		if (--sc->sc_nic_locks == 0)
			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else
		printf("%s: NIC already unlocked\n", DEVNAME(sc));
}
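
/*
 * The NIC lock is recursive (sc_nic_locks counts nested acquisitions).
 * The usual pattern throughout this file is:
 *
 *	if (iwm_nic_lock(sc)) {
 *		... access periphery (PRPH) registers ...
 *		iwm_nic_unlock(sc);
 *	}
 */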

void
iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t val;

	/* XXX: no error path? */
	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
	}
}

void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}

void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}

int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	caddr_t va;

	dma->tag = tag;
	dma->size = size;

	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return err;
}
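
/*
 * iwm_dma_contig_alloc() follows the canonical four-step bus_dma(9)
 * sequence: bus_dmamap_create() for the map, bus_dmamem_alloc() for
 * physical memory, bus_dmamem_map() for a kernel virtual mapping, and
 * bus_dmamap_load() to obtain the bus address; iwm_dma_contig_free()
 * below unwinds the same steps in reverse order.
 */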

void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}

int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	size_t descsz;
	int count, i, err;

	ring->cur = 0;

	if (sc->sc_mqrx_supported) {
		count = IWM_RX_MQ_RING_COUNT;
		descsz = sizeof(uint64_t);
	} else {
		count = IWM_RX_RING_COUNT;
		descsz = sizeof(uint32_t);
	}

	/* Allocate RX descriptors (256-byte aligned). */
	size = count * descsz;
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
	if (err) {
		printf("%s: could not allocate RX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->desc = ring->free_desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		printf("%s: could not allocate RX status DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	if (sc->sc_mqrx_supported) {
		size = count * sizeof(uint32_t);
		err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
		    size, 256);
		if (err) {
			printf("%s: could not allocate RX ring DMA memory\n",
			    DEVNAME(sc));
			goto fail;
		}
	}

	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			printf("%s: could not create RX buf DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}

		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return err;
}

void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		if (sc->sc_mqrx_supported) {
			iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
			for (ntries = 0; ntries < 1000; ntries++) {
				if (iwm_read_prph(sc, IWM_RFH_GEN_STATUS) &
				    IWM_RXF_DMA_IDLE)
					break;
				DELAY(10);
			}
		} else {
			IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
			for (ntries = 0; ntries < 1000; ntries++) {
				if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG)&
				    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
					break;
				DELAY(10);
			}
		}
		iwm_nic_unlock(sc);
	}
}

void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
}

void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int count, i;

	iwm_dma_contig_free(&ring->free_desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);
	iwm_dma_contig_free(&ring->used_desc_dma);

	if (sc->sc_mqrx_supported)
		count = IWM_RX_MQ_RING_COUNT;
	else
		count = IWM_RX_RING_COUNT;

	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;
	ring->tail = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		printf("%s: could not allocate TX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * There is no need to allocate DMA buffers for unused rings.
	 * 7k/8k/9k hardware supports up to 31 Tx rings which is more
	 * than we currently need.
	 *
	 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
	 * The command queue is sc->txq[0], and the 4 mgmt/data frame queues
	 * are sc->txq[IWM_DQA_MIN_MGMT_QUEUE + ac], i.e. sc->txq[5:8],
	 * in order to provide one queue per EDCA category.
	 *
	 * In non-DQA mode, we use rings 0 through 9 (0-3 are EDCA, 9 is cmd).
	 *
	 * Tx aggregation will require additional queues (one queue per TID
	 * for which aggregation is enabled) but we do not implement this yet.
	 *
	 * Unfortunately, we cannot tell if DQA will be used until the
	 * firmware gets loaded later, so just allocate sufficient rings
	 * in order to satisfy both cases.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE || qid == IWM_DQA_CMD_QUEUE)
			mapsize = (sizeof(struct iwm_cmd_header) +
			    IWM_MAX_CMD_PAYLOAD_SIZE);
		else
			mapsize = MCLBYTES;
		err = bus_dmamap_create(sc->sc_dmat, mapsize,
		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (err) {
			printf("%s: could not create TX buf DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return err;
}

void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	/* 7000 family NICs are locked while commands are in progress. */
	if (ring->qid == sc->cmdqid && ring->queued > 0) {
		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
			iwm_nic_unlock(sc);
	}
	ring->queued = 0;
	ring->cur = 0;
	ring->tail = 0;
}

void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
	} else {
		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL);
		sc->sc_hw_mask = IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL;
	}

	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_9000)
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
}

int
iwm_check_rfkill(struct iwm_softc *sc)
{
	uint32_t v;
	int s;
	int rv;

	s = splnet();

	/*
	 * "documentation" is not really helpful here:
	 *  27:	HW_RF_KILL_SW
	 *	Indicates state of (platform's) hardware RF-Kill switch
	 *
	 * But apparently when it's off, it's on ...
	 */
	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
	if (rv) {
		sc->sc_flags |= IWM_FLAG_RFKILL;
	} else {
		sc->sc_flags &= ~IWM_FLAG_RFKILL;
	}

	splx(s);
	return rv;
}

void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWM_CSR_INI_SET_MASK;
		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its
		 * mask bit is unset.
		 */
		sc->sc_hw_mask = sc->sc_hw_init_mask;
		sc->sc_fh_mask = sc->sc_fh_init_mask;
		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
		    ~sc->sc_fh_mask);
		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
		    ~sc->sc_hw_mask);
	}
}

void
iwm_enable_fwload_interrupt(struct iwm_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
	} else {
		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
		    sc->sc_hw_init_mask);
		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
		    ~IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
		sc->sc_fh_mask = IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM;
	}
}

void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	if (!sc->sc_msix) {
		IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

		/* acknowledge all interrupts */
		IWM_WRITE(sc, IWM_CSR_INT, ~0);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
	} else {
		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
		    sc->sc_hw_init_mask);
	}

	splx(s);
}

void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
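
/*
 * The ICT (interrupt cause table) is a 4KB-aligned, IWM_ICT_SIZE-byte
 * DMA region into which the device writes interrupt cause entries.
 * Only the physical address bits above IWM_ICT_PADDR_SHIFT are
 * programmed into the register, which is why the strict alignment
 * above is required.
 */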

#define IWM_HW_READY_TIMEOUT 50
int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	int ready;

	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
	if (ready)
		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);

	return ready;
}
#undef IWM_HW_READY_TIMEOUT

int
iwm_prepare_card_hw(struct iwm_softc *sc)
{
	int t = 0;

	if (iwm_set_hw_ready(sc))
		return 0;

	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(1000);

	/* If HW is not ready, prepare the conditions to check again */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);

	do {
		if (iwm_set_hw_ready(sc))
			return 0;
		DELAY(200);
		t += 200;
	} while (t < 150000);

	return ETIMEDOUT;
}
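
/*
 * Timing note: after asserting the PREPARE bit, iwm_prepare_card_hw()
 * re-checks readiness every 200usec for up to 150ms (t < 150000)
 * before giving up with ETIMEDOUT.
 */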

void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t lctl, cap;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so the device moves directly
	 *    L0->L1; this costs a negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there are at least some
	 *    power savings, even without L1.
	 */
	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCI_PCIE_LCSR);
	if (lctl & PCI_PCIE_LCSR_ASPM_L1) {
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}

	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCI_PCIE_DCSR2);
	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
	    DEVNAME(sc),
	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
	    sc->sc_ltr_enabled ? "En" : "Dis"));
}
1626 
1627 /*
1628  * Start up NIC's basic functionality after it has been reset
1629  * e.g. after platform boot or shutdown.
1630  * NOTE:  This does not load uCode nor start the embedded processor
1631  */
1632 int
1633 iwm_apm_init(struct iwm_softc *sc)
1634 {
1635 	int err = 0;
1636 
1637 	/* Disable L0S exit timer (platform NMI workaround) */
1638 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
1639 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1640 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1641 
1642 	/*
1643 	 * Disable L0s without affecting L1;
1644 	 *  don't wait for ICH L0s (ICH bug W/A)
1645 	 */
1646 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1647 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1648 
1649 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1650 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1651 
1652 	/*
1653 	 * Enable HAP INTA (interrupt from management bus) to
1654 	 * wake device's PCI Express link L1a -> L0s
1655 	 */
1656 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1657 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1658 
1659 	iwm_apm_config(sc);
1660 
1661 #if 0 /* not for 7k/8k */
1662 	/* Configure analog phase-lock-loop before activating to D0A */
1663 	if (trans->cfg->base_params->pll_cfg_val)
1664 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1665 		    trans->cfg->base_params->pll_cfg_val);
1666 #endif
1667 
1668 	/*
1669 	 * Set "initialization complete" bit to move adapter from
1670 	 * D0U* --> D0A* (powered-up active) state.
1671 	 */
1672 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1673 
1674 	/*
1675 	 * Wait for clock stabilization; once stabilized, access to
1676 	 * device-internal resources is supported, e.g. iwm_write_prph()
1677 	 * and accesses to uCode SRAM.
1678 	 */
1679 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1680 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1681 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1682 		printf("%s: timeout waiting for clock stabilization\n",
1683 		    DEVNAME(sc));
1684 		err = ETIMEDOUT;
1685 		goto out;
1686 	}
1687 
1688 	if (sc->host_interrupt_operation_mode) {
1689 		/*
		 * This is a bit of an abuse: the workaround below is needed
		 * only on 7260/3160 devices, so we key off
		 * host_interrupt_operation_mode even though the oscillator
		 * has nothing to do with that mode.
1693 		 *
1694 		 * Enable the oscillator to count wake up time for L1 exit. This
1695 		 * consumes slightly more power (100uA) - but allows to be sure
1696 		 * that we wake up from L1 on time.
1697 		 *
1698 		 * This looks weird: read twice the same register, discard the
1699 		 * value, set a bit, and yet again, read that same register
1700 		 * just to discard the value. But that's the way the hardware
1701 		 * seems to like it.
1702 		 */
1703 		if (iwm_nic_lock(sc)) {
1704 			iwm_read_prph(sc, IWM_OSC_CLK);
1705 			iwm_read_prph(sc, IWM_OSC_CLK);
1706 			iwm_nic_unlock(sc);
1707 		}
1708 		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1709 		if (iwm_nic_lock(sc)) {
1710 			iwm_read_prph(sc, IWM_OSC_CLK);
1711 			iwm_read_prph(sc, IWM_OSC_CLK);
1712 			iwm_nic_unlock(sc);
1713 		}
1714 	}
1715 
1716 	/*
1717 	 * Enable DMA clock and wait for it to stabilize.
1718 	 *
1719 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1720 	 * do not disable clocks.  This preserves any hardware bits already
1721 	 * set by default in "CLK_CTRL_REG" after reset.
1722 	 */
1723 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1724 		if (iwm_nic_lock(sc)) {
1725 			iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1726 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1727 			iwm_nic_unlock(sc);
1728 		}
1729 		DELAY(20);
1730 
1731 		/* Disable L1-Active */
1732 		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1733 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1734 
1735 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
1736 		if (iwm_nic_lock(sc)) {
1737 			iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1738 			    IWM_APMG_RTC_INT_STT_RFKILL);
1739 			iwm_nic_unlock(sc);
1740 		}
1741 	}
1742  out:
1743 	if (err)
1744 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
1745 	return err;
1746 }
1747 
1748 void
1749 iwm_apm_stop(struct iwm_softc *sc)
1750 {
1751 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1752 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1753 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1754 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE |
1755 	    IWM_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
1756 	DELAY(1000);
1757 	IWM_CLRBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1758 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1759 	DELAY(5000);
1760 
1761 	/* stop device's busmaster DMA activity */
1762 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1763 
1764 	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1765 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1766 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1767 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
1768 
1769 	/*
1770 	 * Clear "initialization complete" bit to move adapter from
1771 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1772 	 */
1773 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1774 	    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1775 }
1776 
1777 void
1778 iwm_init_msix_hw(struct iwm_softc *sc)
1779 {
1780 	iwm_conf_msix_hw(sc, 0);
1781 
1782 	if (!sc->sc_msix)
1783 		return;
1784 
1785 	sc->sc_fh_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_FH_INT_MASK_AD);
1786 	sc->sc_fh_mask = sc->sc_fh_init_mask;
1787 	sc->sc_hw_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_HW_INT_MASK_AD);
1788 	sc->sc_hw_mask = sc->sc_hw_init_mask;
1789 }
1790 
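/*
 * Program the MSI-X cause-to-vector (IVAR) routing.  This driver
 * allocates only a single MSI-X vector, so both RX queues and all
 * non-RX causes are mapped to vector 0 below.  When MSI-X is not in
 * use, chips which default to it (the MQ RX capable ones) must be
 * switched back to MSI explicitly via IWM_UREG_CHICK.
 */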
1791 void
1792 iwm_conf_msix_hw(struct iwm_softc *sc, int stopped)
1793 {
1794 	int vector = 0;
1795 
1796 	if (!sc->sc_msix) {
1797 		/* Newer chips default to MSIX. */
1798 		if (sc->sc_mqrx_supported && !stopped && iwm_nic_lock(sc)) {
1799 			iwm_write_prph(sc, IWM_UREG_CHICK,
1800 			    IWM_UREG_CHICK_MSI_ENABLE);
1801 			iwm_nic_unlock(sc);
1802 		}
1803 		return;
1804 	}
1805 
1806 	if (!stopped && iwm_nic_lock(sc)) {
1807 		iwm_write_prph(sc, IWM_UREG_CHICK, IWM_UREG_CHICK_MSIX_ENABLE);
1808 		iwm_nic_unlock(sc);
1809 	}
1810 
1811 	/* Disable all interrupts */
1812 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD, ~0);
1813 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD, ~0);
1814 
1815 	/* Map fallback-queue (command/mgmt) to a single vector */
1816 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(0),
1817 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1818 	/* Map RSS queue (data) to the same vector */
1819 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(1),
1820 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1821 
	/* Enable the interrupt causes for the RX queues. */
1823 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1824 	    IWM_MSIX_FH_INT_CAUSES_Q0 | IWM_MSIX_FH_INT_CAUSES_Q1);
1825 
1826 	/* Map non-RX causes to the same vector */
1827 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
1828 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1829 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
1830 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1831 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_S2D),
1832 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1833 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_FH_ERR),
1834 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1835 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_ALIVE),
1836 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1837 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_WAKEUP),
1838 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1839 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_IML),
1840 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1841 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_CT_KILL),
1842 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1843 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_RF_KILL),
1844 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1845 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_PERIODIC),
1846 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1847 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SW_ERR),
1848 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1849 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SCD),
1850 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1851 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_FH_TX),
1852 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1853 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HW_ERR),
1854 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1855 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HAP),
1856 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1857 
	/* Enable the non-RX interrupt causes. */
1859 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1860 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
1861 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
1862 	    IWM_MSIX_FH_INT_CAUSES_S2D |
1863 	    IWM_MSIX_FH_INT_CAUSES_FH_ERR);
1864 	IWM_CLRBITS(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1865 	    IWM_MSIX_HW_INT_CAUSES_REG_ALIVE |
1866 	    IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP |
1867 	    IWM_MSIX_HW_INT_CAUSES_REG_IML |
1868 	    IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL |
1869 	    IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL |
1870 	    IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC |
1871 	    IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR |
1872 	    IWM_MSIX_HW_INT_CAUSES_REG_SCD |
1873 	    IWM_MSIX_HW_INT_CAUSES_REG_FH_TX |
1874 	    IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR |
1875 	    IWM_MSIX_HW_INT_CAUSES_REG_HAP);
1876 }
1877 
1878 int
1879 iwm_start_hw(struct iwm_softc *sc)
1880 {
1881 	int err;
1882 
1883 	err = iwm_prepare_card_hw(sc);
1884 	if (err)
1885 		return err;
1886 
1887 	/* Reset the entire device */
1888 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1889 	DELAY(5000);
1890 
1891 	err = iwm_apm_init(sc);
1892 	if (err)
1893 		return err;
1894 
1895 	iwm_init_msix_hw(sc);
1896 
1897 	iwm_enable_rfkill_int(sc);
1898 	iwm_check_rfkill(sc);
1899 
1900 	return 0;
1901 }
1902 
1903 
1904 void
1905 iwm_stop_device(struct iwm_softc *sc)
1906 {
1907 	int chnl, ntries;
1908 	int qid;
1909 
1910 	iwm_disable_interrupts(sc);
1911 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1912 
1913 	/* Stop all DMA channels. */
1914 	if (iwm_nic_lock(sc)) {
1915 		/* Deactivate TX scheduler. */
1916 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1917 
1918 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1919 			IWM_WRITE(sc,
1920 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1921 			for (ntries = 0; ntries < 200; ntries++) {
1922 				uint32_t r;
1923 
1924 				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
1925 				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
1926 				    chnl))
1927 					break;
1928 				DELAY(20);
1929 			}
1930 		}
1931 		iwm_nic_unlock(sc);
1932 	}
1933 	iwm_disable_rx_dma(sc);
1934 
1935 	iwm_reset_rx_ring(sc, &sc->rxq);
1936 
1937 	for (qid = 0; qid < nitems(sc->txq); qid++)
1938 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1939 
1940 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1941 		if (iwm_nic_lock(sc)) {
1942 			/* Power-down device's busmaster DMA clocks */
1943 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1944 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1945 			iwm_nic_unlock(sc);
1946 		}
1947 		DELAY(5);
1948 	}
1949 
1950 	/* Make sure (redundant) we've released our request to stay awake */
1951 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1952 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1953 	if (sc->sc_nic_locks > 0)
1954 		printf("%s: %d active NIC locks forcefully cleared\n",
1955 		    DEVNAME(sc), sc->sc_nic_locks);
1956 	sc->sc_nic_locks = 0;
1957 
1958 	/* Stop the device, and put it in low power state */
1959 	iwm_apm_stop(sc);
1960 
1961 	/* Reset the on-board processor. */
1962 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1963 	DELAY(5000);
1964 
1965 	/*
	 * Upon stop, the IVAR table gets erased, so MSI-X won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables the radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
1970 	 * Configure the IVAR table again after reset.
1971 	 */
1972 	iwm_conf_msix_hw(sc, 1);
1973 
1974 	/*
1975 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1976 	 * Clear the interrupt again.
1977 	 */
1978 	iwm_disable_interrupts(sc);
1979 
1980 	/* Even though we stop the HW we still want the RF kill interrupt. */
1981 	iwm_enable_rfkill_int(sc);
1982 	iwm_check_rfkill(sc);
1983 
1984 	iwm_prepare_card_hw(sc);
1985 }
1986 
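/*
 * Mirror the radio configuration (type/step/dash, from the firmware
 * PHY config) and the MAC step/dash (from the hardware revision
 * register) into IWM_CSR_HW_IF_CONFIG_REG.  A read-modify-write under
 * the mask below preserves the register's unrelated bits.
 */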
1987 void
1988 iwm_nic_config(struct iwm_softc *sc)
1989 {
1990 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1991 	uint32_t mask, val, reg_val = 0;
1992 
1993 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1994 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1995 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1996 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1997 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1998 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1999 
2000 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2001 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2002 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2003 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2004 
2005 	/* radio configuration */
2006 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2007 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2008 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2009 
2010 	mask = IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2011 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2012 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2013 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2014 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2015 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2016 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2017 
2018 	val = IWM_READ(sc, IWM_CSR_HW_IF_CONFIG_REG);
2019 	val &= ~mask;
2020 	val |= reg_val;
2021 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, val);
2022 
2023 	/*
2024 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing the ME FW
	 * to lose ownership and be unable to regain it.
2027 	 */
2028 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2029 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2030 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
2031 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
2032 }
2033 
2034 int
2035 iwm_nic_rx_init(struct iwm_softc *sc)
2036 {
2037 	if (sc->sc_mqrx_supported)
2038 		return iwm_nic_rx_mq_init(sc);
2039 	else
2040 		return iwm_nic_rx_legacy_init(sc);
2041 }
2042 
2043 int
2044 iwm_nic_rx_mq_init(struct iwm_softc *sc)
2045 {
2046 	int enabled;
2047 
2048 	if (!iwm_nic_lock(sc))
2049 		return EBUSY;
2050 
2051 	/* Stop RX DMA. */
2052 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
2053 	/* Disable RX used and free queue operation. */
2054 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);
2055 
2056 	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
2057 	    sc->rxq.free_desc_dma.paddr);
2058 	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
2059 	    sc->rxq.used_desc_dma.paddr);
2060 	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
2061 	    sc->rxq.stat_dma.paddr);
2062 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
2063 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
2064 	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);
2065 
2066 	/* We configure only queue 0 for now. */
2067 	enabled = ((1 << 0) << 16) | (1 << 0);
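	/* i.e. enabled == 0x00010001, queue 0's bit in each half of the register. */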
2068 
2069 	/* Enable RX DMA, 4KB buffer size. */
2070 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
2071 	    IWM_RFH_DMA_EN_ENABLE_VAL |
2072 	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
2073 	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
2074 	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
2075 	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);
2076 
2077 	/* Enable RX DMA snooping. */
2078 	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
2079 	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
2080 	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
2081 	    (sc->sc_integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
2082 	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));
2083 
2084 	/* Enable the configured queue(s). */
2085 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);
2086 
2087 	iwm_nic_unlock(sc);
2088 
2089 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2090 
2091 	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);
2092 
2093 	return 0;
2094 }
2095 
2096 int
2097 iwm_nic_rx_legacy_init(struct iwm_softc *sc)
2098 {
2099 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
2100 
2101 	iwm_disable_rx_dma(sc);
2102 
2103 	if (!iwm_nic_lock(sc))
2104 		return EBUSY;
2105 
2106 	/* reset and flush pointers */
2107 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
2108 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
2109 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
2110 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
2111 
2112 	/* Set physical address of RX ring (256-byte aligned). */
2113 	IWM_WRITE(sc,
2114 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.free_desc_dma.paddr >> 8);
2115 
2116 	/* Set physical address of RX status (16-byte aligned). */
2117 	IWM_WRITE(sc,
2118 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
2119 
2120 	/* Enable RX. */
2121 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
2122 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
2123 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
2124 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
2125 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
2126 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
2127 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
2128 
2129 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2130 
2131 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
2132 	if (sc->host_interrupt_operation_mode)
2133 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
2134 
2135 	iwm_nic_unlock(sc);
2136 
2137 	/*
2138 	 * This value should initially be 0 (before preparing any RBs),
2139 	 * and should be 8 after preparing the first 8 RBs (for example).
2140 	 */
2141 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
2142 
2143 	return 0;
2144 }
2145 
2146 int
2147 iwm_nic_tx_init(struct iwm_softc *sc)
2148 {
2149 	int qid;
2150 
2151 	if (!iwm_nic_lock(sc))
2152 		return EBUSY;
2153 
2154 	/* Deactivate TX scheduler. */
2155 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2156 
2157 	/* Set physical address of "keep warm" page (16-byte aligned). */
2158 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
2159 
2160 	for (qid = 0; qid < nitems(sc->txq); qid++) {
2161 		struct iwm_tx_ring *txq = &sc->txq[qid];
2162 
2163 		/* Set physical address of TX ring (256-byte aligned). */
2164 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
2165 		    txq->desc_dma.paddr >> 8);
2166 	}
2167 
2168 	iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
2169 	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
2170 	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);
2171 
2172 	iwm_nic_unlock(sc);
2173 
2174 	return 0;
2175 }
2176 
2177 int
2178 iwm_nic_init(struct iwm_softc *sc)
2179 {
2180 	int err;
2181 
2182 	iwm_apm_init(sc);
2183 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2184 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2185 		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
2186 		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
2187 
2188 	iwm_nic_config(sc);
2189 
2190 	err = iwm_nic_rx_init(sc);
2191 	if (err)
2192 		return err;
2193 
2194 	err = iwm_nic_tx_init(sc);
2195 	if (err)
2196 		return err;
2197 
2198 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2199 
2200 	return 0;
2201 }
2202 
2203 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2204 const uint8_t iwm_ac_to_tx_fifo[] = {
2205 	IWM_TX_FIFO_BE,
2206 	IWM_TX_FIFO_BK,
2207 	IWM_TX_FIFO_VI,
2208 	IWM_TX_FIFO_VO,
2209 };
2210 
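/*
 * Enable a Tx queue served by the legacy scheduler and bind it to a
 * firmware FIFO: reset the write pointer, deactivate the queue, take it
 * out of aggregation mode, zero its scheduler context in SRAM, program
 * the window size and frame limit, and finally activate it on the given
 * FIFO.  Assuming the field layout used by the reference iwlwifi driver
 * (window size in the low half of the second context word, frame limit
 * in the high half) and IWM_FRAME_LIMIT == 64, the context value
 * written below is 0x00400040.
 */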
2211 int
2212 iwm_enable_ac_txq(struct iwm_softc *sc, int qid, int fifo)
2213 {
2214 	iwm_nic_assert_locked(sc);
2215 
2216 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2217 
2218 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2219 	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
2220 	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2221 
2222 	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
2223 
2224 	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
2225 
2226 	iwm_write_mem32(sc,
2227 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
2228 
2229 	/* Set scheduler window size and frame limit. */
2230 	iwm_write_mem32(sc,
2231 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2232 	    sizeof(uint32_t),
2233 	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
2234 	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
2235 	    ((IWM_FRAME_LIMIT
2236 		<< IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2237 	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
2238 
2239 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2240 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2241 	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
2242 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
2243 	    IWM_SCD_QUEUE_STTS_REG_MSK);
2244 
2245 	if (qid == sc->cmdqid)
2246 		iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2247 		    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid));
2248 
2249 	return 0;
2250 }
2251 
2252 int
2253 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
2254 {
2255 	struct iwm_scd_txq_cfg_cmd cmd;
2256 	int err;
2257 
2258 	iwm_nic_assert_locked(sc);
2259 
2260 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2261 
2262 	memset(&cmd, 0, sizeof(cmd));
2263 	cmd.scd_queue = qid;
2264 	cmd.enable = 1;
2265 	cmd.sta_id = sta_id;
2266 	cmd.tx_fifo = fifo;
2267 	cmd.aggregate = 0;
2268 	cmd.window = IWM_FRAME_LIMIT;
2269 
2270 	err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0,
2271 	    sizeof(cmd), &cmd);
2272 	if (err)
2273 		return err;
2274 
2275 	return 0;
2276 }
2277 
2278 int
2279 iwm_post_alive(struct iwm_softc *sc)
2280 {
2281 	int nwords;
2282 	int err, chnl;
2283 	uint32_t base;
2284 
2285 	if (!iwm_nic_lock(sc))
2286 		return EBUSY;
2287 
2288 	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2289 
2290 	iwm_ict_reset(sc);
2291 
2292 	iwm_nic_unlock(sc);
2293 
2294 	/* Clear TX scheduler state in SRAM. */
2295 	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2296 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
2297 	    / sizeof(uint32_t);
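	/* A NULL buffer makes iwm_write_mem() fill the range with zeroes. */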
2298 	err = iwm_write_mem(sc,
2299 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
2300 	    NULL, nwords);
2301 	if (err)
2302 		return err;
2303 
2304 	if (!iwm_nic_lock(sc))
2305 		return EBUSY;
2306 
2307 	/* Set physical address of TX scheduler rings (1KB aligned). */
2308 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2309 
2310 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2311 
2312 	/* enable command channel */
2313 	err = iwm_enable_ac_txq(sc, sc->cmdqid, IWM_TX_FIFO_CMD);
2314 	if (err) {
2315 		iwm_nic_unlock(sc);
2316 		return err;
2317 	}
2318 
2319 	/* Activate TX scheduler. */
2320 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2321 
2322 	/* Enable DMA channels. */
2323 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2324 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2325 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2326 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2327 	}
2328 
2329 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2330 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2331 
2332 	iwm_nic_unlock(sc);
2333 
2334 	/* Enable L1-Active */
2335 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
2336 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2337 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2338 
2339 	return err;
2340 }
2341 
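/*
 * PHY database handling.  The INIT firmware image performs radio
 * calibration and reports the results in calibration notifications;
 * we cache those results in sc->sc_phy_db so they can be replayed to
 * the runtime firmware image later via IWM_PHY_DB_CMD (see
 * iwm_send_phy_db_data()).
 */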
2342 struct iwm_phy_db_entry *
2343 iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id)
2344 {
2345 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2346 
2347 	if (type >= IWM_PHY_DB_MAX)
2348 		return NULL;
2349 
2350 	switch (type) {
2351 	case IWM_PHY_DB_CFG:
2352 		return &phy_db->cfg;
2353 	case IWM_PHY_DB_CALIB_NCH:
2354 		return &phy_db->calib_nch;
2355 	case IWM_PHY_DB_CALIB_CHG_PAPD:
2356 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2357 			return NULL;
2358 		return &phy_db->calib_ch_group_papd[chg_id];
2359 	case IWM_PHY_DB_CALIB_CHG_TXP:
2360 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2361 			return NULL;
2362 		return &phy_db->calib_ch_group_txp[chg_id];
2363 	default:
2364 		return NULL;
2365 	}
2367 }
2368 
2369 int
2370 iwm_phy_db_set_section(struct iwm_softc *sc,
2371     struct iwm_calib_res_notif_phy_db *phy_db_notif)
2372 {
2373 	uint16_t type = le16toh(phy_db_notif->type);
2374 	uint16_t size  = le16toh(phy_db_notif->length);
2375 	struct iwm_phy_db_entry *entry;
2376 	uint16_t chg_id = 0;
2377 
2378 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2379 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
2380 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2381 
2382 	entry = iwm_phy_db_get_section(sc, type, chg_id);
2383 	if (!entry)
2384 		return EINVAL;
2385 
2386 	if (entry->data)
2387 		free(entry->data, M_DEVBUF, entry->size);
2388 	entry->data = malloc(size, M_DEVBUF, M_NOWAIT);
2389 	if (!entry->data) {
2390 		entry->size = 0;
2391 		return ENOMEM;
2392 	}
2393 	memcpy(entry->data, phy_db_notif->data, size);
2394 	entry->size = size;
2395 
2396 	return 0;
2397 }
2398 
2399 int
2400 iwm_is_valid_channel(uint16_t ch_id)
2401 {
2402 	if (ch_id <= 14 ||
2403 	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2404 	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2405 	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2406 		return 1;
2407 	return 0;
2408 }
2409 
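/*
 * Map a channel number to a dense index: channels 1-14 map to
 * indices 0-13, 36-64 to 14-21, 100-140 to 22-32, and 145-165 to
 * 33-38.  For example, channel 36 yields (36 + 20) / 4 == 14, the
 * first index after the 2GHz block.
 */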
2410 uint8_t
2411 iwm_ch_id_to_ch_index(uint16_t ch_id)
2412 {
2413 	if (!iwm_is_valid_channel(ch_id))
2414 		return 0xff;
2415 
2416 	if (ch_id <= 14)
2417 		return ch_id - 1;
2418 	if (ch_id <= 64)
2419 		return (ch_id + 20) / 4;
2420 	if (ch_id <= 140)
2421 		return (ch_id - 12) / 4;
2422 	return (ch_id - 13) / 4;
2423 }
2424 
2425 
2426 uint16_t
2427 iwm_channel_id_to_papd(uint16_t ch_id)
2428 {
2429 	if (!iwm_is_valid_channel(ch_id))
2430 		return 0xff;
2431 
2432 	if (1 <= ch_id && ch_id <= 14)
2433 		return 0;
2434 	if (36 <= ch_id && ch_id <= 64)
2435 		return 1;
2436 	if (100 <= ch_id && ch_id <= 140)
2437 		return 2;
2438 	return 3;
2439 }
2440 
2441 uint16_t
2442 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2443 {
2444 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2445 	struct iwm_phy_db_chg_txp *txp_chg;
2446 	int i;
2447 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2448 
2449 	if (ch_index == 0xff)
2450 		return 0xff;
2451 
2452 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2453 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2454 		if (!txp_chg)
2455 			return 0xff;
2456 		/*
		 * Look for the first channel group whose max channel index
		 * is greater than or equal to the requested channel index.
2459 		 */
2460 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2461 			return i;
2462 	}
2463 	return 0xff;
2464 }
2465 
2466 int
2467 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2468     uint16_t *size, uint16_t ch_id)
2469 {
2470 	struct iwm_phy_db_entry *entry;
2471 	uint16_t ch_group_id = 0;
2472 
2473 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2474 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2475 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2476 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2477 
2478 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2479 	if (!entry)
2480 		return EINVAL;
2481 
2482 	*data = entry->data;
2483 	*size = entry->size;
2484 
2485 	return 0;
2486 }
2487 
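/*
 * Send one PHY database entry to the firmware.  The host command is
 * built from two fragments: data[0] carries the fixed type/length
 * header and data[1] the variable-sized payload.  The command is sent
 * asynchronously (IWM_CMD_ASYNC).
 */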
2488 int
2489 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2490     void *data)
2491 {
2492 	struct iwm_phy_db_cmd phy_db_cmd;
2493 	struct iwm_host_cmd cmd = {
2494 		.id = IWM_PHY_DB_CMD,
2495 		.flags = IWM_CMD_ASYNC,
2496 	};
2497 
	phy_db_cmd.type = htole16(type);
	phy_db_cmd.length = htole16(length);
2500 
2501 	cmd.data[0] = &phy_db_cmd;
2502 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2503 	cmd.data[1] = data;
2504 	cmd.len[1] = length;
2505 
2506 	return iwm_send_cmd(sc, &cmd);
2507 }
2508 
2509 int
2510 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type,
2511     uint8_t max_ch_groups)
2512 {
2513 	uint16_t i;
2514 	int err;
2515 	struct iwm_phy_db_entry *entry;
2516 
2517 	for (i = 0; i < max_ch_groups; i++) {
2518 		entry = iwm_phy_db_get_section(sc, type, i);
2519 		if (!entry)
2520 			return EINVAL;
2521 
2522 		if (!entry->size)
2523 			continue;
2524 
2525 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2526 		if (err)
2527 			return err;
2528 
2529 		DELAY(1000);
2530 	}
2531 
2532 	return 0;
2533 }
2534 
2535 int
2536 iwm_send_phy_db_data(struct iwm_softc *sc)
2537 {
2538 	uint8_t *data = NULL;
2539 	uint16_t size = 0;
2540 	int err;
2541 
2542 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2543 	if (err)
2544 		return err;
2545 
2546 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2547 	if (err)
2548 		return err;
2549 
2550 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2551 	    &data, &size, 0);
2552 	if (err)
2553 		return err;
2554 
2555 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2556 	if (err)
2557 		return err;
2558 
2559 	err = iwm_phy_db_send_all_channel_groups(sc,
2560 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2561 	if (err)
2562 		return err;
2563 
2564 	err = iwm_phy_db_send_all_channel_groups(sc,
2565 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2566 	if (err)
2567 		return err;
2568 
2569 	return 0;
2570 }
2571 
2572 /*
2573  * For the high priority TE use a time event type that has similar priority to
2574  * the FW's action scan priority.
2575  */
2576 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2577 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2578 
2579 int
2580 iwm_send_time_event_cmd(struct iwm_softc *sc,
2581     const struct iwm_time_event_cmd *cmd)
2582 {
2583 	struct iwm_rx_packet *pkt;
2584 	struct iwm_time_event_resp *resp;
2585 	struct iwm_host_cmd hcmd = {
2586 		.id = IWM_TIME_EVENT_CMD,
2587 		.flags = IWM_CMD_WANT_RESP,
2588 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2589 	};
2590 	uint32_t resp_len;
2591 	int err;
2592 
2593 	hcmd.data[0] = cmd;
2594 	hcmd.len[0] = sizeof(*cmd);
2595 	err = iwm_send_cmd(sc, &hcmd);
2596 	if (err)
2597 		return err;
2598 
2599 	pkt = hcmd.resp_pkt;
2600 	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
2601 		err = EIO;
2602 		goto out;
2603 	}
2604 
2605 	resp_len = iwm_rx_packet_payload_len(pkt);
2606 	if (resp_len != sizeof(*resp)) {
2607 		err = EIO;
2608 		goto out;
2609 	}
2610 
2611 	resp = (void *)pkt->data;
2612 	if (le32toh(resp->status) == 0)
2613 		sc->sc_time_event_uid = le32toh(resp->unique_id);
2614 	else
2615 		err = EIO;
2616 out:
2617 	iwm_free_resp(sc, &hcmd);
2618 	return err;
2619 }
2620 
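/*
 * Schedule a "session protection" time event so the firmware keeps the
 * device on the current channel, e.g. while an association is in
 * progress.  The firmware may defer the start of the event by up to
 * max_delay.  iwm_send_time_event_cmd() records the event's unique id
 * so iwm_unprotect_session() can remove it again.
 */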
2621 void
2622 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2623     uint32_t duration, uint32_t max_delay)
2624 {
2625 	struct iwm_time_event_cmd time_cmd;
2626 
2627 	/* Do nothing if a time event is already scheduled. */
2628 	if (sc->sc_flags & IWM_FLAG_TE_ACTIVE)
2629 		return;
2630 
2631 	memset(&time_cmd, 0, sizeof(time_cmd));
2632 
2633 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2634 	time_cmd.id_and_color =
2635 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2636 	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2637 
2638 	time_cmd.apply_time = htole32(0);
2639 
2640 	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2641 	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why does a non-periodic event need an interval at all? */
2643 	time_cmd.interval = htole32(1);
2644 	time_cmd.duration = htole32(duration);
2645 	time_cmd.repeat = 1;
2646 	time_cmd.policy
2647 	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2648 	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
2649 		IWM_T2_V2_START_IMMEDIATELY);
2650 
2651 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2652 		sc->sc_flags |= IWM_FLAG_TE_ACTIVE;
2653 
2654 	DELAY(100);
2655 }
2656 
2657 void
2658 iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in)
2659 {
2660 	struct iwm_time_event_cmd time_cmd;
2661 
2662 	/* Do nothing if the time event has already ended. */
2663 	if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE) == 0)
2664 		return;
2665 
2666 	memset(&time_cmd, 0, sizeof(time_cmd));
2667 
2668 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
2669 	time_cmd.id_and_color =
2670 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2671 	time_cmd.id = htole32(sc->sc_time_event_uid);
2672 
2673 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2674 		sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
2675 
2676 	DELAY(100);
2677 }
2678 
2679 /*
2680  * NVM read access and content parsing.  We do not support
2681  * external NVM or writing NVM.
2682  */
2683 
2684 /* list of NVM sections we are allowed/need to read */
2685 const int iwm_nvm_to_read[] = {
2686 	IWM_NVM_SECTION_TYPE_HW,
2687 	IWM_NVM_SECTION_TYPE_SW,
2688 	IWM_NVM_SECTION_TYPE_REGULATORY,
2689 	IWM_NVM_SECTION_TYPE_CALIBRATION,
2690 	IWM_NVM_SECTION_TYPE_PRODUCTION,
2691 	IWM_NVM_SECTION_TYPE_REGULATORY_SDP,
2692 	IWM_NVM_SECTION_TYPE_HW_8000,
2693 	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2694 	IWM_NVM_SECTION_TYPE_PHY_SKU,
2695 };
2696 
2697 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2698 
2699 #define IWM_NVM_WRITE_OPCODE 1
2700 #define IWM_NVM_READ_OPCODE 0
2701 
2702 int
2703 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2704     uint16_t length, uint8_t *data, uint16_t *len)
2705 {
2707 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2708 		.offset = htole16(offset),
2709 		.length = htole16(length),
2710 		.type = htole16(section),
2711 		.op_code = IWM_NVM_READ_OPCODE,
2712 	};
2713 	struct iwm_nvm_access_resp *nvm_resp;
2714 	struct iwm_rx_packet *pkt;
2715 	struct iwm_host_cmd cmd = {
2716 		.id = IWM_NVM_ACCESS_CMD,
2717 		.flags = (IWM_CMD_WANT_RESP | IWM_CMD_SEND_IN_RFKILL),
2718 		.resp_pkt_len = IWM_CMD_RESP_MAX,
2719 		.data = { &nvm_access_cmd, },
2720 	};
2721 	int err, offset_read;
2722 	size_t bytes_read;
2723 	uint8_t *resp_data;
2724 
2725 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2726 
2727 	err = iwm_send_cmd(sc, &cmd);
2728 	if (err)
2729 		return err;
2730 
2731 	pkt = cmd.resp_pkt;
2732 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2733 		err = EIO;
2734 		goto exit;
2735 	}
2736 
2737 	/* Extract NVM response */
2738 	nvm_resp = (void *)pkt->data;
	if (nvm_resp == NULL) {
		err = EIO;
		goto exit;
	}
2741 
2742 	err = le16toh(nvm_resp->status);
2743 	bytes_read = le16toh(nvm_resp->length);
2744 	offset_read = le16toh(nvm_resp->offset);
2745 	resp_data = nvm_resp->data;
2746 	if (err) {
2747 		err = EINVAL;
2748 		goto exit;
2749 	}
2750 
2751 	if (offset_read != offset) {
2752 		err = EINVAL;
2753 		goto exit;
2754 	}
2755 
2756 	if (bytes_read > length) {
2757 		err = EINVAL;
2758 		goto exit;
2759 	}
2760 
2761 	memcpy(data + offset, resp_data, bytes_read);
2762 	*len = bytes_read;
2763 
2764  exit:
2765 	iwm_free_resp(sc, &cmd);
2766 	return err;
2767 }
2768 
2769 /*
2770  * Reads an NVM section completely.
 * NICs prior to the 7000 family don't have a real NVM and instead read
 * section 0, which is the EEPROM. Because EEPROM reads are not bounded
 * by the uCode, we must check ourselves that we don't overflow and
 * read more than the EEPROM size. A chunk returned shorter than
 * requested marks the end of a section.
2775  */
2776 int
2777 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2778     uint16_t *len, size_t max_len)
2779 {
2780 	uint16_t chunklen, seglen;
2781 	int err = 0;
2782 
2783 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2784 	*len = 0;
2785 
2786 	/* Read NVM chunks until exhausted (reading less than requested) */
2787 	while (seglen == chunklen && *len < max_len) {
2788 		err = iwm_nvm_read_chunk(sc,
2789 		    section, *len, chunklen, data, &seglen);
2790 		if (err)
2791 			return err;
2792 
2793 		*len += seglen;
2794 	}
2795 
2796 	return err;
2797 }
2798 
2799 uint8_t
2800 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2801 {
2802 	uint8_t tx_ant;
2803 
2804 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2805 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2806 
2807 	if (sc->sc_nvm.valid_tx_ant)
2808 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2809 
2810 	return tx_ant;
2811 }
2812 
2813 uint8_t
2814 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2815 {
2816 	uint8_t rx_ant;
2817 
2818 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2819 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2820 
2821 	if (sc->sc_nvm.valid_rx_ant)
2822 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2823 
2824 	return rx_ant;
2825 }
2826 
2827 void
2828 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
2829     const uint8_t *nvm_channels, int nchan)
2830 {
2831 	struct ieee80211com *ic = &sc->sc_ic;
2832 	struct iwm_nvm_data *data = &sc->sc_nvm;
2833 	int ch_idx;
2834 	struct ieee80211_channel *channel;
2835 	uint16_t ch_flags;
2836 	int is_5ghz;
2837 	int flags, hw_value;
2838 
2839 	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
2840 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2841 
2842 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
2843 		    !data->sku_cap_band_52GHz_enable)
2844 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
2845 
2846 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID))
2847 			continue;
2848 
2849 		hw_value = nvm_channels[ch_idx];
2850 		channel = &ic->ic_channels[hw_value];
2851 
2852 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
2853 		if (!is_5ghz) {
2854 			flags = IEEE80211_CHAN_2GHZ;
2855 			channel->ic_flags
2856 			    = IEEE80211_CHAN_CCK
2857 			    | IEEE80211_CHAN_OFDM
2858 			    | IEEE80211_CHAN_DYN
2859 			    | IEEE80211_CHAN_2GHZ;
2860 		} else {
2861 			flags = IEEE80211_CHAN_5GHZ;
2862 			channel->ic_flags =
2863 			    IEEE80211_CHAN_A;
2864 		}
2865 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2866 
2867 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
2868 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2869 
2870 		if (data->sku_cap_11n_enable)
2871 			channel->ic_flags |= IEEE80211_CHAN_HT;
2872 	}
2873 }
2874 
2875 int
2876 iwm_mimo_enabled(struct iwm_softc *sc)
2877 {
2878 	struct ieee80211com *ic = &sc->sc_ic;
2879 
2880 	return !sc->sc_nvm.sku_cap_mimo_disable &&
2881 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
2882 }
2883 
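/*
 * Advertise HT MCS 0-7 unconditionally.  MCS 8-15 require a second
 * spatial stream, so they are only advertised when MIMO is enabled and
 * the radio has two usable RX chains (antenna mask A+B or B+C).
 */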
2884 void
2885 iwm_setup_ht_rates(struct iwm_softc *sc)
2886 {
2887 	struct ieee80211com *ic = &sc->sc_ic;
2888 	uint8_t rx_ant;
2889 
2890 	/* TX is supported with the same MCS as RX. */
2891 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2892 
2893 	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
2894 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
2895 
2896 	if (!iwm_mimo_enabled(sc))
2897 		return;
2898 
2899 	rx_ant = iwm_fw_valid_rx_ant(sc);
2900 	if ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
2901 	    (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)
2902 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
2903 }
2904 
2905 #define IWM_MAX_RX_BA_SESSIONS 16
2906 
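/*
 * Start or stop an RX block ack session in firmware via an ADD_STA
 * "modify" command.  Firmware without the IWM_UCODE_TLV_API_STA_TYPE
 * capability uses the shorter v7 command layout, hence the two
 * possible command sizes below.
 */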
2907 void
2908 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
2909     uint16_t ssn, uint16_t winsize, int start)
2910 {
2911 	struct ieee80211com *ic = &sc->sc_ic;
2912 	struct iwm_add_sta_cmd cmd;
2913 	struct iwm_node *in = (void *)ni;
2914 	int err, s;
2915 	uint32_t status;
2916 	size_t cmdsize;
2917 
2918 	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
2919 		ieee80211_addba_req_refuse(ic, ni, tid);
2920 		return;
2921 	}
2922 
2923 	memset(&cmd, 0, sizeof(cmd));
2924 
2925 	cmd.sta_id = IWM_STATION_ID;
2926 	cmd.mac_id_n_color
2927 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2928 	cmd.add_modify = IWM_STA_MODE_MODIFY;
2929 
2930 	if (start) {
2931 		cmd.add_immediate_ba_tid = (uint8_t)tid;
2932 		cmd.add_immediate_ba_ssn = ssn;
2933 		cmd.rx_ba_window = winsize;
2934 	} else {
2935 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
2936 	}
2937 	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
2938 	    IWM_STA_MODIFY_REMOVE_BA_TID;
2939 
2940 	status = IWM_ADD_STA_SUCCESS;
2941 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
2942 		cmdsize = sizeof(cmd);
2943 	else
2944 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
2945 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
2946 	    &status);
2947 
2948 	s = splnet();
2949 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) == IWM_ADD_STA_SUCCESS) {
2950 		if (start) {
2951 			sc->sc_rx_ba_sessions++;
2952 			ieee80211_addba_req_accept(ic, ni, tid);
2953 		} else if (sc->sc_rx_ba_sessions > 0)
2954 			sc->sc_rx_ba_sessions--;
2955 	} else if (start)
2956 		ieee80211_addba_req_refuse(ic, ni, tid);
2957 
2958 	splx(s);
2959 }
2960 
2961 void
2962 iwm_htprot_task(void *arg)
2963 {
2964 	struct iwm_softc *sc = arg;
2965 	struct ieee80211com *ic = &sc->sc_ic;
2966 	struct iwm_node *in = (void *)ic->ic_bss;
2967 	int err, s = splnet();
2968 
2969 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
2970 		refcnt_rele_wake(&sc->task_refs);
2971 		splx(s);
2972 		return;
2973 	}
2974 
2975 	/* This call updates HT protection based on in->in_ni.ni_htop1. */
2976 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
2977 	if (err)
2978 		printf("%s: could not change HT protection: error %d\n",
2979 		    DEVNAME(sc), err);
2980 
2981 	refcnt_rele_wake(&sc->task_refs);
2982 	splx(s);
2983 }
2984 
2985 /*
2986  * This function is called by upper layer when HT protection settings in
2987  * beacons have changed.
2988  */
2989 void
2990 iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
2991 {
2992 	struct iwm_softc *sc = ic->ic_softc;
2993 
2994 	/* assumes that ni == ic->ic_bss */
2995 	iwm_add_task(sc, systq, &sc->htprot_task);
2996 }
2997 
2998 void
2999 iwm_ba_task(void *arg)
3000 {
3001 	struct iwm_softc *sc = arg;
3002 	struct ieee80211com *ic = &sc->sc_ic;
3003 	struct ieee80211_node *ni = ic->ic_bss;
3004 	int s = splnet();
3005 
3006 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
3007 		refcnt_rele_wake(&sc->task_refs);
3008 		splx(s);
3009 		return;
3010 	}
3011 
3012 	if (sc->ba_start)
3013 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn,
3014 		    sc->ba_winsize, 1);
3015 	else
3016 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0, 0);
3017 
3018 	refcnt_rele_wake(&sc->task_refs);
3019 	splx(s);
3020 }
3021 
3022 /*
3023  * This function is called by upper layer when an ADDBA request is received
3024  * from another STA and before the ADDBA response is sent.
3025  */
3026 int
3027 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3028     uint8_t tid)
3029 {
3030 	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3031 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3032 
3033 	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
3034 		return ENOSPC;
3035 
3036 	sc->ba_start = 1;
3037 	sc->ba_tid = tid;
3038 	sc->ba_ssn = htole16(ba->ba_winstart);
3039 	sc->ba_winsize = htole16(ba->ba_winsize);
3040 	iwm_add_task(sc, systq, &sc->ba_task);
3041 
3042 	return EBUSY;
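	/*
	 * Returning EBUSY tells net80211 that the request is in flight;
	 * iwm_ba_task() will call ieee80211_addba_req_accept() or
	 * ieee80211_addba_req_refuse() once the firmware has been updated.
	 */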
3043 }
3044 
3045 /*
3046  * This function is called by upper layer on teardown of an HT-immediate
 * Block Ack agreement (e.g. upon receipt of a DELBA frame).
3048  */
3049 void
3050 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3051     uint8_t tid)
3052 {
3053 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3054 
3055 	sc->ba_start = 0;
3056 	sc->ba_tid = tid;
3057 	iwm_add_task(sc, systq, &sc->ba_task);
3058 }
3059 
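/*
 * Determine the MAC address on 8000 family devices: prefer the address
 * in the MAC_OVERRIDE NVM section unless it is the known reserved
 * address or otherwise invalid, then fall back to the address stored
 * in the WFMP periphery registers.
 */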
3060 void
3061 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3062     const uint16_t *mac_override, const uint16_t *nvm_hw)
3063 {
3064 	const uint8_t *hw_addr;
3065 
3066 	if (mac_override) {
3067 		static const uint8_t reserved_mac[] = {
3068 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3069 		};
3070 
3071 		hw_addr = (const uint8_t *)(mac_override +
3072 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
3073 
3074 		/*
		 * Store the MAC address from the MAO section.
		 * No byte swapping is required in the MAO section.
3077 		 */
3078 		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
3079 
3080 		/*
3081 		 * Force the use of the OTP MAC address in case of reserved MAC
3082 		 * address in the NVM, or if address is given but invalid.
3083 		 */
3084 		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
3085 		    (memcmp(etherbroadcastaddr, data->hw_addr,
3086 		    sizeof(etherbroadcastaddr)) != 0) &&
3087 		    (memcmp(etheranyaddr, data->hw_addr,
3088 		    sizeof(etheranyaddr)) != 0) &&
3089 		    !ETHER_IS_MULTICAST(data->hw_addr))
3090 			return;
3091 	}
3092 
3093 	if (nvm_hw) {
3094 		/* Read the mac address from WFMP registers. */
3095 		uint32_t mac_addr0, mac_addr1;
3096 
3097 		if (!iwm_nic_lock(sc))
3098 			goto out;
3099 		mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3100 		mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
3101 		iwm_nic_unlock(sc);
3102 
3103 		hw_addr = (const uint8_t *)&mac_addr0;
3104 		data->hw_addr[0] = hw_addr[3];
3105 		data->hw_addr[1] = hw_addr[2];
3106 		data->hw_addr[2] = hw_addr[1];
3107 		data->hw_addr[3] = hw_addr[0];
3108 
3109 		hw_addr = (const uint8_t *)&mac_addr1;
3110 		data->hw_addr[4] = hw_addr[1];
3111 		data->hw_addr[5] = hw_addr[0];
3112 
3113 		return;
3114 	}
3115 out:
3116 	printf("%s: mac address not found\n", DEVNAME(sc));
3117 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
3118 }
3119 
3120 int
3121 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3122     const uint16_t *nvm_sw, const uint16_t *nvm_calib,
3123     const uint16_t *mac_override, const uint16_t *phy_sku,
3124     const uint16_t *regulatory, int n_regulatory)
3125 {
3126 	struct iwm_nvm_data *data = &sc->sc_nvm;
3127 	uint8_t hw_addr[ETHER_ADDR_LEN];
3128 	uint32_t sku;
3129 	uint16_t lar_config;
3130 
3131 	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
3132 
3133 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3134 		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
3135 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
3136 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
3137 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
3138 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
3139 
3140 		sku = le16_to_cpup(nvm_sw + IWM_SKU);
3141 	} else {
3142 		uint32_t radio_cfg =
3143 		    le32_to_cpup((uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
3144 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
3145 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
3146 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
3147 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
3148 		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
3149 		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
3150 
3151 		sku = le32_to_cpup((uint32_t *)(phy_sku + IWM_SKU_8000));
3152 	}
3153 
3154 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
3155 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
3156 	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
3157 	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
3158 
3159 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3160 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
3161 				       IWM_NVM_LAR_OFFSET_8000_OLD :
3162 				       IWM_NVM_LAR_OFFSET_8000;
3163 
3164 		lar_config = le16_to_cpup(regulatory + lar_offset);
3165 		data->lar_enabled = !!(lar_config &
3166 				       IWM_NVM_LAR_ENABLED_8000);
3167 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS_8000);
3168 	} else
3169 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
3170 
3171 
	/*
	 * The address is stored as 16-bit little-endian words, so the
	 * bytes arrive in the order 2,1,4,3,6,5 and are swapped pairwise.
	 */
3173 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3174 		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
3175 		data->hw_addr[0] = hw_addr[1];
3176 		data->hw_addr[1] = hw_addr[0];
3177 		data->hw_addr[2] = hw_addr[3];
3178 		data->hw_addr[3] = hw_addr[2];
3179 		data->hw_addr[4] = hw_addr[5];
3180 		data->hw_addr[5] = hw_addr[4];
3181 	} else
3182 		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
3183 
3184 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3185 		if (sc->nvm_type == IWM_NVM_SDP) {
3186 			iwm_init_channel_map(sc, regulatory, iwm_nvm_channels,
3187 			    MIN(n_regulatory, nitems(iwm_nvm_channels)));
3188 		} else {
3189 			iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3190 			    iwm_nvm_channels, nitems(iwm_nvm_channels));
3191 		}
3192 	} else
3193 		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3194 		    iwm_nvm_channels_8000,
3195 		    MIN(n_regulatory, nitems(iwm_nvm_channels_8000)));
3196 
	data->calib_version = 255;   /* TODO:
					this value prevents some checks from
					failing; we need to find out whether
					this field is still needed, and if so,
					where it lives in the NVM */
3202 
3203 	return 0;
3204 }
3205 
3206 int
3207 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3208 {
3209 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3210 	const uint16_t *regulatory = NULL;
3211 	int n_regulatory = 0;
3212 
3213 	/* Checking for required sections */
3214 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3215 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3216 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3217 			return ENOENT;
3218 		}
3219 
3220 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3221 
3222 		if (sc->nvm_type == IWM_NVM_SDP) {
3223 			if (!sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data)
3224 				return ENOENT;
3225 			regulatory = (const uint16_t *)
3226 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data;
3227 			n_regulatory =
3228 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].length;
3229 		}
3230 	} else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3231 		/* SW and REGULATORY sections are mandatory */
3232 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3233 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
3234 			return ENOENT;
3235 		}
3236 		/* MAC_OVERRIDE or at least HW section must exist */
3237 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
3238 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3239 			return ENOENT;
3240 		}
3241 
3242 		/* PHY_SKU section is mandatory in B0 */
3243 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
3244 			return ENOENT;
3245 		}
3246 
3247 		regulatory = (const uint16_t *)
3248 		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
3249 		n_regulatory = sections[IWM_NVM_SECTION_TYPE_REGULATORY].length;
3250 		hw = (const uint16_t *)
3251 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
3252 		mac_override =
3253 			(const uint16_t *)
3254 			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
3255 		phy_sku = (const uint16_t *)
3256 		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
3257 	} else {
3258 		panic("unknown device family %d\n", sc->sc_device_family);
3259 	}
3260 
3261 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
3262 	calib = (const uint16_t *)
3263 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
3264 
3265 	/* XXX should pass in the length of every section */
3266 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
3267 	    phy_sku, regulatory, n_regulatory);
3268 }
3269 
3270 int
3271 iwm_nvm_init(struct iwm_softc *sc)
3272 {
3273 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
3274 	int i, section, err;
3275 	uint16_t len;
3276 	uint8_t *buf;
3277 	const size_t bufsz = sc->sc_nvm_max_section_size;
3278 
3279 	memset(nvm_sections, 0, sizeof(nvm_sections));
3280 
3281 	buf = malloc(bufsz, M_DEVBUF, M_WAIT);
3282 	if (buf == NULL)
3283 		return ENOMEM;
3284 
3285 	for (i = 0; i < nitems(iwm_nvm_to_read); i++) {
3286 		section = iwm_nvm_to_read[i];
		KASSERT(section < nitems(nvm_sections));
3288 
3289 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
3290 		if (err) {
3291 			err = 0;
3292 			continue;
3293 		}
3294 		nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT);
3295 		if (nvm_sections[section].data == NULL) {
3296 			err = ENOMEM;
3297 			break;
3298 		}
3299 		memcpy(nvm_sections[section].data, buf, len);
3300 		nvm_sections[section].length = len;
3301 	}
3302 	free(buf, M_DEVBUF, bufsz);
3303 	if (err == 0)
3304 		err = iwm_parse_nvm_sections(sc, nvm_sections);
3305 
3306 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
3307 		if (nvm_sections[i].data != NULL)
3308 			free(nvm_sections[i].data, M_DEVBUF,
3309 			    nvm_sections[i].length);
3310 	}
3311 
3312 	return err;
3313 }
3314 
3315 int
3316 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
3317     const uint8_t *section, uint32_t byte_cnt)
3318 {
3319 	int err = EINVAL;
3320 	uint32_t chunk_sz, offset;
3321 
3322 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
3323 
3324 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
3325 		uint32_t addr, len;
3326 		const uint8_t *data;
3327 
3328 		addr = dst_addr + offset;
3329 		len = MIN(chunk_sz, byte_cnt - offset);
3330 		data = section + offset;
3331 
3332 		err = iwm_firmware_load_chunk(sc, addr, data, len);
3333 		if (err)
3334 			break;
3335 	}
3336 
3337 	return err;
3338 }
3339 
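/*
 * Load one firmware chunk into device SRAM: bounce it through the
 * pre-allocated DMA-safe buffer, point the FH service channel's TFD at
 * that buffer, and sleep until the "chunk done" interrupt fires, with
 * a one second timeout per wakeup.  Chunks destined for the extended
 * SRAM window additionally require the IWM_LMPM_CHICK extended-address
 * bit while the transfer is in progress.
 */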
3340 int
3341 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
3342     const uint8_t *chunk, uint32_t byte_cnt)
3343 {
3344 	struct iwm_dma_info *dma = &sc->fw_dma;
3345 	int err;
3346 
3347 	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
3348 	memcpy(dma->vaddr, chunk, byte_cnt);
3349 	bus_dmamap_sync(sc->sc_dmat,
3350 	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
3351 
3352 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
3353 	    dst_addr <= IWM_FW_MEM_EXTENDED_END)
3354 		iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
3355 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3356 
3357 	sc->sc_fw_chunk_done = 0;
3358 
3359 	if (!iwm_nic_lock(sc))
3360 		return EBUSY;
3361 
3362 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3363 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
3364 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
3365 	    dst_addr);
3366 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
3367 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
3368 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
3369 	    (iwm_get_dma_hi_addr(dma->paddr)
3370 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
3371 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
3372 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
3373 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
3374 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
3375 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3376 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
3377 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
3378 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
3379 
3380 	iwm_nic_unlock(sc);
3381 
3382 	/* Wait for this segment to load. */
3383 	err = 0;
3384 	while (!sc->sc_fw_chunk_done) {
3385 		err = tsleep_nsec(&sc->sc_fw, 0, "iwmfw", SEC_TO_NSEC(1));
3386 		if (err)
3387 			break;
3388 	}
3389 
3390 	if (!sc->sc_fw_chunk_done)
3391 		printf("%s: fw chunk addr 0x%x len %d failed to load\n",
3392 		    DEVNAME(sc), dst_addr, byte_cnt);
3393 
3394 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
3395 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
3396 		iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
3397 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3398 	}
3399 
3400 	return err;
3401 }
3402 
3403 int
3404 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3405 {
3406 	struct iwm_fw_sects *fws;
3407 	int err, i;
3408 	void *data;
3409 	uint32_t dlen;
3410 	uint32_t offset;
3411 
3412 	fws = &sc->sc_fw.fw_sects[ucode_type];
3413 	for (i = 0; i < fws->fw_count; i++) {
3414 		data = fws->fw_sect[i].fws_data;
3415 		dlen = fws->fw_sect[i].fws_len;
3416 		offset = fws->fw_sect[i].fws_devoff;
3417 		if (dlen > sc->sc_fwdmasegsz) {
3418 			err = EFBIG;
3419 		} else
3420 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3421 		if (err) {
3422 			printf("%s: could not load firmware chunk %u of %u\n",
3423 			    DEVNAME(sc), i, fws->fw_count);
3424 			return err;
3425 		}
3426 	}
3427 
3428 	iwm_enable_interrupts(sc);
3429 
3430 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
3431 
3432 	return 0;
3433 }
3434 
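/*
 * Load the firmware sections belonging to one CPU and report progress
 * in IWM_FH_UCODE_LOAD_STATUS.  sec_num starts at 1 and becomes
 * (sec_num << 1) | 1 after each section, so the status word
 * accumulates a mask of ones (0x1, 0x3, 0x7, ...), shifted into the
 * high half for CPU2; writing 0xFFFF (or 0xFFFFFFFF for CPU2) at the
 * end signals that all sections for that CPU have been loaded.
 */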
3435 int
3436 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3437     int cpu, int *first_ucode_section)
3438 {
3439 	int shift_param;
3440 	int i, err = 0, sec_num = 0x1;
3441 	uint32_t val, last_read_idx = 0;
3442 	void *data;
3443 	uint32_t dlen;
3444 	uint32_t offset;
3445 
3446 	if (cpu == 1) {
3447 		shift_param = 0;
3448 		*first_ucode_section = 0;
3449 	} else {
3450 		shift_param = 16;
3451 		(*first_ucode_section)++;
3452 	}
3453 
3454 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3455 		last_read_idx = i;
3456 		data = fws->fw_sect[i].fws_data;
3457 		dlen = fws->fw_sect[i].fws_len;
3458 		offset = fws->fw_sect[i].fws_devoff;
3459 
3460 		/*
3461 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
3462 		 * CPU1 to CPU2.
3463 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
3464 		 * CPU2 non paged to CPU2 paging sec.
3465 		 */
3466 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3467 		    offset == IWM_PAGING_SEPARATOR_SECTION)
3468 			break;
3469 
3470 		if (dlen > sc->sc_fwdmasegsz) {
3471 			err = EFBIG;
3472 		} else
3473 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3474 		if (err) {
3475 			printf("%s: could not load firmware chunk %d "
3476 			    "(error %d)\n", DEVNAME(sc), i, err);
3477 			return err;
3478 		}
3479 
3480 		/* Notify the ucode of the loaded section number and status */
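     		/*
     		 * sec_num accumulates a growing bitmask (0x1, 0x3, 0x7, ...)
     		 * so that each loaded section sets the next status bit.
     		 */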
3481 		if (iwm_nic_lock(sc)) {
3482 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
3483 			val = val | (sec_num << shift_param);
3484 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
3485 			sec_num = (sec_num << 1) | 0x1;
3486 			iwm_nic_unlock(sc);
3487 		} else {
3488 			err = EBUSY;
3489 			printf("%s: could not load firmware chunk %d "
3490 			    "(error %d)\n", DEVNAME(sc), i, err);
3491 			return err;
3492 		}
3493 	}
3494 
3495 	*first_ucode_section = last_read_idx;
3496 
3497 	if (iwm_nic_lock(sc)) {
3498 		if (cpu == 1)
3499 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
3500 		else
3501 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
3502 		iwm_nic_unlock(sc);
3503 	} else {
3504 		err = EBUSY;
3505 		printf("%s: could not finalize firmware loading (error %d)\n",
3506 		    DEVNAME(sc), err);
3507 		return err;
3508 	}
3509 
3510 	return 0;
3511 }
3512 
3513 int
3514 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3515 {
3516 	struct iwm_fw_sects *fws;
3517 	int err = 0;
3518 	int first_ucode_section;
3519 
3520 	fws = &sc->sc_fw.fw_sects[ucode_type];
3521 
3522 	/* configure the ucode to be ready to get the secured image */
3523 	/* release CPU reset */
3524 	if (iwm_nic_lock(sc)) {
3525 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
3526 		    IWM_RELEASE_CPU_RESET_BIT);
3527 		iwm_nic_unlock(sc);
3528 	}
3529 
3530 	/* Load the secured binary sections of CPU1. */
3531 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3532 	if (err)
3533 		return err;
3534 
3535 	/* Load the binary sections of CPU2. */
3536 	err = iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3537 	if (err)
3538 		return err;
3539 
3540 	iwm_enable_interrupts(sc);
3541 	return 0;
3542 }
3543 
3544 int
3545 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3546 {
3547 	int err, w;
3548 
3549 	sc->sc_uc.uc_intr = 0;
3550 
3551 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
3552 		err = iwm_load_firmware_8000(sc, ucode_type);
3553 	else
3554 		err = iwm_load_firmware_7000(sc, ucode_type);
3555 
3556 	if (err)
3557 		return err;
3558 
3559 	/* Wait for the firmware alive interrupt; up to ten 100ms sleeps. */
3560 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
3561 		err = tsleep_nsec(&sc->sc_uc, 0, "iwmuc", MSEC_TO_NSEC(100));
3562 	}
3563 	if (err || !sc->sc_uc.uc_ok)
3564 		printf("%s: could not load firmware\n", DEVNAME(sc));
3565 
3566 	return err;
3567 }
3568 
3569 int
3570 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3571 {
3572 	int err;
3573 
3574 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
3575 
3576 	err = iwm_nic_init(sc);
3577 	if (err) {
3578 		printf("%s: unable to init nic\n", DEVNAME(sc));
3579 		return err;
3580 	}
3581 
3582 	/* make sure rfkill handshake bits are cleared */
3583 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3584 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
3585 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3586 
3587 	/* clear (again), then enable firmware load interrupt */
3588 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
3589 	iwm_enable_fwload_interrupt(sc);
3590 
3591 	/* really make sure rfkill handshake bits are cleared */
3592 	/* maybe we should write a few times more?  just to make sure */
3593 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3594 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3595 
3596 	return iwm_load_firmware(sc, ucode_type);
3597 }
3598 
3599 int
3600 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3601 {
3602 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3603 		.valid = htole32(valid_tx_ant),
3604 	};
3605 
3606 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
3607 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3608 }
3609 
3610 int
3611 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3612 {
3613 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
3614 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3615 
3616 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3617 	phy_cfg_cmd.calib_control.event_trigger =
3618 	    sc->sc_default_calib[ucode_type].event_trigger;
3619 	phy_cfg_cmd.calib_control.flow_trigger =
3620 	    sc->sc_default_calib[ucode_type].flow_trigger;
3621 
3622 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3623 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3624 }
3625 
3626 int
3627 iwm_send_dqa_cmd(struct iwm_softc *sc)
3628 {
3629 	struct iwm_dqa_enable_cmd dqa_cmd = {
3630 		.cmd_queue = htole32(IWM_DQA_CMD_QUEUE),
3631 	};
3632 	uint32_t cmd_id;
3633 
3634 	cmd_id = iwm_cmd_id(IWM_DQA_ENABLE_CMD, IWM_DATA_PATH_GROUP, 0);
3635 	return iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
3636 }
3637 
3638 int
3639 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
3640 	enum iwm_ucode_type ucode_type)
3641 {
3642 	enum iwm_ucode_type old_type = sc->sc_uc_current;
3643 	struct iwm_fw_sects *fw = &sc->sc_fw.fw_sects[ucode_type];
3644 	int err;
3645 
3646 	err = iwm_read_firmware(sc, ucode_type);
3647 	if (err)
3648 		return err;
3649 
3650 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
3651 		sc->cmdqid = IWM_DQA_CMD_QUEUE;
3652 	else
3653 		sc->cmdqid = IWM_CMD_QUEUE;
3654 
3655 	sc->sc_uc_current = ucode_type;
3656 	err = iwm_start_fw(sc, ucode_type);
3657 	if (err) {
3658 		sc->sc_uc_current = old_type;
3659 		return err;
3660 	}
3661 
3662 	err = iwm_post_alive(sc);
3663 	if (err)
3664 		return err;
3665 
3666 	/*
3667 	 * Configure and operate the firmware paging mechanism.
3668 	 * The driver configures the paging flow only once; the CPU2
3669 	 * paging image is included in the IWM_UCODE_INIT image.
3670 	 */
3671 	if (fw->paging_mem_size) {
3672 		err = iwm_save_fw_paging(sc, fw);
3673 		if (err) {
3674 			printf("%s: failed to save the FW paging image\n",
3675 			    DEVNAME(sc));
3676 			return err;
3677 		}
3678 
3679 		err = iwm_send_paging_cmd(sc, fw);
3680 		if (err) {
3681 			printf("%s: failed to send the paging cmd\n",
3682 			    DEVNAME(sc));
3683 			iwm_free_fw_paging(sc);
3684 			return err;
3685 		}
3686 	}
3687 
3688 	return 0;
3689 }
3690 
3691 int
3692 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3693 {
3694 	const int wait_flags = (IWM_INIT_COMPLETE | IWM_CALIB_COMPLETE);
3695 	int err;
3696 
3697 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3698 		printf("%s: radio is disabled by hardware switch\n",
3699 		    DEVNAME(sc));
3700 		return EPERM;
3701 	}
3702 
3703 	sc->sc_init_complete = 0;
3704 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
3705 	if (err) {
3706 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
3707 		return err;
3708 	}
3709 
3710 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
3711 		err = iwm_send_bt_init_conf(sc);
3712 		if (err) {
3713 			printf("%s: could not init bt coex (error %d)\n",
3714 			    DEVNAME(sc), err);
3715 			return err;
3716 		}
3717 	}
3718 
3719 	if (justnvm) {
3720 		err = iwm_nvm_init(sc);
3721 		if (err) {
3722 			printf("%s: failed to read nvm\n", DEVNAME(sc));
3723 			return err;
3724 		}
3725 
3726 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
3727 			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
3728 			    sc->sc_nvm.hw_addr);
3729 
3730 		return 0;
3731 	}
3732 
3733 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
3734 	if (err)
3735 		return err;
3736 
3737 	/* Send TX valid antennas before triggering calibrations */
3738 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
3739 	if (err)
3740 		return err;
3741 
3742 	/*
3743 	 * Send the PHY configuration command to the init uCode to
3744 	 * start the 16.0 init image's internal calibrations.
3745 	 */
3746 	err = iwm_send_phy_cfg_cmd(sc);
3747 	if (err)
3748 		return err;
3749 
3750 	/*
3751 	 * Nothing to do but wait for the init complete and phy DB
3752 	 * notifications from the firmware.
3753 	 */
3754 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3755 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwminit",
3756 		    SEC_TO_NSEC(2));
3757 		if (err)
3758 			break;
3759 	}
3760 
3761 	return err;
3762 }
3763 
3764 int
3765 iwm_config_ltr(struct iwm_softc *sc)
3766 {
3767 	struct iwm_ltr_config_cmd cmd = {
3768 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
3769 	};
3770 
3771 	if (!sc->sc_ltr_enabled)
3772 		return 0;
3773 
3774 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3775 }
3776 
3777 int
3778 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3779 {
3780 	struct iwm_rx_ring *ring = &sc->rxq;
3781 	struct iwm_rx_data *data = &ring->data[idx];
3782 	struct mbuf *m;
3783 	int err;
3784 	int fatal = 0;
3785 
3786 	m = m_gethdr(M_DONTWAIT, MT_DATA);
3787 	if (m == NULL)
3788 		return ENOBUFS;
3789 
3790 	if (size <= MCLBYTES) {
3791 		MCLGET(m, M_DONTWAIT);
3792 	} else {
3793 		MCLGETI(m, M_DONTWAIT, NULL, IWM_RBUF_SIZE);
3794 	}
3795 	if ((m->m_flags & M_EXT) == 0) {
3796 		m_freem(m);
3797 		return ENOBUFS;
3798 	}
3799 
3800 	if (data->m != NULL) {
3801 		bus_dmamap_unload(sc->sc_dmat, data->map);
3802 		fatal = 1;
3803 	}
3804 
3805 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3806 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3807 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
3808 	if (err) {
3809 		/* XXX */
3810 		if (fatal)
3811 			panic("iwm: could not load RX mbuf");
3812 		m_freem(m);
3813 		return err;
3814 	}
3815 	data->m = m;
3816 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3817 
3818 	/* Update RX descriptor. */
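     	/*
     	 * MQ RX firmware takes a full 64-bit DMA address; older firmware
     	 * takes a 32-bit word holding the address shifted right by 8
     	 * bits (so buffers are effectively 256-byte aligned).
     	 */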
3819 	if (sc->sc_mqrx_supported) {
3820 		((uint64_t *)ring->desc)[idx] =
3821 		    htole64(data->map->dm_segs[0].ds_addr);
3822 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
3823 		    idx * sizeof(uint64_t), sizeof(uint64_t),
3824 		    BUS_DMASYNC_PREWRITE);
3825 	} else {
3826 		((uint32_t *)ring->desc)[idx] =
3827 		    htole32(data->map->dm_segs[0].ds_addr >> 8);
3828 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
3829 		    idx * sizeof(uint32_t), sizeof(uint32_t),
3830 		    BUS_DMASYNC_PREWRITE);
3831 	}
3832 
3833 	return 0;
3834 }
3835 
3836 /*
3837  * RSSI values are reported by the FW as positive values; negate them
3838  * to obtain dBm (e.g. a reported energy of 65 becomes -65 dBm).
3839  * Account for missing antennas by replacing 0 values with -256 dBm:
      * practically zero power and an infeasible 8-bit value.
3840  */
3841 int
3842 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3843 {
3844 	int energy_a, energy_b, energy_c, max_energy;
3845 	uint32_t val;
3846 
3847 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3848 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3849 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3850 	energy_a = energy_a ? -energy_a : -256;
3851 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3852 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3853 	energy_b = energy_b ? -energy_b : -256;
3854 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3855 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3856 	energy_c = energy_c ? -energy_c : -256;
3857 	max_energy = MAX(energy_a, energy_b);
3858 	max_energy = MAX(max_energy, energy_c);
3859 
3860 	return max_energy;
3861 }
3862 
3863 int
3864 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
3865     struct iwm_rx_mpdu_desc *desc)
3866 {
3867 	int energy_a, energy_b;
3868 
3869 	energy_a = desc->v1.energy_a;
3870 	energy_b = desc->v1.energy_b;
3871 	energy_a = energy_a ? -energy_a : -256;
3872 	energy_b = energy_b ? -energy_b : -256;
3873 	return MAX(energy_a, energy_b);
3874 }
3875 
3876 void
3877 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3878     struct iwm_rx_data *data)
3879 {
3880 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3881 
3882 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3883 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3884 
3885 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3886 }
3887 
3888 /*
3889  * Retrieve the average noise (in dBm) among receivers.
3890  */
3891 int
3892 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3893 {
3894 	int i, total, nbant, noise;
3895 
3896 	total = nbant = noise = 0;
3897 	for (i = 0; i < 3; i++) {
3898 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
3899 		if (noise) {
3900 			total += noise;
3901 			nbant++;
3902 		}
3903 	}
3904 
3905 	/* There should be at least one antenna but check anyway. */
3906 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3907 }
3908 
3909 int
3910 iwm_ccmp_decap(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
3911 {
3912 	struct ieee80211com *ic = &sc->sc_ic;
3913 	struct ieee80211_key *k = &ni->ni_pairwise_key;
3914 	struct ieee80211_frame *wh;
3915 	uint64_t pn, *prsc;
3916 	uint8_t *ivp;
3917 	uint8_t tid;
3918 	int hdrlen, hasqos;
3919 
3920 	wh = mtod(m, struct ieee80211_frame *);
3921 	hdrlen = ieee80211_get_hdrlen(wh);
3922 	ivp = (uint8_t *)wh + hdrlen;
3923 
3924 	/* Check that ExtIV bit is set. */
3925 	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
3926 		return 1;
3927 
3928 	hasqos = ieee80211_has_qos(wh);
3929 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
3930 	prsc = &k->k_rsc[tid];
3931 
3932 	/* Extract the 48-bit PN from the CCMP header. */
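     	/* ivp[2] is reserved; ivp[3] carries the key ID and ExtIV bit. */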
3933 	pn = (uint64_t)ivp[0]       |
3934 	     (uint64_t)ivp[1] <<  8 |
3935 	     (uint64_t)ivp[4] << 16 |
3936 	     (uint64_t)ivp[5] << 24 |
3937 	     (uint64_t)ivp[6] << 32 |
3938 	     (uint64_t)ivp[7] << 40;
3939 	if (pn <= *prsc) {
3940 		ic->ic_stats.is_ccmp_replays++;
3941 		return 1;
3942 	}
3943 	/* Last seen packet number is updated in ieee80211_inputm(). */
3944 
3945 	/*
3946 	 * Some firmware versions strip the MIC, and some don't. It is not
3947 	 * clear which of the capability flags could tell us what to expect.
3948 	 * For now, keep things simple and just leave the MIC in place if
3949 	 * it is present.
3950 	 *
3951 	 * The IV will be stripped by ieee80211_inputm().
3952 	 */
3953 	return 0;
3954 }
3955 
3956 void
3957 iwm_rx_frame(struct iwm_softc *sc, struct mbuf *m, int chanidx,
3958     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
3959     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
3960     struct mbuf_list *ml)
3961 {
3962 	struct ieee80211com *ic = &sc->sc_ic;
3963 	struct ieee80211_frame *wh;
3964 	struct ieee80211_node *ni;
3965 	struct ieee80211_channel *bss_chan;
3966 	uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
3967 	struct ifnet *ifp = IC2IFP(ic);
3968 
3969 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
3970 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
3971 
3972 	wh = mtod(m, struct ieee80211_frame *);
3973 	ni = ieee80211_find_rxnode(ic, wh);
3974 	if (ni == ic->ic_bss) {
3975 		/*
3976 		 * We may switch ic_bss's channel during scans.
3977 		 * Record the current channel so we can restore it later.
3978 		 */
3979 		bss_chan = ni->ni_chan;
3980 		IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
3981 	}
3982 	ni->ni_chan = &ic->ic_channels[chanidx];
3983 
3984 	/* Handle hardware decryption. */
3985 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
3986 	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
3987 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3988 	    (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
3989 	    ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
3990 		if ((rx_pkt_status & IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
3991 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
3992 			ic->ic_stats.is_ccmp_dec_errs++;
3993 			ifp->if_ierrors++;
3994 			m_freem(m);
3995 			ieee80211_release_node(ic, ni);
3996 			return;
3997 		}
3998 		/* Check whether decryption was successful or not. */
3999 		if ((rx_pkt_status &
4000 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4001 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) !=
4002 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4003 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) {
4004 			ic->ic_stats.is_ccmp_dec_errs++;
4005 			ifp->if_ierrors++;
4006 			m_freem(m);
4007 			ieee80211_release_node(ic, ni);
4008 			return;
4009 		}
4010 		if (iwm_ccmp_decap(sc, m, ni) != 0) {
4011 			ifp->if_ierrors++;
4012 			m_freem(m);
4013 			ieee80211_release_node(ic, ni);
4014 			return;
4015 		}
4016 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
4017 	}
4018 
4019 #if NBPFILTER > 0
4020 	if (sc->sc_drvbpf != NULL) {
4021 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
4022 		uint16_t chan_flags;
4023 
4024 		tap->wr_flags = 0;
4025 		if (is_shortpre)
4026 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4027 		tap->wr_chan_freq =
4028 		    htole16(ic->ic_channels[chanidx].ic_freq);
4029 		chan_flags = ic->ic_channels[chanidx].ic_flags;
4030 		if (ic->ic_curmode != IEEE80211_MODE_11N)
4031 			chan_flags &= ~IEEE80211_CHAN_HT;
4032 		tap->wr_chan_flags = htole16(chan_flags);
4033 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4034 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4035 		tap->wr_tsft = device_timestamp;
4036 		if (rate_n_flags & IWM_RATE_MCS_HT_MSK) {
4037 			uint8_t mcs = (rate_n_flags &
4038 			    (IWM_RATE_HT_MCS_RATE_CODE_MSK |
4039 			    IWM_RATE_HT_MCS_NSS_MSK));
4040 			tap->wr_rate = (0x80 | mcs);
4041 		} else {
4042 			uint8_t rate = (rate_n_flags &
4043 			    IWM_RATE_LEGACY_RATE_MSK);
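     			/*
     			 * Map the firmware's legacy rate code to radiotap's
     			 * 500 kb/s units; CCK codes are in 100 kb/s units,
     			 * OFDM codes are 802.11 PLCP signal values.
     			 */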
4044 			switch (rate) {
4045 			/* CCK rates. */
4046 			case  10: tap->wr_rate =   2; break;
4047 			case  20: tap->wr_rate =   4; break;
4048 			case  55: tap->wr_rate =  11; break;
4049 			case 110: tap->wr_rate =  22; break;
4050 			/* OFDM rates. */
4051 			case 0xd: tap->wr_rate =  12; break;
4052 			case 0xf: tap->wr_rate =  18; break;
4053 			case 0x5: tap->wr_rate =  24; break;
4054 			case 0x7: tap->wr_rate =  36; break;
4055 			case 0x9: tap->wr_rate =  48; break;
4056 			case 0xb: tap->wr_rate =  72; break;
4057 			case 0x1: tap->wr_rate =  96; break;
4058 			case 0x3: tap->wr_rate = 108; break;
4059 			/* Unknown rate: should not happen. */
4060 			default:  tap->wr_rate =   0;
4061 			}
4062 		}
4063 
4064 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4065 		    m, BPF_DIRECTION_IN);
4066 	}
4067 #endif
4068 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4069 	/*
4070 	 * ieee80211_inputm() might have changed our BSS.
4071 	 * Restore ic_bss's channel if we are still in the same BSS.
4072 	 */
4073 	if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
4074 		ni->ni_chan = bss_chan;
4075 	ieee80211_release_node(ic, ni);
4076 }
4077 
4078 void
4079 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
4080     size_t maxlen, struct mbuf_list *ml)
4081 {
4082 	struct ieee80211com *ic = &sc->sc_ic;
4083 	struct ieee80211_rxinfo rxi;
4084 	struct iwm_rx_phy_info *phy_info;
4085 	struct iwm_rx_mpdu_res_start *rx_res;
4086 	int device_timestamp;
4087 	uint16_t phy_flags;
4088 	uint32_t len;
4089 	uint32_t rx_pkt_status;
4090 	int rssi, chanidx, rate_n_flags;
4091 
4092 	phy_info = &sc->sc_last_phy_info;
4093 	rx_res = (struct iwm_rx_mpdu_res_start *)pktdata;
4094 	len = le16toh(rx_res->byte_count);
4095 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4096 		/* Allow control frames in monitor mode. */
4097 		if (len < sizeof(struct ieee80211_frame_cts)) {
4098 			ic->ic_stats.is_rx_tooshort++;
4099 			IC2IFP(ic)->if_ierrors++;
4100 			m_freem(m);
4101 			return;
4102 		}
4103 	} else if (len < sizeof(struct ieee80211_frame)) {
4104 		ic->ic_stats.is_rx_tooshort++;
4105 		IC2IFP(ic)->if_ierrors++;
4106 		m_freem(m);
4107 		return;
4108 	}
4109 	if (len > maxlen - sizeof(*rx_res)) {
4110 		IC2IFP(ic)->if_ierrors++;
4111 		m_freem(m);
4112 		return;
4113 	}
4114 
4115 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
4116 		m_freem(m);
4117 		return;
4118 	}
4119 
4120 	rx_pkt_status = le32toh(*(uint32_t *)(pktdata + sizeof(*rx_res) + len));
4121 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
4122 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
4123 		m_freem(m);
4124 		return; /* drop */
4125 	}
4126 
4127 	m->m_data = pktdata + sizeof(*rx_res);
4128 	m->m_pkthdr.len = m->m_len = len;
4129 
4130 	chanidx = letoh32(phy_info->channel);
4131 	device_timestamp = le32toh(phy_info->system_timestamp);
4132 	phy_flags = letoh16(phy_info->phy_flags);
4133 	rate_n_flags = le32toh(phy_info->rate_n_flags);
4134 
4135 	rssi = iwm_get_signal_strength(sc, phy_info);
4136 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
4137 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
4138 
4139 	memset(&rxi, 0, sizeof(rxi));
4140 	rxi.rxi_rssi = rssi;
4141 	rxi.rxi_tstamp = device_timestamp;
4142 
4143 	iwm_rx_frame(sc, m, chanidx, rx_pkt_status,
4144 	    (phy_flags & IWM_PHY_INFO_FLAG_SHPREAMBLE),
4145 	    rate_n_flags, device_timestamp, &rxi, ml);
4146 }
4147 
4148 void
4149 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
4150     size_t maxlen, struct mbuf_list *ml)
4151 {
4152 	struct ieee80211com *ic = &sc->sc_ic;
4153 	struct ieee80211_rxinfo rxi;
4154 	struct iwm_rx_mpdu_desc *desc;
4155 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
4156 	int rssi;
4157 	uint8_t chanidx;
4158 	uint16_t phy_info;
4159 
4160 	desc = (struct iwm_rx_mpdu_desc *)pktdata;
4161 
4162 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
4163 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
4164 		m_freem(m);
4165 		return; /* drop */
4166 	}
4167 
4168 	len = le16toh(desc->mpdu_len);
4169 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4170 		/* Allow control frames in monitor mode. */
4171 		if (len < sizeof(struct ieee80211_frame_cts)) {
4172 			ic->ic_stats.is_rx_tooshort++;
4173 			IC2IFP(ic)->if_ierrors++;
4174 			m_freem(m);
4175 			return;
4176 		}
4177 	} else if (len < sizeof(struct ieee80211_frame)) {
4178 		ic->ic_stats.is_rx_tooshort++;
4179 		IC2IFP(ic)->if_ierrors++;
4180 		m_freem(m);
4181 		return;
4182 	}
4183 	if (len > maxlen - sizeof(*desc)) {
4184 		IC2IFP(ic)->if_ierrors++;
4185 		m_freem(m);
4186 		return;
4187 	}
4188 
4189 	m->m_data = pktdata + sizeof(*desc);
4190 	m->m_pkthdr.len = m->m_len = len;
4191 
4192 	/* Account for padding following the frame header. */
4193 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD) {
4194 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4195 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4196 		if (type == IEEE80211_FC0_TYPE_CTL) {
4197 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
4198 			case IEEE80211_FC0_SUBTYPE_CTS:
4199 				hdrlen = sizeof(struct ieee80211_frame_cts);
4200 				break;
4201 			case IEEE80211_FC0_SUBTYPE_ACK:
4202 				hdrlen = sizeof(struct ieee80211_frame_ack);
4203 				break;
4204 			default:
4205 				hdrlen = sizeof(struct ieee80211_frame_min);
4206 				break;
4207 			}
4208 		} else
4209 			hdrlen = ieee80211_get_hdrlen(wh);
4210 
4211 		if ((le16toh(desc->status) &
4212 		    IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4213 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4214 			/* Padding is inserted after the IV. */
4215 			hdrlen += IEEE80211_CCMP_HDRLEN;
4216 		}
4217 
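     		/*
     		 * Strip the pad by moving the header forward two bytes
     		 * and trimming the now-duplicate leading bytes.
     		 */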
4218 		memmove(m->m_data + 2, m->m_data, hdrlen);
4219 		m_adj(m, 2);
4220 	}
4221 
4222 	phy_info = le16toh(desc->phy_info);
4223 	rate_n_flags = le32toh(desc->v1.rate_n_flags);
4224 	chanidx = desc->v1.channel;
4225 	device_timestamp = desc->v1.gp2_on_air_rise;
4226 
4227 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
4228 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
4229 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
4230 
4231 	memset(&rxi, 0, sizeof(rxi));
4232 	rxi.rxi_rssi = rssi;
4233 	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
4234 
4235 	iwm_rx_frame(sc, m, chanidx, le16toh(desc->status),
4236 	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
4237 	    rate_n_flags, device_timestamp, &rxi, ml);
4238 }
4239 
4240 void
4241 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4242     struct iwm_node *in, int txmcs, int txrate)
4243 {
4244 	struct ieee80211com *ic = &sc->sc_ic;
4245 	struct ieee80211_node *ni = &in->in_ni;
4246 	struct ifnet *ifp = IC2IFP(ic);
4247 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
4248 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
4249 	int txfail;
4250 
4251 	KASSERT(tx_resp->frame_count == 1);
4252 
4253 	txfail = (status != IWM_TX_STATUS_SUCCESS &&
4254 	    status != IWM_TX_STATUS_DIRECT_DONE);
4255 
4256 	/*
4257 	 * Update rate control statistics.
4258 	 * Only report frames which were actually queued with the currently
4259 	 * selected Tx rate. Because Tx queues are relatively long we may
4260 	 * encounter previously selected rates here during Tx bursts.
4261 	 * Providing feedback based on such frames can lead to suboptimal
4262 	 * Tx rate control decisions.
4263 	 */
4264 	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) {
4265 		if (txrate == ni->ni_txrate) {
4266 			in->in_amn.amn_txcnt++;
4267 			if (txfail)
4268 				in->in_amn.amn_retrycnt++;
4269 			if (tx_resp->failure_frame > 0)
4270 				in->in_amn.amn_retrycnt++;
4271 		}
4272 	} else if (ic->ic_fixed_mcs == -1 && txmcs == ni->ni_txmcs) {
4273 		in->in_mn.frames += tx_resp->frame_count;
4274 		in->in_mn.ampdu_size = le16toh(tx_resp->byte_cnt);
4275 		in->in_mn.agglen = tx_resp->frame_count;
4276 		if (tx_resp->failure_frame > 0)
4277 			in->in_mn.retries += tx_resp->failure_frame;
4278 		if (txfail)
4279 			in->in_mn.txfail += tx_resp->frame_count;
4280 		if (ic->ic_state == IEEE80211_S_RUN) {
4281 			int best_mcs;
4282 
4283 			ieee80211_mira_choose(&in->in_mn, ic, &in->in_ni);
4284 			/*
4285 			 * If MiRA has chosen a new TX rate we must update
4286 			 * the firmware's LQ rate table from process context.
4287 			 * ni_txmcs may change again before the task runs so
4288 			 * cache the chosen rate in the iwm_node structure.
4289 			 */
4290 			best_mcs = ieee80211_mira_get_best_mcs(&in->in_mn);
4291 			if (best_mcs != in->chosen_txmcs) {
4292 				in->chosen_txmcs = best_mcs;
4293 				iwm_setrates(in, 1);
4294 			}
4295 		}
4296 	}
4297 
4298 	if (txfail)
4299 		ifp->if_oerrors++;
4300 }
4301 
4302 void
4303 iwm_txd_done(struct iwm_softc *sc, struct iwm_tx_data *txd)
4304 {
4305 	struct ieee80211com *ic = &sc->sc_ic;
4306 
4307 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
4308 	    BUS_DMASYNC_POSTWRITE);
4309 	bus_dmamap_unload(sc->sc_dmat, txd->map);
4310 	m_freem(txd->m);
4311 	txd->m = NULL;
4312 
4313 	KASSERT(txd->in);
4314 	ieee80211_release_node(ic, &txd->in->in_ni);
4315 	txd->in = NULL;
4316 }
4317 
4318 void
4319 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4320     struct iwm_rx_data *data)
4321 {
4322 	struct ieee80211com *ic = &sc->sc_ic;
4323 	struct ifnet *ifp = IC2IFP(ic);
4324 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
4325 	int idx = cmd_hdr->idx;
4326 	int qid = cmd_hdr->qid;
4327 	struct iwm_tx_ring *ring = &sc->txq[qid];
4328 	struct iwm_tx_data *txd;
4329 
4330 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
4331 	    BUS_DMASYNC_POSTREAD);
4332 
4333 	sc->sc_tx_timer = 0;
4334 
4335 	txd = &ring->data[idx];
4336 	if (txd->m == NULL)
4337 		return;
4338 
4339 	iwm_rx_tx_cmd_single(sc, pkt, txd->in, txd->txmcs, txd->txrate);
4340 	iwm_txd_done(sc, txd);
4341 
4342 	/*
4343 	 * XXX Sometimes we miss Tx completion interrupts.
4344 	 * We cannot check Tx success/failure for affected frames; just free
4345 	 * the associated mbuf and release the associated node reference.
4346 	 */
4347 	while (ring->tail != idx) {
4348 		txd = &ring->data[ring->tail];
4349 		if (txd->m != NULL) {
4350 			DPRINTF(("%s: missed Tx completion: tail=%d idx=%d\n",
4351 			    __func__, ring->tail, idx));
4352 			iwm_txd_done(sc, txd);
4353 			ring->queued--;
4354 		}
4355 		ring->tail = (ring->tail + 1) % IWM_TX_RING_COUNT;
4356 	}
4357 
4358 	if (--ring->queued < IWM_TX_RING_LOMARK) {
4359 		sc->qfullmsk &= ~(1 << ring->qid);
4360 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
4361 			ifq_clr_oactive(&ifp->if_snd);
4362 			/*
4363 			 * Well, we're in interrupt context, but then again
4364 			 * I guess net80211 does all sorts of stunts in
4365 			 * interrupt context, so maybe this is no biggie.
4366 			 */
4367 			(*ifp->if_start)(ifp);
4368 		}
4369 	}
4370 }
4371 
4372 void
4373 iwm_rx_bmiss(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4374     struct iwm_rx_data *data)
4375 {
4376 	struct ieee80211com *ic = &sc->sc_ic;
4377 	struct iwm_missed_beacons_notif *mbn = (void *)pkt->data;
4378 	uint32_t missed;
4379 
4380 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
4381 	    (ic->ic_state != IEEE80211_S_RUN))
4382 		return;
4383 
4384 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4385 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
4386 
4387 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
4388 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
4389 		if (ic->ic_if.if_flags & IFF_DEBUG)
4390 			printf("%s: receiving no beacons from %s; checking if "
4391 			    "this AP is still responding to probe requests\n",
4392 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
4393 		/*
4394 		 * Rather than go directly to scan state, try to send a
4395 		 * directed probe request first. If that fails then the
4396 		 * state machine will drop us into scanning after timing
4397 		 * out waiting for a probe response.
4398 		 */
4399 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
4400 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
4401 	}
4402 
4403 }
4404 
4405 int
4406 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
4407 {
4408 	struct iwm_binding_cmd cmd;
4409 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
4410 	uint32_t mac_id = IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
4411 	int i, err, active = (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE);
4412 	uint32_t status;
4413 
4414 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
4415 		panic("binding already added");
4416 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
4417 		panic("binding already removed");
4418 
4419 	if (phyctxt == NULL) /* XXX race with iwm_stop() */
4420 		return EINVAL;
4421 
4422 	memset(&cmd, 0, sizeof(cmd));
4423 
4424 	cmd.id_and_color
4425 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4426 	cmd.action = htole32(action);
4427 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4428 
4429 	cmd.macs[0] = htole32(mac_id);
4430 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
4431 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
4432 
4433 	status = 0;
4434 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
4435 	    sizeof(cmd), &cmd, &status);
4436 	if (err == 0 && status != 0)
4437 		err = EIO;
4438 
4439 	return err;
4440 }
4441 
4442 void
4443 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4444     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
4445 {
4446 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
4447 
4448 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
4449 	    ctxt->color));
4450 	cmd->action = htole32(action);
4451 	cmd->apply_time = htole32(apply_time);
4452 }
4453 
4454 void
4455 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
4456     struct ieee80211_channel *chan, uint8_t chains_static,
4457     uint8_t chains_dynamic)
4458 {
4459 	struct ieee80211com *ic = &sc->sc_ic;
4460 	uint8_t active_cnt, idle_cnt;
4461 
4462 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
4463 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
4464 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
4465 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
4466 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
4467 
4468 	/* Set the Rx chains. */
4469 	idle_cnt = chains_static;
4470 	active_cnt = chains_dynamic;
4471 
4472 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
4473 					IWM_PHY_RX_CHAIN_VALID_POS);
4474 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
4475 	cmd->rxchain_info |= htole32(active_cnt <<
4476 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
4477 
4478 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
4479 }
4480 
4481 int
4482 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4483     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
4484     uint32_t apply_time)
4485 {
4486 	struct iwm_phy_context_cmd cmd;
4487 
4488 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
4489 
4490 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
4491 	    chains_static, chains_dynamic);
4492 
4493 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
4494 	    sizeof(struct iwm_phy_context_cmd), &cmd);
4495 }
4496 
4497 int
4498 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4499 {
4500 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
4501 	struct iwm_tfd *desc;
4502 	struct iwm_tx_data *txdata;
4503 	struct iwm_device_cmd *cmd;
4504 	struct mbuf *m;
4505 	bus_addr_t paddr;
4506 	uint32_t addr_lo;
4507 	int err = 0, i, paylen, off, s;
4508 	int idx, code, async, group_id;
4509 	size_t hdrlen, datasz;
4510 	uint8_t *data;
4511 	int generation = sc->sc_generation;
4512 
4513 	code = hcmd->id;
4514 	async = hcmd->flags & IWM_CMD_ASYNC;
4515 	idx = ring->cur;
4516 
4517 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
4518 		paylen += hcmd->len[i];
4519 	}
4520 
4521 	/* If this command waits for a response, allocate response buffer. */
4522 	hcmd->resp_pkt = NULL;
4523 	if (hcmd->flags & IWM_CMD_WANT_RESP) {
4524 		uint8_t *resp_buf;
4525 		KASSERT(!async);
4526 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwm_rx_packet));
4527 		KASSERT(hcmd->resp_pkt_len <= IWM_CMD_RESP_MAX);
4528 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
4529 			return ENOSPC;
4530 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
4531 		    M_NOWAIT | M_ZERO);
4532 		if (resp_buf == NULL)
4533 			return ENOMEM;
4534 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
4535 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
4536 	} else {
4537 		sc->sc_cmd_resp_pkt[idx] = NULL;
4538 	}
4539 
4540 	s = splnet();
4541 
4542 	desc = &ring->desc[idx];
4543 	txdata = &ring->data[idx];
4544 
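     	/*
     	 * Commands in a non-default group use the wide header format,
     	 * which carries the group id and a version byte.
     	 */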
4545 	group_id = iwm_cmd_groupid(code);
4546 	if (group_id != 0) {
4547 		hdrlen = sizeof(cmd->hdr_wide);
4548 		datasz = sizeof(cmd->data_wide);
4549 	} else {
4550 		hdrlen = sizeof(cmd->hdr);
4551 		datasz = sizeof(cmd->data);
4552 	}
4553 
4554 	if (paylen > datasz) {
4555 		/* Command is too large to fit in pre-allocated space. */
4556 		size_t totlen = hdrlen + paylen;
4557 		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
4558 			printf("%s: firmware command too long (%zd bytes)\n",
4559 			    DEVNAME(sc), totlen);
4560 			err = EINVAL;
4561 			goto out;
4562 		}
4563 		m = MCLGETI(NULL, M_DONTWAIT, NULL, totlen);
4564 		if (m == NULL) {
4565 			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
4566 			    DEVNAME(sc), totlen);
4567 			err = ENOMEM;
4568 			goto out;
4569 		}
4570 		cmd = mtod(m, struct iwm_device_cmd *);
4571 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
4572 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4573 		if (err) {
4574 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
4575 			    DEVNAME(sc), totlen);
4576 			m_freem(m);
4577 			goto out;
4578 		}
4579 		txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
4580 		paddr = txdata->map->dm_segs[0].ds_addr;
4581 	} else {
4582 		cmd = &ring->cmd[idx];
4583 		paddr = txdata->cmd_paddr;
4584 	}
4585 
4586 	if (group_id != 0) {
4587 		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
4588 		cmd->hdr_wide.group_id = group_id;
4589 		cmd->hdr_wide.qid = ring->qid;
4590 		cmd->hdr_wide.idx = idx;
4591 		cmd->hdr_wide.length = htole16(paylen);
4592 		cmd->hdr_wide.version = iwm_cmd_version(code);
4593 		data = cmd->data_wide;
4594 	} else {
4595 		cmd->hdr.code = code;
4596 		cmd->hdr.flags = 0;
4597 		cmd->hdr.qid = ring->qid;
4598 		cmd->hdr.idx = idx;
4599 		data = cmd->data;
4600 	}
4601 
4602 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
4603 		if (hcmd->len[i] == 0)
4604 			continue;
4605 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
4606 		off += hcmd->len[i];
4607 	}
4608 	KASSERT(off == paylen);
4609 
4610 	/* The 'lo' field is not 4-byte aligned; copy the address bytewise. */
4611 	addr_lo = htole32((uint32_t)paddr);
4612 	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
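     	/* hi_n_len packs the upper 4 DMA address bits in its low nibble
     	 * and the buffer length in the remaining 12 bits. */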
4613 	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
4614 	    | ((hdrlen + paylen) << 4));
4615 	desc->num_tbs = 1;
4616 
4617 	if (paylen > datasz) {
4618 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
4619 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
4620 	} else {
4621 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4622 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
4623 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
4624 	}
4625 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4626 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
4627 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
4628 
4629 	/*
4630 	 * Wake up the NIC to make sure that the firmware will see the host
4631 	 * command - we will let the NIC sleep once all the host commands
4632 	 * returned. This needs to be done only on 7000 family NICs.
4633 	 */
4634 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
4635 		if (ring->queued == 0 && !iwm_nic_lock(sc)) {
4636 			err = EBUSY;
4637 			goto out;
4638 		}
4639 	}
4640 
4641 #if 0
4642 	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
4643 #endif
4644 	/* Kick command ring. */
4645 	ring->queued++;
4646 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4647 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4648 
4649 	if (!async) {
4650 		err = tsleep_nsec(desc, PCATCH, "iwmcmd", SEC_TO_NSEC(1));
4651 		if (err == 0) {
4652 			/* if hardware is no longer up, return error */
4653 			if (generation != sc->sc_generation) {
4654 				err = ENXIO;
4655 				goto out;
4656 			}
4657 
4658 			/* Response buffer will be freed in iwm_free_resp(). */
4659 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
4660 			sc->sc_cmd_resp_pkt[idx] = NULL;
4661 		} else if (generation == sc->sc_generation) {
4662 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
4663 			    sc->sc_cmd_resp_len[idx]);
4664 			sc->sc_cmd_resp_pkt[idx] = NULL;
4665 		}
4666 	}
4667  out:
4668 	splx(s);
4669 
4670 	return err;
4671 }
4672 
4673 int
4674 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
4675     uint16_t len, const void *data)
4676 {
4677 	struct iwm_host_cmd cmd = {
4678 		.id = id,
4679 		.len = { len, },
4680 		.data = { data, },
4681 		.flags = flags,
4682 	};
4683 
4684 	return iwm_send_cmd(sc, &cmd);
4685 }
4686 
4687 int
4688 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
4689     uint32_t *status)
4690 {
4691 	struct iwm_rx_packet *pkt;
4692 	struct iwm_cmd_response *resp;
4693 	int err, resp_len;
4694 
4695 	KASSERT((cmd->flags & IWM_CMD_WANT_RESP) == 0);
4696 	cmd->flags |= IWM_CMD_WANT_RESP;
4697 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
4698 
4699 	err = iwm_send_cmd(sc, cmd);
4700 	if (err)
4701 		return err;
4702 
4703 	pkt = cmd->resp_pkt;
4704 	if (pkt == NULL || (pkt->hdr.flags & IWM_CMD_FAILED_MSK))
4705 		return EIO;
4706 
4707 	resp_len = iwm_rx_packet_payload_len(pkt);
4708 	if (resp_len != sizeof(*resp)) {
4709 		iwm_free_resp(sc, cmd);
4710 		return EIO;
4711 	}
4712 
4713 	resp = (void *)pkt->data;
4714 	*status = le32toh(resp->status);
4715 	iwm_free_resp(sc, cmd);
4716 	return err;
4717 }
4718 
4719 int
4720 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
4721     const void *data, uint32_t *status)
4722 {
4723 	struct iwm_host_cmd cmd = {
4724 		.id = id,
4725 		.len = { len, },
4726 		.data = { data, },
4727 	};
4728 
4729 	return iwm_send_cmd_status(sc, &cmd, status);
4730 }
4731 
4732 void
4733 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4734 {
4735 	KASSERT((hcmd->flags & (IWM_CMD_WANT_RESP)) == IWM_CMD_WANT_RESP);
4736 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
4737 	hcmd->resp_pkt = NULL;
4738 }
4739 
4740 void
4741 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx, int code)
4742 {
4743 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
4744 	struct iwm_tx_data *data;
4745 
4746 	if (qid != sc->cmdqid) {
4747 		return;	/* Not a command ack. */
4748 	}
4749 
4750 	data = &ring->data[idx];
4751 
4752 	if (data->m != NULL) {
4753 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
4754 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4755 		bus_dmamap_unload(sc->sc_dmat, data->map);
4756 		m_freem(data->m);
4757 		data->m = NULL;
4758 	}
4759 	wakeup(&ring->desc[idx]);
4760 
4761 	if (ring->queued == 0) {
4762 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
4763 		    DEVNAME(sc), code));
4764 	} else if (--ring->queued == 0) {
4765 		/*
4766 		 * 7000 family NICs are locked while commands are in progress.
4767 		 * All commands are now done so we may unlock the NIC again.
4768 		 */
4769 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
4770 			iwm_nic_unlock(sc);
4771 	}
4772 }
4773 
4774 #if 0
4775 /*
4776  * necessary only for block ack mode
4777  */
4778 void
4779 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
4780     uint16_t len)
4781 {
4782 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
4783 	uint16_t w_val;
4784 
4785 	scd_bc_tbl = sc->sched_dma.vaddr;
4786 
4787 	len += 8; /* magic numbers came naturally from paris */
4788 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
4789 		len = roundup(len, 4) / 4;
4790 
4791 	w_val = htole16(sta_id << 12 | len);
4792 
4793 	/* Update TX scheduler. */
4794 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
4795 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4796 	    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] -
     	    (char *)(void *)sc->sched_dma.vaddr,
4797 	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
4798 
4799 	/* I really wonder what this is ?!? */
4800 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
4801 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
4802 		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4803 		    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] -
4804 		    (char *)(void *)sc->sched_dma.vaddr,
4805 		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
4806 	}
4807 }
4808 #endif
4809 
4810 /*
4811  * Fill in various bits for management frames, and leave them
4812  * unfilled for data frames (firmware takes care of that).
4813  * Return the selected TX rate.
4814  */
4815 const struct iwm_rate *
4816 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
4817     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
4818 {
4819 	struct ieee80211com *ic = &sc->sc_ic;
4820 	struct ieee80211_node *ni = &in->in_ni;
4821 	const struct iwm_rate *rinfo;
4822 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4823 	int min_ridx = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
4824 	int ridx, rate_flags;
4825 
4826 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
4827 	tx->data_retry_limit = IWM_LOW_RETRY_LIMIT;
4828 
4829 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4830 	    type != IEEE80211_FC0_TYPE_DATA) {
4831 		/* for non-data, use the lowest supported rate */
4832 		ridx = min_ridx;
4833 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
4834 	} else if (ic->ic_fixed_mcs != -1) {
4835 		ridx = sc->sc_fixed_ridx;
4836 	} else if (ic->ic_fixed_rate != -1) {
4837 		ridx = sc->sc_fixed_ridx;
4838 	} else if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4839 	    ieee80211_mira_is_probing(&in->in_mn)) {
4840 		/* Keep Tx rate constant while mira is probing. */
4841 		ridx = iwm_mcs2ridx[ni->ni_txmcs];
4842 	} else {
4843 		int i;
4844 		/* Use firmware rateset retry table. */
4845 		tx->initial_rate_index = 0;
4846 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
4847 		if (ni->ni_flags & IEEE80211_NODE_HT) {
4848 			ridx = iwm_mcs2ridx[ni->ni_txmcs];
4849 			return &iwm_rates[ridx];
4850 		}
4851 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4852 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
4853 		for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
4854 			if (iwm_rates[i].rate == (ni->ni_txrate &
4855 			    IEEE80211_RATE_VAL)) {
4856 				ridx = i;
4857 				break;
4858 			}
4859 		}
4860 		return &iwm_rates[ridx];
4861 	}
4862 
4863 	rinfo = &iwm_rates[ridx];
4864 	if (iwm_is_mimo_ht_plcp(rinfo->ht_plcp))
4865 		rate_flags = IWM_RATE_MCS_ANT_AB_MSK;
4866 	else
4867 		rate_flags = IWM_RATE_MCS_ANT_A_MSK;
4868 	if (IWM_RIDX_IS_CCK(ridx))
4869 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
4870 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4871 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4872 		rate_flags |= IWM_RATE_MCS_HT_MSK;
4873 		if (ieee80211_node_supports_ht_sgi20(ni))
4874 			rate_flags |= IWM_RATE_MCS_SGI_MSK;
4875 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4876 	} else
4877 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4878 
4879 	return rinfo;
4880 }
4881 
4882 #define TB0_SIZE 16
4883 int
4884 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
4885 {
4886 	struct ieee80211com *ic = &sc->sc_ic;
4887 	struct iwm_node *in = (void *)ni;
4888 	struct iwm_tx_ring *ring;
4889 	struct iwm_tx_data *data;
4890 	struct iwm_tfd *desc;
4891 	struct iwm_device_cmd *cmd;
4892 	struct iwm_tx_cmd *tx;
4893 	struct ieee80211_frame *wh;
4894 	struct ieee80211_key *k = NULL;
4895 	const struct iwm_rate *rinfo;
4896 	uint8_t *ivp;
4897 	uint32_t flags;
4898 	u_int hdrlen;
4899 	bus_dma_segment_t *seg;
4900 	uint8_t tid, type;
4901 	int i, totlen, err, pad;
4902 	int hdrlen2, rtsthres = ic->ic_rtsthreshold;
4903 
4904 	wh = mtod(m, struct ieee80211_frame *);
4905 	hdrlen = ieee80211_get_hdrlen(wh);
4906 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4907 
4908 	hdrlen2 = (ieee80211_has_qos(wh)) ?
4909 	    sizeof (struct ieee80211_qosframe) :
4910 	    sizeof (struct ieee80211_frame);
4911 
4912 	tid = 0;
4913 
4914 	/*
4915 	 * Map EDCA categories to Tx data queues.
4916 	 *
4917 	 * We use static data queue assignments even in DQA mode. We do not
4918 	 * need to share Tx queues between stations because we only implement
4919 	 * client mode; the firmware's station table contains only one entry
4920 	 * which represents our access point.
4921 	 *
4922 	 * Tx aggregation will require additional queues (one queue per TID
4923 	 * for which aggregation is enabled) but we do not implement this yet.
4924 	 */
4925 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
4926 		ring = &sc->txq[IWM_DQA_MIN_MGMT_QUEUE + ac];
4927 	else
4928 		ring = &sc->txq[ac];
4929 	desc = &ring->desc[ring->cur];
4930 	memset(desc, 0, sizeof(*desc));
4931 	data = &ring->data[ring->cur];
4932 
4933 	cmd = &ring->cmd[ring->cur];
4934 	cmd->hdr.code = IWM_TX_CMD;
4935 	cmd->hdr.flags = 0;
4936 	cmd->hdr.qid = ring->qid;
4937 	cmd->hdr.idx = ring->cur;
4938 
4939 	tx = (void *)cmd->data;
4940 	memset(tx, 0, sizeof(*tx));
4941 
4942 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
4943 
4944 #if NBPFILTER > 0
4945 	if (sc->sc_drvbpf != NULL) {
4946 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
4947 		uint16_t chan_flags;
4948 
4949 		tap->wt_flags = 0;
4950 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
4951 		chan_flags = ni->ni_chan->ic_flags;
4952 		if (ic->ic_curmode != IEEE80211_MODE_11N)
4953 			chan_flags &= ~IEEE80211_CHAN_HT;
4954 		tap->wt_chan_flags = htole16(chan_flags);
4955 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4956 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4957 		    type == IEEE80211_FC0_TYPE_DATA &&
4958 		    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4959 			tap->wt_rate = (0x80 | rinfo->ht_plcp);
4960 		} else
4961 			tap->wt_rate = rinfo->rate;
4962 		tap->wt_hwqueue = ac;
4963 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
4964 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
4965 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4966 
4967 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
4968 		    m, BPF_DIRECTION_OUT);
4969 	}
4970 #endif
4971 	totlen = m->m_pkthdr.len;
4972 
4973 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
4974 		k = ieee80211_get_txkey(ic, wh, ni);
4975 		if ((k->k_flags & IEEE80211_KEY_GROUP) ||
4976 		    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
4977 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
4978 				return ENOBUFS;
4979 			/* 802.11 header may have moved. */
4980 			wh = mtod(m, struct ieee80211_frame *);
4981 			totlen = m->m_pkthdr.len;
4982 			k = NULL; /* skip hardware crypto below */
4983 		} else {
4984 			/* HW appends CCMP MIC */
4985 			totlen += IEEE80211_CCMP_HDRLEN;
4986 		}
4987 	}
4988 
4989 	flags = 0;
4990 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4991 		flags |= IWM_TX_CMD_FLG_ACK;
4992 	}
4993 
4994 	if (ni->ni_flags & IEEE80211_NODE_HT)
4995 		rtsthres = ieee80211_mira_get_rts_threshold(&in->in_mn, ic, ni,
4996 		    totlen + IEEE80211_CRC_LEN);
4997 
4998 	if (type == IEEE80211_FC0_TYPE_DATA &&
4999 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5000 	    (totlen + IEEE80211_CRC_LEN > rtsthres ||
5001 	    (ic->ic_flags & IEEE80211_F_USEPROT)))
5002 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
5003 
5004 	tx->sta_id = IWM_STATION_ID;
5005 
5006 	if (type == IEEE80211_FC0_TYPE_MGT) {
5007 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5008 
5009 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
5010 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
5011 			tx->pm_frame_timeout = htole16(3);
5012 		else
5013 			tx->pm_frame_timeout = htole16(2);
5014 	} else {
5015 		tx->pm_frame_timeout = htole16(0);
5016 	}
5017 
5018 	if (hdrlen & 3) {
5019 		/* First segment length must be a multiple of 4. */
5020 		flags |= IWM_TX_CMD_FLG_MH_PAD;
5021 		pad = 4 - (hdrlen & 3);
5022 	} else
5023 		pad = 0;
5024 
5025 	tx->driver_txop = 0;
5026 	tx->next_frame_len = 0;
5027 
5028 	tx->len = htole16(totlen);
5029 	tx->tid_tspec = tid;
5030 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
5031 
5032 	/* Set physical address of "scratch area". */
5033 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
5034 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
5035 
5036 	/* Copy 802.11 header in TX command. */
5037 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
5038 
5039 	if  (k != NULL && k->k_cipher == IEEE80211_CIPHER_CCMP) {
5040 		/* Trim 802.11 header and prepend CCMP IV. */
5041 		m_adj(m, hdrlen - IEEE80211_CCMP_HDRLEN);
5042 		ivp = mtod(m, u_int8_t *);
5043 		k->k_tsc++;	/* increment the 48-bit PN */
5044 		ivp[0] = k->k_tsc; /* PN0 */
5045 		ivp[1] = k->k_tsc >> 8; /* PN1 */
5046 		ivp[2] = 0;        /* Rsvd */
5047 		ivp[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV;
5048 		ivp[4] = k->k_tsc >> 16; /* PN2 */
5049 		ivp[5] = k->k_tsc >> 24; /* PN3 */
5050 		ivp[6] = k->k_tsc >> 32; /* PN4 */
5051 		ivp[7] = k->k_tsc >> 40; /* PN5 */
5052 
5053 		tx->sec_ctl = IWM_TX_CMD_SEC_CCM;
5054 		memcpy(tx->key, k->k_key, MIN(sizeof(tx->key), k->k_len));
5055 	} else {
5056 		/* Trim 802.11 header. */
5057 		m_adj(m, hdrlen);
5058 		tx->sec_ctl = 0;
5059 	}
5060 
5061 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
5062 
5063 	tx->tx_flags |= htole32(flags);
5064 
5065 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
5066 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5067 	if (err && err != EFBIG) {
5068 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
5069 		m_freem(m);
5070 		return err;
5071 	}
5072 	if (err) {
5073 		/* Too many DMA segments, linearize mbuf. */
5074 		if (m_defrag(m, M_DONTWAIT)) {
5075 			m_freem(m);
5076 			return ENOBUFS;
5077 		}
5078 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
5079 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5080 		if (err) {
5081 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
5082 			    err);
5083 			m_freem(m);
5084 			return err;
5085 		}
5086 	}
5087 	data->m = m;
5088 	data->in = in;
5089 	data->txmcs = ni->ni_txmcs;
5090 	data->txrate = ni->ni_txrate;
5091 
5092 	/* Fill TX descriptor. */
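     	/*
     	 * TB0 maps the first 16 bytes of the Tx command; TB1 maps the
     	 * rest of the command plus the 802.11 header (and any pad);
     	 * the remaining TBs map the frame payload segments.
     	 */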
5093 	desc->num_tbs = 2 + data->map->dm_nsegs;
5094 
5095 	desc->tbs[0].lo = htole32(data->cmd_paddr);
5096 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
5097 	    (TB0_SIZE << 4));
5098 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
5099 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
5100 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
5101 	      + hdrlen + pad - TB0_SIZE) << 4));
5102 
5103 	/* Other DMA segments are for data payload. */
5104 	seg = data->map->dm_segs;
5105 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
5106 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
5107 		desc->tbs[i+2].hi_n_len =
5108 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr)
5109 		    | ((seg->ds_len) << 4));
5110 	}
5111 
5112 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
5113 	    BUS_DMASYNC_PREWRITE);
5114 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5115 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5116 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
5117 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5118 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5119 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
5120 
5121 #if 0
5122 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
5123 #endif
5124 
5125 	/* Kick TX ring. */
5126 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
5127 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
5128 
5129 	/* Mark TX ring as full if we reach a certain threshold. */
5130 	if (++ring->queued > IWM_TX_RING_HIMARK) {
5131 		sc->qfullmsk |= 1 << ring->qid;
5132 	}
5133 
5134 	return 0;
5135 }
5136 
5137 int
5138 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_queue_msk)
5139 {
5140 	struct iwm_tx_path_flush_cmd flush_cmd = {
5141 		.queues_ctl = htole32(tfd_queue_msk),
5142 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
5143 	};
5144 	int err;
5145 
5146 	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, 0,
5147 	    sizeof(flush_cmd), &flush_cmd);
5148 	if (err)
5149 		printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
5150 	return err;
5151 }
5152 
5153 void
5154 iwm_led_enable(struct iwm_softc *sc)
5155 {
5156 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
5157 }
5158 
5159 void
5160 iwm_led_disable(struct iwm_softc *sc)
5161 {
5162 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
5163 }
5164 
5165 int
5166 iwm_led_is_enabled(struct iwm_softc *sc)
5167 {
5168 	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
5169 }
5170 
5171 #define IWM_LED_BLINK_TIMEOUT_MSEC    200
5172 
5173 void
5174 iwm_led_blink_timeout(void *arg)
5175 {
5176 	struct iwm_softc *sc = arg;
5177 
5178 	if (iwm_led_is_enabled(sc))
5179 		iwm_led_disable(sc);
5180 	else
5181 		iwm_led_enable(sc);
5182 
5183 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
5184 }
5185 
5186 void
5187 iwm_led_blink_start(struct iwm_softc *sc)
5188 {
5189 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
5190 	iwm_led_enable(sc);
5191 }
5192 
5193 void
5194 iwm_led_blink_stop(struct iwm_softc *sc)
5195 {
5196 	timeout_del(&sc->sc_led_blink_to);
5197 	iwm_led_disable(sc);
5198 }
5199 
5200 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
5201 
5202 int
5203 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
5204     struct iwm_beacon_filter_cmd *cmd)
5205 {
5206 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
5207 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
5208 }
5209 
5210 void
5211 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
5212     struct iwm_beacon_filter_cmd *cmd)
5213 {
5214 	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
5215 }
5216 
5217 int
5218 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
5219 {
5220 	struct iwm_beacon_filter_cmd cmd = {
5221 		IWM_BF_CMD_CONFIG_DEFAULTS,
5222 		.bf_enable_beacon_filter = htole32(1),
5223 		.ba_enable_beacon_abort = htole32(enable),
5224 	};
5225 
5226 	if (!sc->sc_bf.bf_enabled)
5227 		return 0;
5228 
5229 	sc->sc_bf.ba_enabled = enable;
5230 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
5231 	return iwm_beacon_filter_send_cmd(sc, &cmd);
5232 }
5233 
5234 void
5235 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
5236     struct iwm_mac_power_cmd *cmd)
5237 {
5238 	struct ieee80211com *ic = &sc->sc_ic;
5239 	struct ieee80211_node *ni = &in->in_ni;
5240 	int dtim_period, dtim_msec, keep_alive;
5241 
5242 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
5243 	    in->in_color));
5244 	if (ni->ni_dtimperiod)
5245 		dtim_period = ni->ni_dtimperiod;
5246 	else
5247 		dtim_period = 1;
5248 
5249 	/*
5250 	 * Regardless of power management state the driver must set
5251 	 * keep alive period. FW will use it for sending keep alive NDPs
5252 	 * immediately after association. Check that keep alive period
5253 	 * is at least 3 * DTIM.
5254 	 */
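	/*
	 * For example, with a DTIM period of 1 and a beacon interval
	 * of 100, keep_alive becomes MAX(300, 25000) msec, which is
	 * 25 seconds after rounding up to whole seconds.
	 */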
5255 	dtim_msec = dtim_period * ni->ni_intval;
5256 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
5257 	keep_alive = roundup(keep_alive, 1000) / 1000;
5258 	cmd->keep_alive_seconds = htole16(keep_alive);
5259 
5260 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
5261 		cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5262 }
5263 
5264 int
5265 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
5266 {
5267 	int err;
5268 	int ba_enable;
5269 	struct iwm_mac_power_cmd cmd;
5270 
5271 	memset(&cmd, 0, sizeof(cmd));
5272 
5273 	iwm_power_build_cmd(sc, in, &cmd);
5274 
5275 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
5276 	    sizeof(cmd), &cmd);
5277 	if (err != 0)
5278 		return err;
5279 
5280 	ba_enable = !!(cmd.flags &
5281 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
5282 	return iwm_update_beacon_abort(sc, in, ba_enable);
5283 }
5284 
5285 int
5286 iwm_power_update_device(struct iwm_softc *sc)
5287 {
5288 	struct iwm_device_power_cmd cmd = { };
5289 	struct ieee80211com *ic = &sc->sc_ic;
5290 
5291 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
5292 		cmd.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5293 
5294 	return iwm_send_cmd_pdu(sc,
5295 	    IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
5296 }
5297 
5298 int
5299 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
5300 {
5301 	struct iwm_beacon_filter_cmd cmd = {
5302 		IWM_BF_CMD_CONFIG_DEFAULTS,
5303 		.bf_enable_beacon_filter = htole32(1),
5304 	};
5305 	int err;
5306 
5307 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
5308 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
5309 
5310 	if (err == 0)
5311 		sc->sc_bf.bf_enabled = 1;
5312 
5313 	return err;
5314 }
5315 
5316 int
5317 iwm_disable_beacon_filter(struct iwm_softc *sc)
5318 {
5319 	struct iwm_beacon_filter_cmd cmd;
5320 	int err;
5321 
5322 	memset(&cmd, 0, sizeof(cmd));
5323 
5324 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
5325 	if (err == 0)
5326 		sc->sc_bf.bf_enabled = 0;
5327 
5328 	return err;
5329 }
5330 
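/*
 * Add an entry for our station (the AP's BSS in STA mode, or an
 * all-zero address in monitor mode) to the firmware's station table,
 * or update an existing entry, e.g. with HT details after association.
 */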
5331 int
5332 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
5333 {
5334 	struct iwm_add_sta_cmd add_sta_cmd;
5335 	int err;
5336 	uint32_t status;
5337 	size_t cmdsize;
5338 	struct ieee80211com *ic = &sc->sc_ic;
5339 
5340 	if (!update && (sc->sc_flags & IWM_FLAG_STA_ACTIVE))
5341 		panic("STA already added");
5342 
5343 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
5344 
5345 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
5346 		add_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
5347 	else
5348 		add_sta_cmd.sta_id = IWM_STATION_ID;
5349 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)) {
5350 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
5351 			add_sta_cmd.station_type = IWM_STA_GENERAL_PURPOSE;
5352 		else
5353 			add_sta_cmd.station_type = IWM_STA_LINK;
5354 	}
5355 	add_sta_cmd.mac_id_n_color
5356 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
5357 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5358 		int qid;
5359 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, etheranyaddr);
5360 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
5361 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
5362 		else
5363 			qid = IWM_AUX_QUEUE;
5364 		add_sta_cmd.tfd_queue_msk |= htole32(1 << qid);
5365 	} else if (!update) {
5366 		int ac;
5367 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
5368 			int qid = ac;
5369 			if (isset(sc->sc_enabled_capa,
5370 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
5371 				qid += IWM_DQA_MIN_MGMT_QUEUE;
5372 			add_sta_cmd.tfd_queue_msk |= htole32(1 << qid);
5373 		}
5374 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
5375 	}
5376 	add_sta_cmd.add_modify = update ? 1 : 0;
5377 	add_sta_cmd.station_flags_msk
5378 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
5379 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
5380 	if (update)
5381 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
5382 
5383 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
5384 		add_sta_cmd.station_flags_msk
5385 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
5386 		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);
5387 
5388 		if (iwm_mimo_enabled(sc)) {
5389 			if (in->in_ni.ni_rxmcs[1] != 0) {
5390 				add_sta_cmd.station_flags |=
5391 				    htole32(IWM_STA_FLG_MIMO_EN_MIMO2);
5392 			}
5393 			if (in->in_ni.ni_rxmcs[2] != 0) {
5394 				add_sta_cmd.station_flags |=
5395 				    htole32(IWM_STA_FLG_MIMO_EN_MIMO3);
5396 			}
5397 		}
5398 
5399 		add_sta_cmd.station_flags
5400 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
5401 		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
5402 		case IEEE80211_AMPDU_PARAM_SS_2:
5403 			add_sta_cmd.station_flags
5404 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
5405 			break;
5406 		case IEEE80211_AMPDU_PARAM_SS_4:
5407 			add_sta_cmd.station_flags
5408 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
5409 			break;
5410 		case IEEE80211_AMPDU_PARAM_SS_8:
5411 			add_sta_cmd.station_flags
5412 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
5413 			break;
5414 		case IEEE80211_AMPDU_PARAM_SS_16:
5415 			add_sta_cmd.station_flags
5416 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
5417 			break;
5418 		default:
5419 			break;
5420 		}
5421 	}
5422 
5423 	status = IWM_ADD_STA_SUCCESS;
5424 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
5425 		cmdsize = sizeof(add_sta_cmd);
5426 	else
5427 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
5428 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize,
5429 	    &add_sta_cmd, &status);
5430 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
5431 		err = EIO;
5432 
5433 	return err;
5434 }
5435 
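/*
 * Add the auxiliary station which the firmware uses for frames that
 * do not belong to a MAC context, such as probe requests during scans.
 */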
5436 int
5437 iwm_add_aux_sta(struct iwm_softc *sc)
5438 {
5439 	struct iwm_add_sta_cmd cmd;
5440 	int err, qid;
5441 	uint32_t status;
5442 	size_t cmdsize;
5443 
5444 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
5445 		qid = IWM_DQA_AUX_QUEUE;
5446 		err = iwm_enable_txq(sc, IWM_AUX_STA_ID, qid,
5447 		    IWM_TX_FIFO_MCAST);
5448 	} else {
5449 		qid = IWM_AUX_QUEUE;
5450 		err = iwm_enable_ac_txq(sc, qid, IWM_TX_FIFO_MCAST);
5451 	}
5452 	if (err)
5453 		return err;
5454 
5455 	memset(&cmd, 0, sizeof(cmd));
5456 	cmd.sta_id = IWM_AUX_STA_ID;
5457 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
5458 		cmd.station_type = IWM_STA_AUX_ACTIVITY;
5459 	cmd.mac_id_n_color =
5460 	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
5461 	cmd.tfd_queue_msk = htole32(1 << qid);
5462 	cmd.tid_disable_tx = htole16(0xffff);
5463 
5464 	status = IWM_ADD_STA_SUCCESS;
5465 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
5466 		cmdsize = sizeof(cmd);
5467 	else
5468 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
5469 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
5470 	    &status);
5471 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
5472 		err = EIO;
5473 
5474 	return err;
5475 }
5476 
5477 int
5478 iwm_rm_sta_cmd(struct iwm_softc *sc, struct iwm_node *in)
5479 {
5480 	struct ieee80211com *ic = &sc->sc_ic;
5481 	struct iwm_rm_sta_cmd rm_sta_cmd;
5482 	int err;
5483 
5484 	if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
5485 		panic("STA already removed");
5486 
5487 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
5488 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
5489 		rm_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
5490 	else
5491 		rm_sta_cmd.sta_id = IWM_STATION_ID;
5492 
5493 	err = iwm_send_cmd_pdu(sc, IWM_REMOVE_STA, 0, sizeof(rm_sta_cmd),
5494 	    &rm_sta_cmd);
5495 
5496 	return err;
5497 }
5498 
5499 uint16_t
5500 iwm_scan_rx_chain(struct iwm_softc *sc)
5501 {
5502 	uint16_t rx_chain;
5503 	uint8_t rx_ant;
5504 
5505 	rx_ant = iwm_fw_valid_rx_ant(sc);
5506 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
5507 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
5508 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
5509 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
5510 	return htole16(rx_chain);
5511 }
5512 
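/*
 * Pick the Tx rate and antenna for scan probe requests: 1 Mbit/s CCK
 * on 2 GHz unless CCK is disallowed, 6 Mbit/s OFDM otherwise, while
 * cycling through the valid Tx antennas on consecutive calls.
 */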
5513 uint32_t
5514 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
5515 {
5516 	uint32_t tx_ant;
5517 	int i, ind;
5518 
5519 	for (i = 0, ind = sc->sc_scan_last_antenna;
5520 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
5521 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
5522 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
5523 			sc->sc_scan_last_antenna = ind;
5524 			break;
5525 		}
5526 	}
5527 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
5528 
5529 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
5530 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
5531 		    tx_ant);
5532 	else
5533 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
5534 }
5535 
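/*
 * Fill the scan command's channel list with every channel known to
 * net80211, limited by the number of scan channels the firmware
 * advertised support for.
 */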
5536 uint8_t
5537 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
5538     struct iwm_scan_channel_cfg_lmac *chan, int n_ssids, int bgscan)
5539 {
5540 	struct ieee80211com *ic = &sc->sc_ic;
5541 	struct ieee80211_channel *c;
5542 	uint8_t nchan;
5543 
5544 	for (nchan = 0, c = &ic->ic_channels[1];
5545 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5546 	    nchan < sc->sc_capa_n_scan_channels;
5547 	    c++) {
5548 		if (c->ic_flags == 0)
5549 			continue;
5550 
5551 		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
5552 		chan->iter_count = htole16(1);
5553 		chan->iter_interval = 0;
5554 		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
5555 		if (n_ssids != 0 && !bgscan)
5556 			chan->flags |= htole32(1 << 1); /* select SSID 0 */
5557 		chan++;
5558 		nchan++;
5559 	}
5560 
5561 	return nchan;
5562 }
5563 
5564 uint8_t
5565 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
5566     struct iwm_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
5567 {
5568 	struct ieee80211com *ic = &sc->sc_ic;
5569 	struct ieee80211_channel *c;
5570 	uint8_t nchan;
5571 
5572 	for (nchan = 0, c = &ic->ic_channels[1];
5573 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5574 	    nchan < sc->sc_capa_n_scan_channels;
5575 	    c++) {
5576 		if (c->ic_flags == 0)
5577 			continue;
5578 
5579 		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
5580 		chan->iter_count = 1;
5581 		chan->iter_interval = htole16(0);
5582 		if (n_ssids != 0 && !bgscan)
5583 			chan->flags = htole32(1 << 0); /* select SSID 0 */
5584 		chan++;
5585 		nchan++;
5586 	}
5587 
5588 	return nchan;
5589 }
5590 
5591 int
5592 iwm_fill_probe_req_v1(struct iwm_softc *sc, struct iwm_scan_probe_req_v1 *preq1)
5593 {
5594 	struct iwm_scan_probe_req preq2;
5595 	int err, i;
5596 
5597 	err = iwm_fill_probe_req(sc, &preq2);
5598 	if (err)
5599 		return err;
5600 
5601 	preq1->mac_header = preq2.mac_header;
5602 	for (i = 0; i < nitems(preq1->band_data); i++)
5603 		preq1->band_data[i] = preq2.band_data[i];
5604 	preq1->common_data = preq2.common_data;
5605 	memcpy(preq1->buf, preq2.buf, sizeof(preq1->buf));
5606 	return 0;
5607 }
5608 
5609 int
5610 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
5611 {
5612 	struct ieee80211com *ic = &sc->sc_ic;
5613 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
5614 	struct ieee80211_rateset *rs;
5615 	size_t remain = sizeof(preq->buf);
5616 	uint8_t *frm, *pos;
5617 
5618 	memset(preq, 0, sizeof(*preq));
5619 
5620 	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
5621 		return ENOBUFS;
5622 
5623 	/*
5624 	 * Build a probe request frame.  Most of the following code is a
5625 	 * copy & paste of what is done in net80211.
5626 	 */
5627 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
5628 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
5629 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
5630 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
5631 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
5632 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
5633 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
5634 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
5635 
5636 	frm = (uint8_t *)(wh + 1);
5637 	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
5638 
5639 	/* Tell the firmware where the MAC header is. */
5640 	preq->mac_header.offset = 0;
5641 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
5642 	remain -= frm - (uint8_t *)wh;
5643 
5644 	/* Fill in 2GHz IEs and tell firmware where they are. */
5645 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
5646 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
5647 		if (remain < 4 + rs->rs_nrates)
5648 			return ENOBUFS;
5649 	} else if (remain < 2 + rs->rs_nrates)
5650 		return ENOBUFS;
5651 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
5652 	pos = frm;
5653 	frm = ieee80211_add_rates(frm, rs);
5654 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5655 		frm = ieee80211_add_xrates(frm, rs);
5656 	preq->band_data[0].len = htole16(frm - pos);
5657 	remain -= frm - pos;
5658 
5659 	if (isset(sc->sc_enabled_capa,
5660 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
5661 		if (remain < 3)
5662 			return ENOBUFS;
5663 		*frm++ = IEEE80211_ELEMID_DSPARMS;
5664 		*frm++ = 1;
5665 		*frm++ = 0;
5666 		remain -= 3;
5667 	}
5668 
5669 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
5670 		/* Fill in 5GHz IEs. */
5671 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
5672 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
5673 			if (remain < 4 + rs->rs_nrates)
5674 				return ENOBUFS;
5675 		} else if (remain < 2 + rs->rs_nrates)
5676 			return ENOBUFS;
5677 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
5678 		pos = frm;
5679 		frm = ieee80211_add_rates(frm, rs);
5680 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5681 			frm = ieee80211_add_xrates(frm, rs);
5682 		preq->band_data[1].len = htole16(frm - pos);
5683 		remain -= frm - pos;
5684 	}
5685 
5686 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
5687 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
5688 	pos = frm;
5689 	if (ic->ic_flags & IEEE80211_F_HTON) {
5690 		if (remain < 28)
5691 			return ENOBUFS;
5692 		frm = ieee80211_add_htcaps(frm, ic);
5693 		/* XXX add WME info? */
5694 	}
5695 	preq->common_data.len = htole16(frm - pos);
5696 
5697 	return 0;
5698 }
5699 
5700 int
5701 iwm_lmac_scan(struct iwm_softc *sc, int bgscan)
5702 {
5703 	struct ieee80211com *ic = &sc->sc_ic;
5704 	struct iwm_host_cmd hcmd = {
5705 		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
5706 		.len = { 0, },
5707 		.data = { NULL, },
5708 		.flags = 0,
5709 	};
5710 	struct iwm_scan_req_lmac *req;
5711 	struct iwm_scan_probe_req_v1 *preq;
5712 	size_t req_len;
5713 	int err, async = bgscan;
5714 
5715 	req_len = sizeof(struct iwm_scan_req_lmac) +
5716 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
5717 	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req_v1);
5718 	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5719 		return ENOMEM;
5720 	req = malloc(req_len, M_DEVBUF,
5721 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
5722 	if (req == NULL)
5723 		return ENOMEM;
5724 
5725 	hcmd.len[0] = (uint16_t)req_len;
5726 	hcmd.data[0] = (void *)req;
5727 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
5728 
5729 	/* These timings correspond to iwlwifi's UNASSOC scan. */
5730 	req->active_dwell = 10;
5731 	req->passive_dwell = 110;
5732 	req->fragmented_dwell = 44;
5733 	req->extended_dwell = 90;
5734 	if (bgscan) {
5735 		req->max_out_time = htole32(120);
5736 		req->suspend_time = htole32(120);
5737 	} else {
5738 		req->max_out_time = htole32(0);
5739 		req->suspend_time = htole32(0);
5740 	}
5741 	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
5742 	req->rx_chain_select = iwm_scan_rx_chain(sc);
5743 	req->iter_num = htole32(1);
5744 	req->delay = 0;
5745 
5746 	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
5747 	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
5748 	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
5749 	if (ic->ic_des_esslen == 0)
5750 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
5751 	else
5752 		req->scan_flags |=
5753 		    htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
5754 	if (isset(sc->sc_enabled_capa,
5755 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5756 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
5757 
5758 	req->flags = htole32(IWM_PHY_BAND_24);
5759 	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
5760 		req->flags |= htole32(IWM_PHY_BAND_5);
5761 	req->filter_flags =
5762 	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
5763 
5764 	/* Tx flags 2 GHz. */
5765 	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
5766 	    IWM_TX_CMD_FLG_BT_DIS);
5767 	req->tx_cmd[0].rate_n_flags =
5768 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
5769 	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
5770 
5771 	/* Tx flags 5 GHz. */
5772 	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
5773 	    IWM_TX_CMD_FLG_BT_DIS);
5774 	req->tx_cmd[1].rate_n_flags =
5775 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
5776 	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
5777 
5778 	/* Check if we're doing an active directed scan. */
5779 	if (ic->ic_des_esslen != 0) {
5780 		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5781 		req->direct_scan[0].len = ic->ic_des_esslen;
5782 		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
5783 		    ic->ic_des_esslen);
5784 	}
5785 
5786 	req->n_channels = iwm_lmac_scan_fill_channels(sc,
5787 	    (struct iwm_scan_channel_cfg_lmac *)req->data,
5788 	    ic->ic_des_esslen != 0, bgscan);
5789 
5790 	preq = (struct iwm_scan_probe_req_v1 *)(req->data +
5791 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
5792 	    sc->sc_capa_n_scan_channels));
5793 	err = iwm_fill_probe_req_v1(sc, preq);
5794 	if (err) {
5795 		free(req, M_DEVBUF, req_len);
5796 		return err;
5797 	}
5798 
5799 	/* Specify the scan plan: We'll do one iteration. */
5800 	req->schedule[0].iterations = 1;
5801 	req->schedule[0].full_scan_mul = 1;
5802 
5803 	/* Disable EBS. */
5804 	req->channel_opt[0].non_ebs_ratio = 1;
5805 	req->channel_opt[1].non_ebs_ratio = 1;
5806 
5807 	err = iwm_send_cmd(sc, &hcmd);
5808 	free(req, M_DEVBUF, req_len);
5809 	return err;
5810 }
5811 
5812 int
5813 iwm_config_umac_scan(struct iwm_softc *sc)
5814 {
5815 	struct ieee80211com *ic = &sc->sc_ic;
5816 	struct iwm_scan_config *scan_config;
5817 	int err, nchan;
5818 	size_t cmd_size;
5819 	struct ieee80211_channel *c;
5820 	struct iwm_host_cmd hcmd = {
5821 		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_LONG_GROUP, 0),
5822 		.flags = 0,
5823 	};
5824 	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
5825 	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
5826 	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
5827 	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
5828 	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
5829 	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
5830 	    IWM_SCAN_CONFIG_RATE_54M);
5831 
5832 	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
5833 
5834 	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
5835 	if (scan_config == NULL)
5836 		return ENOMEM;
5837 
5838 	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
5839 	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
5840 	scan_config->legacy_rates = htole32(rates |
5841 	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
5842 
5843 	/* These timings correspond to iwlwifi's UNASSOC scan. */
5844 	scan_config->dwell_active = 10;
5845 	scan_config->dwell_passive = 110;
5846 	scan_config->dwell_fragmented = 44;
5847 	scan_config->dwell_extended = 90;
5848 	scan_config->out_of_channel_time = htole32(0);
5849 	scan_config->suspend_time = htole32(0);
5850 
5851 	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
5852 
5853 	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
5854 	scan_config->channel_flags = 0;
5855 
5856 	for (c = &ic->ic_channels[1], nchan = 0;
5857 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5858 	    nchan < sc->sc_capa_n_scan_channels; c++) {
5859 		if (c->ic_flags == 0)
5860 			continue;
5861 		scan_config->channel_array[nchan++] =
5862 		    ieee80211_mhz2ieee(c->ic_freq, 0);
5863 	}
5864 
5865 	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
5866 	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
5867 	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
5868 	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
5869 	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
5870 	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
5871 	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
5872 	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
5873 	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
5874 	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
5875 	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
5876 
5877 	hcmd.data[0] = scan_config;
5878 	hcmd.len[0] = cmd_size;
5879 
5880 	err = iwm_send_cmd(sc, &hcmd);
5881 	free(scan_config, M_DEVBUF, cmd_size);
5882 	return err;
5883 }
5884 
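/*
 * Compute the size of the UMAC scan request; both the fixed part and
 * the tail of the command vary with the firmware's API flags.
 */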
5885 int
5886 iwm_umac_scan_size(struct iwm_softc *sc)
5887 {
5888 	int base_size = IWM_SCAN_REQ_UMAC_SIZE_V1;
5889 	int tail_size;
5890 
5891 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
5892 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V8;
5893 	else if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
5894 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V7;
5895 #ifdef notyet
5896 	else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_22000)
5897 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V6;
5898 #endif
5899 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
5900 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v2);
5901 	else
5902 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v1);
5903 
5904 	return base_size + sizeof(struct iwm_scan_channel_cfg_umac) *
5905 	    sc->sc_capa_n_scan_channels + tail_size;
5906 }
5907 
5908 struct iwm_scan_umac_chan_param *
5909 iwm_get_scan_req_umac_chan_param(struct iwm_softc *sc,
5910     struct iwm_scan_req_umac *req)
5911 {
5912 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
5913 		return &req->v8.channel;
5914 
5915 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
5916 		return &req->v7.channel;
5917 #ifdef notyet
5918 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_22000)
5919 		return &req->v6.channel;
5920 #endif
5921 	return &req->v1.channel;
5922 }
5923 
5924 void *
5925 iwm_get_scan_req_umac_data(struct iwm_softc *sc, struct iwm_scan_req_umac *req)
5926 {
5927 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
5928 		return (void *)&req->v8.data;
5929 
5930 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
5931 		return (void *)&req->v7.data;
5932 #ifdef notyet
5933 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_22000)
5934 		return (void *)&req->v6.data;
5935 #endif
5936 	return (void *)&req->v1.data;
5937 
5938 }
5939 
5940 /* adaptive dwell max budget time [TU] for full scan */
5941 #define IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
5942 /* adaptive dwell max budget time [TU] for directed scan */
5943 #define IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
5944 /* adaptive dwell default high band APs number */
5945 #define IWM_SCAN_ADWELL_DEFAULT_HB_N_APS 8
5946 /* adaptive dwell default low band APs number */
5947 #define IWM_SCAN_ADWELL_DEFAULT_LB_N_APS 2
5948 /* adaptive dwell default APs number in social channels (1, 6, 11) */
5949 #define IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
5950 
5951 int
5952 iwm_umac_scan(struct iwm_softc *sc, int bgscan)
5953 {
5954 	struct ieee80211com *ic = &sc->sc_ic;
5955 	struct iwm_host_cmd hcmd = {
5956 		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_LONG_GROUP, 0),
5957 		.len = { 0, },
5958 		.data = { NULL, },
5959 		.flags = 0,
5960 	};
5961 	struct iwm_scan_req_umac *req;
5962 	void *cmd_data, *tail_data;
5963 	struct iwm_scan_req_umac_tail_v2 *tail;
5964 	struct iwm_scan_req_umac_tail_v1 *tailv1;
5965 	struct iwm_scan_umac_chan_param *chanparam;
5966 	size_t req_len;
5967 	int err, async = bgscan;
5968 
5969 	req_len = iwm_umac_scan_size(sc);
5970 	if ((req_len < IWM_SCAN_REQ_UMAC_SIZE_V1 +
5971 	    sizeof(struct iwm_scan_req_umac_tail_v1)) ||
5972 	    req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5973 		return ERANGE;
5974 	req = malloc(req_len, M_DEVBUF,
5975 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
5976 	if (req == NULL)
5977 		return ENOMEM;
5978 
5979 	hcmd.len[0] = (uint16_t)req_len;
5980 	hcmd.data[0] = (void *)req;
5981 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
5982 
5983 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
5984 		req->v7.adwell_default_n_aps_social =
5985 			IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
5986 		req->v7.adwell_default_n_aps =
5987 			IWM_SCAN_ADWELL_DEFAULT_LB_N_APS;
5988 
5989 		if (ic->ic_des_esslen != 0)
5990 			req->v7.adwell_max_budget =
5991 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
5992 		else
5993 			req->v7.adwell_max_budget =
5994 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
5995 
5996 		req->v7.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5997 		req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = 0;
5998 		req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = 0;
5999 
6000 		if (isset(sc->sc_ucode_api,
6001 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
6002 			req->v8.active_dwell[IWM_SCAN_LB_LMAC_IDX] = 10;
6003 			req->v8.passive_dwell[IWM_SCAN_LB_LMAC_IDX] = 110;
6004 		} else {
6005 			req->v7.active_dwell = 10;
6006 			req->v7.passive_dwell = 110;
6007 			req->v7.fragmented_dwell = 44;
6008 		}
6009 	} else {
6010 		/* These timings correspond to iwlwifi's UNASSOC scan. */
6011 		req->v1.active_dwell = 10;
6012 		req->v1.passive_dwell = 110;
6013 		req->v1.fragmented_dwell = 44;
6014 		req->v1.extended_dwell = 90;
6015 	}
6016 
6017 	if (bgscan) {
6018 		const uint32_t timeout = htole32(120);
6019 		if (isset(sc->sc_ucode_api,
6020 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
6021 			req->v8.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
6022 			req->v8.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
6023 		} else if (isset(sc->sc_ucode_api,
6024 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
6025 			req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
6026 			req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
6027 		} else {
6028 			req->v1.max_out_time = timeout;
6029 			req->v1.suspend_time = timeout;
6030 		}
6031 	}
6032 
6033 	req->v1.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
6034 	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
6035 
6036 	cmd_data = iwm_get_scan_req_umac_data(sc, req);
6037 	chanparam = iwm_get_scan_req_umac_chan_param(sc, req);
6038 	chanparam->count = iwm_umac_scan_fill_channels(sc,
6039 	    (struct iwm_scan_channel_cfg_umac *)cmd_data,
6040 	    ic->ic_des_esslen != 0, bgscan);
6041 	chanparam->flags = 0;
6042 
6043 	tail_data = cmd_data + sizeof(struct iwm_scan_channel_cfg_umac) *
6044 	    sc->sc_capa_n_scan_channels;
6045 	tail = tail_data;
6046 	/* tail v1 layout differs in preq and direct_scan member fields. */
6047 	tailv1 = tail_data;
6048 
6049 	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
6050 	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);
6051 
6052 	/* Check if we're doing an active directed scan. */
6053 	if (ic->ic_des_esslen != 0) {
6054 		if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
6055 			tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
6056 			tail->direct_scan[0].len = ic->ic_des_esslen;
6057 			memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
6058 			    ic->ic_des_esslen);
6059 		} else {
6060 			tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
6061 			tailv1->direct_scan[0].len = ic->ic_des_esslen;
6062 			memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
6063 			    ic->ic_des_esslen);
6064 		}
6065 		req->general_flags |=
6066 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
6067 	} else
6068 		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
6069 
6070 	if (isset(sc->sc_enabled_capa,
6071 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
6072 		req->general_flags |=
6073 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
6074 
6075 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
6076 		req->general_flags |=
6077 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL);
6078 	} else {
6079 		req->general_flags |=
6080 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
6081 	}
6082 
6083 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
6084 		err = iwm_fill_probe_req(sc, &tail->preq);
6085 	else
6086 		err = iwm_fill_probe_req_v1(sc, &tailv1->preq);
6087 	if (err) {
6088 		free(req, M_DEVBUF, req_len);
6089 		return err;
6090 	}
6091 
6092 	/* Specify the scan plan: We'll do one iteration. */
6093 	tail->schedule[0].interval = 0;
6094 	tail->schedule[0].iter_count = 1;
6095 
6096 	err = iwm_send_cmd(sc, &hcmd);
6097 	free(req, M_DEVBUF, req_len);
6098 	return err;
6099 }
6100 
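/*
 * Map a firmware rate index to the matching entry in the given
 * net80211 rate set, including flags such as IEEE80211_RATE_BASIC,
 * or return 0 if the rate set does not contain this rate.
 */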
6101 uint8_t
6102 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
6103 {
6104 	int i;
6105 	uint8_t rval;
6106 
6107 	for (i = 0; i < rs->rs_nrates; i++) {
6108 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
6109 		if (rval == iwm_rates[ridx].rate)
6110 			return rs->rs_rates[i];
6111 	}
6112 
6113 	return 0;
6114 }
6115 
6116 int
6117 iwm_rval2ridx(int rval)
6118 {
6119 	int ridx;
6120 
6121 	for (ridx = 0; ridx < nitems(iwm_rates); ridx++) {
6122 		if (iwm_rates[ridx].plcp == IWM_RATE_INVM_PLCP)
6123 			continue;
6124 		if (rval == iwm_rates[ridx].rate)
6125 			break;
6126 	}
6127 
6128 	return ridx;
6129 }
6130 
6131 void
6132 iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
6133     int *ofdm_rates)
6134 {
6135 	struct ieee80211_node *ni = &in->in_ni;
6136 	struct ieee80211_rateset *rs = &ni->ni_rates;
6137 	int lowest_present_ofdm = -1;
6138 	int lowest_present_cck = -1;
6139 	uint8_t cck = 0;
6140 	uint8_t ofdm = 0;
6141 	int i;
6142 
6143 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
6144 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
6145 		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
6146 			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
6147 				continue;
6148 			cck |= (1 << i);
6149 			if (lowest_present_cck == -1 || lowest_present_cck > i)
6150 				lowest_present_cck = i;
6151 		}
6152 	}
6153 	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
6154 		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
6155 			continue;
6156 		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
6157 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
6158 			lowest_present_ofdm = i;
6159 	}
6160 
6161 	/*
6162 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
6163 	 * variables. This isn't sufficient though, as there might not
6164 	 * be all the right rates in the bitmap. E.g. if the only basic
6165 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
6166 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
6167 	 *
6168 	 *    [...] a STA responding to a received frame shall transmit
6169 	 *    its Control Response frame [...] at the highest rate in the
6170 	 *    BSSBasicRateSet parameter that is less than or equal to the
6171 	 *    rate of the immediately previous frame in the frame exchange
6172 	 *    sequence ([...]) and that is of the same modulation class
6173 	 *    ([...]) as the received frame. If no rate contained in the
6174 	 *    BSSBasicRateSet parameter meets these conditions, then the
6175 	 *    control frame sent in response to a received frame shall be
6176 	 *    transmitted at the highest mandatory rate of the PHY that is
6177 	 *    less than or equal to the rate of the received frame, and
6178 	 *    that is of the same modulation class as the received frame.
6179 	 *
6180 	 * As a consequence, we need to add all mandatory rates that are
6181 	 * lower than all of the basic rates to these bitmaps.
6182 	 */
6183 
6184 	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
6185 		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
6186 	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
6187 		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
6188 	/* 6M already there or needed so always add */
6189 	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
6190 
6191 	/*
6192 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
6193 	 * Note, however:
6194 	 *  - if no CCK rates are basic, it must be ERP since there must
6195 	 *    be some basic rates at all, so they're OFDM => ERP PHY
6196 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
6197 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
6198 	 *  - if 5.5M is basic, 1M and 2M are mandatory
6199 	 *  - if 2M is basic, 1M is mandatory
6200 	 *  - if 1M is basic, that's the only valid ACK rate.
6201 	 * As a consequence, it's not as complicated as it sounds, just add
6202 	 * any lower rates to the ACK rate bitmap.
6203 	 */
6204 	if (IWM_RATE_11M_INDEX < lowest_present_cck)
6205 		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
6206 	if (IWM_RATE_5M_INDEX < lowest_present_cck)
6207 		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
6208 	if (IWM_RATE_2M_INDEX < lowest_present_cck)
6209 		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
6210 	/* 1M already there or needed so always add */
6211 	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
6212 
6213 	*cck_rates = cck;
6214 	*ofdm_rates = ofdm;
6215 }
6216 
6217 void
6218 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
6219     struct iwm_mac_ctx_cmd *cmd, uint32_t action)
6220 {
6221 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6222 	struct ieee80211com *ic = &sc->sc_ic;
6223 	struct ieee80211_node *ni = ic->ic_bss;
6224 	int cck_ack_rates, ofdm_ack_rates;
6225 	int i;
6226 
6227 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
6228 	    in->in_color));
6229 	cmd->action = htole32(action);
6230 
6231 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6232 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_LISTENER);
6233 	else if (ic->ic_opmode == IEEE80211_M_STA)
6234 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
6235 	else
6236 		panic("unsupported operating mode %d", ic->ic_opmode);
6237 	cmd->tsf_id = htole32(IWM_TSF_ID_A);
6238 
6239 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
6240 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6241 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
6242 		return;
6243 	}
6244 
6245 	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
6246 	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
6247 	cmd->cck_rates = htole32(cck_ack_rates);
6248 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
6249 
6250 	cmd->cck_short_preamble
6251 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6252 	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
6253 	cmd->short_slot
6254 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
6255 	      ? IWM_MAC_FLG_SHORT_SLOT : 0);
6256 
6257 	for (i = 0; i < EDCA_NUM_AC; i++) {
6258 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
6259 		int txf = iwm_ac_to_tx_fifo[i];
6260 
6261 		cmd->ac[txf].cw_min = htole16(IWM_EXP2(ac->ac_ecwmin));
6262 		cmd->ac[txf].cw_max = htole16(IWM_EXP2(ac->ac_ecwmax));
6263 		cmd->ac[txf].aifsn = ac->ac_aifsn;
6264 		cmd->ac[txf].fifos_mask = (1 << txf);
6265 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
6266 	}
6267 	if (ni->ni_flags & IEEE80211_NODE_QOS)
6268 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
6269 
6270 	if (ni->ni_flags & IEEE80211_NODE_HT) {
6271 		enum ieee80211_htprot htprot =
6272 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
6273 		switch (htprot) {
6274 		case IEEE80211_HTPROT_NONE:
6275 			break;
6276 		case IEEE80211_HTPROT_NONMEMBER:
6277 		case IEEE80211_HTPROT_NONHT_MIXED:
6278 			cmd->protection_flags |=
6279 			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
6280 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
6281 				cmd->protection_flags |=
6282 				    htole32(IWM_MAC_PROT_FLG_SELF_CTS_EN);
6283 			break;
6284 		case IEEE80211_HTPROT_20MHZ:
6285 			if (ic->ic_htcaps & IEEE80211_HTCAP_CBW20_40) {
6286 				/* XXX ... and if our channel is 40 MHz ... */
6287 				cmd->protection_flags |=
6288 				    htole32(IWM_MAC_PROT_FLG_HT_PROT |
6289 				    IWM_MAC_PROT_FLG_FAT_PROT);
6290 				if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
6291 					cmd->protection_flags |= htole32(
6292 					    IWM_MAC_PROT_FLG_SELF_CTS_EN);
6293 			}
6294 			break;
6295 		default:
6296 			break;
6297 		}
6298 
6299 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
6300 	}
6301 	if (ic->ic_flags & IEEE80211_F_USEPROT)
6302 		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
6303 
6304 	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
6305 #undef IWM_EXP2
6306 }
6307 
6308 void
6309 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
6310     struct iwm_mac_data_sta *sta, int assoc)
6311 {
6312 	struct ieee80211_node *ni = &in->in_ni;
6313 	uint32_t dtim_off;
6314 	uint64_t tsf;
6315 
6316 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
6317 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
6318 	tsf = letoh64(tsf);
6319 
6320 	sta->is_assoc = htole32(assoc);
6321 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
6322 	sta->dtim_tsf = htole64(tsf + dtim_off);
6323 	sta->bi = htole32(ni->ni_intval);
6324 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
6325 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
6326 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
6327 	sta->listen_interval = htole32(10);
6328 	sta->assoc_id = htole32(ni->ni_associd);
6329 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
6330 }
6331 
6332 int
6333 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
6334     int assoc)
6335 {
6336 	struct ieee80211com *ic = &sc->sc_ic;
6337 	struct ieee80211_node *ni = &in->in_ni;
6338 	struct iwm_mac_ctx_cmd cmd;
6339 	int active = (sc->sc_flags & IWM_FLAG_MAC_ACTIVE);
6340 
6341 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
6342 		panic("MAC already added");
6343 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
6344 		panic("MAC already removed");
6345 
6346 	memset(&cmd, 0, sizeof(cmd));
6347 
6348 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action);
6349 
6350 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6351 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_PROMISC |
6352 		    IWM_MAC_FILTER_IN_CONTROL_AND_MGMT |
6353 		    IWM_MAC_FILTER_ACCEPT_GRP |
6354 		    IWM_MAC_FILTER_IN_BEACON |
6355 		    IWM_MAC_FILTER_IN_PROBE_REQUEST |
6356 		    IWM_MAC_FILTER_IN_CRC32);
6357 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
6358 		/*
6359 		 * Allow beacons to pass through as long as we are not
6360 		 * associated or we do not have dtim period information.
6361 		 */
6362 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
6363 	else
6364 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
6365 
6366 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6367 }
6368 
6369 int
6370 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in, int running)
6371 {
6372 	struct iwm_time_quota_cmd cmd;
6373 	int i, idx, num_active_macs, quota, quota_rem;
6374 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
6375 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
6376 	uint16_t id;
6377 
6378 	memset(&cmd, 0, sizeof(cmd));
6379 
6380 	/* currently, PHY ID == binding ID */
6381 	if (in && in->in_phyctxt) {
6382 		id = in->in_phyctxt->id;
6383 		KASSERT(id < IWM_MAX_BINDINGS);
6384 		colors[id] = in->in_phyctxt->color;
6385 		if (running)
6386 			n_ifs[id] = 1;
6387 	}
6388 
6389 	/*
6390 	 * The FW's scheduling session consists of
6391 	 * IWM_MAX_QUOTA fragments. Divide these fragments
6392 	 * equally between all the bindings that require quota
6393 	 */
6394 	num_active_macs = 0;
6395 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
6396 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
6397 		num_active_macs += n_ifs[i];
6398 	}
6399 
6400 	quota = 0;
6401 	quota_rem = 0;
6402 	if (num_active_macs) {
6403 		quota = IWM_MAX_QUOTA / num_active_macs;
6404 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
6405 	}
6406 
6407 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
6408 		if (colors[i] < 0)
6409 			continue;
6410 
6411 		cmd.quotas[idx].id_and_color =
6412 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
6413 
6414 		if (n_ifs[i] <= 0) {
6415 			cmd.quotas[idx].quota = htole32(0);
6416 			cmd.quotas[idx].max_duration = htole32(0);
6417 		} else {
6418 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
6419 			cmd.quotas[idx].max_duration = htole32(0);
6420 		}
6421 		idx++;
6422 	}
6423 
6424 	/* Give the remainder of the session to the first binding */
6425 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
6426 
6427 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
6428 	    sizeof(cmd), &cmd);
6429 }
6430 
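/*
 * Queue a task while holding a reference on the driver so pending
 * tasks can be drained before the device goes away; new tasks are
 * refused once IWM_FLAG_SHUTDOWN is set.
 */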
6431 void
6432 iwm_add_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
6433 {
6434 	int s = splnet();
6435 
6436 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
6437 		splx(s);
6438 		return;
6439 	}
6440 
6441 	refcnt_take(&sc->task_refs);
6442 	if (!task_add(taskq, task))
6443 		refcnt_rele_wake(&sc->task_refs);
6444 	splx(s);
6445 }
6446 
6447 void
6448 iwm_del_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
6449 {
6450 	if (task_del(taskq, task))
6451 		refcnt_rele(&sc->task_refs);
6452 }
6453 
6454 int
6455 iwm_scan(struct iwm_softc *sc)
6456 {
6457 	struct ieee80211com *ic = &sc->sc_ic;
6458 	struct ifnet *ifp = IC2IFP(ic);
6459 	int err;
6460 
6461 	if (sc->sc_flags & IWM_FLAG_BGSCAN) {
6462 		err = iwm_scan_abort(sc);
6463 		if (err) {
6464 			printf("%s: could not abort background scan\n",
6465 			    DEVNAME(sc));
6466 			return err;
6467 		}
6468 	}
6469 
6470 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6471 		err = iwm_umac_scan(sc, 0);
6472 	else
6473 		err = iwm_lmac_scan(sc, 0);
6474 	if (err) {
6475 		printf("%s: could not initiate scan\n", DEVNAME(sc));
6476 		return err;
6477 	}
6478 
6479 	/*
6480 	 * The current mode might have been fixed during association.
6481 	 * Ensure all channels get scanned.
6482 	 */
6483 	if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
6484 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
6485 
6486 	sc->sc_flags |= IWM_FLAG_SCANNING;
6487 	if (ifp->if_flags & IFF_DEBUG)
6488 		printf("%s: %s -> %s\n", ifp->if_xname,
6489 		    ieee80211_state_name[ic->ic_state],
6490 		    ieee80211_state_name[IEEE80211_S_SCAN]);
6491 	if ((sc->sc_flags & IWM_FLAG_BGSCAN) == 0) {
6492 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
6493 		ieee80211_node_cleanup(ic, ic->ic_bss);
6494 	}
6495 	ic->ic_state = IEEE80211_S_SCAN;
6496 	iwm_led_blink_start(sc);
6497 	wakeup(&ic->ic_state); /* wake iwm_init() */
6498 
6499 	return 0;
6500 }
6501 
6502 int
6503 iwm_bgscan(struct ieee80211com *ic)
6504 {
6505 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
6506 	int err;
6507 
6508 	if (sc->sc_flags & IWM_FLAG_SCANNING)
6509 		return 0;
6510 
6511 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6512 		err = iwm_umac_scan(sc, 1);
6513 	else
6514 		err = iwm_lmac_scan(sc, 1);
6515 	if (err) {
6516 		printf("%s: could not initiate scan\n", DEVNAME(sc));
6517 		return err;
6518 	}
6519 
6520 	sc->sc_flags |= IWM_FLAG_BGSCAN;
6521 	return 0;
6522 }
6523 
6524 int
6525 iwm_umac_scan_abort(struct iwm_softc *sc)
6526 {
6527 	struct iwm_umac_scan_abort cmd = { 0 };
6528 
6529 	return iwm_send_cmd_pdu(sc,
6530 	    IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC),
6531 	    0, sizeof(cmd), &cmd);
6532 }
6533 
6534 int
6535 iwm_lmac_scan_abort(struct iwm_softc *sc)
6536 {
6537 	struct iwm_host_cmd cmd = {
6538 		.id = IWM_SCAN_OFFLOAD_ABORT_CMD,
6539 	};
6540 	int err, status;
6541 
6542 	err = iwm_send_cmd_status(sc, &cmd, &status);
6543 	if (err)
6544 		return err;
6545 
6546 	if (status != IWM_CAN_ABORT_STATUS) {
6547 		/*
6548 		 * The scan abort will return 1 for success or
6549 		 * 2 for "failure".  A failure condition can be
6550 		 * due to simply not being in an active scan which
6551 		 * can occur if we send the scan abort before the
6552 		 * microcode has notified us that a scan is completed.
6553 		 */
6554 		return EBUSY;
6555 	}
6556 
6557 	return 0;
6558 }
6559 
6560 int
6561 iwm_scan_abort(struct iwm_softc *sc)
6562 {
6563 	int err;
6564 
6565 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6566 		err = iwm_umac_scan_abort(sc);
6567 	else
6568 		err = iwm_lmac_scan_abort(sc);
6569 
6570 	if (err == 0)
6571 		sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
6572 	return err;
6573 }
6574 
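/*
 * Prepare firmware state for authentication: point PHY context 0 at
 * the AP's channel, then add a MAC context, a binding, and a station.
 * On failure the steps already taken are undone, unless the device
 * was reset in the meantime (sc_generation changed).
 */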
6575 int
6576 iwm_auth(struct iwm_softc *sc)
6577 {
6578 	struct ieee80211com *ic = &sc->sc_ic;
6579 	struct iwm_node *in = (void *)ic->ic_bss;
6580 	uint32_t duration;
6581 	int generation = sc->sc_generation, err;
6582 
6583 	splassert(IPL_NET);
6584 
6585 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6586 		sc->sc_phyctxt[0].channel = ic->ic_ibss_chan;
6587 	else
6588 		sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
6589 	err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
6590 	    IWM_FW_CTXT_ACTION_MODIFY, 0);
6591 	if (err) {
6592 		printf("%s: could not update PHY context (error %d)\n",
6593 		    DEVNAME(sc), err);
6594 		return err;
6595 	}
6596 	in->in_phyctxt = &sc->sc_phyctxt[0];
6597 
6598 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
6599 	if (err) {
6600 		printf("%s: could not add MAC context (error %d)\n",
6601 		    DEVNAME(sc), err);
6602 		return err;
6603 	}
6604 	sc->sc_flags |= IWM_FLAG_MAC_ACTIVE;
6605 
6606 	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
6607 	if (err) {
6608 		printf("%s: could not add binding (error %d)\n",
6609 		    DEVNAME(sc), err);
6610 		goto rm_mac_ctxt;
6611 	}
6612 	sc->sc_flags |= IWM_FLAG_BINDING_ACTIVE;
6613 
6614 	err = iwm_add_sta_cmd(sc, in, 0);
6615 	if (err) {
6616 		printf("%s: could not add sta (error %d)\n",
6617 		    DEVNAME(sc), err);
6618 		goto rm_binding;
6619 	}
6620 	sc->sc_flags |= IWM_FLAG_STA_ACTIVE;
6621 
6622 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6623 		return 0;
6624 
6625 	/*
6626 	 * Prevent the FW from wandering off channel during association
6627 	 * by "protecting" the session with a time event.
6628 	 */
6629 	if (in->in_ni.ni_intval)
6630 		duration = in->in_ni.ni_intval * 2;
6631 	else
6632 		duration = IEEE80211_DUR_TU;
6633 	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
6634 
6635 	return 0;
6636 
6637 rm_binding:
6638 	if (generation == sc->sc_generation) {
6639 		iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
6640 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
6641 	}
6642 rm_mac_ctxt:
6643 	if (generation == sc->sc_generation) {
6644 		iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
6645 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
6646 	}
6647 	return err;
6648 }
6649 
6650 int
6651 iwm_deauth(struct iwm_softc *sc)
6652 {
6653 	struct ieee80211com *ic = &sc->sc_ic;
6654 	struct iwm_node *in = (void *)ic->ic_bss;
6655 	int ac, tfd_queue_msk, err;
6656 
6657 	splassert(IPL_NET);
6658 
6659 	iwm_unprotect_session(sc, in);
6660 
6661 	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
6662 		err = iwm_rm_sta_cmd(sc, in);
6663 		if (err) {
6664 			printf("%s: could not remove STA (error %d)\n",
6665 			    DEVNAME(sc), err);
6666 			return err;
6667 		}
6668 		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
6669 	}
6670 
6671 	tfd_queue_msk = 0;
6672 	for (ac = 0; ac < EDCA_NUM_AC; ac++) {
6673 		int qid = ac;
6674 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6675 			qid += IWM_DQA_MIN_MGMT_QUEUE;
6676 		tfd_queue_msk |= htole32(1 << qid);
6677 	}
6678 
6679 	err = iwm_flush_tx_path(sc, tfd_queue_msk);
6680 	if (err) {
6681 		printf("%s: could not flush Tx path (error %d)\n",
6682 		    DEVNAME(sc), err);
6683 		return err;
6684 	}
6685 
6686 	if (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE) {
6687 		err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
6688 		if (err) {
6689 			printf("%s: could not remove binding (error %d)\n",
6690 			    DEVNAME(sc), err);
6691 			return err;
6692 		}
6693 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
6694 	}
6695 
6696 	if (sc->sc_flags & IWM_FLAG_MAC_ACTIVE) {
6697 		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
6698 		if (err) {
6699 			printf("%s: could not remove MAC context (error %d)\n",
6700 			    DEVNAME(sc), err);
6701 			return err;
6702 		}
6703 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
6704 	}
6705 
6706 	return 0;
6707 }
6708 
6709 int
6710 iwm_assoc(struct iwm_softc *sc)
6711 {
6712 	struct ieee80211com *ic = &sc->sc_ic;
6713 	struct iwm_node *in = (void *)ic->ic_bss;
6714 	int update_sta = (sc->sc_flags & IWM_FLAG_STA_ACTIVE);
6715 	int err;
6716 
6717 	splassert(IPL_NET);
6718 
6719 	err = iwm_add_sta_cmd(sc, in, update_sta);
6720 	if (err) {
6721 		printf("%s: could not %s STA (error %d)\n",
6722 		    DEVNAME(sc), update_sta ? "update" : "add", err);
6723 		return err;
6724 	}
6725 
6726 	return 0;
6727 }
6728 
6729 int
6730 iwm_disassoc(struct iwm_softc *sc)
6731 {
6732 	struct ieee80211com *ic = &sc->sc_ic;
6733 	struct iwm_node *in = (void *)ic->ic_bss;
6734 	int err;
6735 
6736 	splassert(IPL_NET);
6737 
6738 	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
6739 		err = iwm_rm_sta_cmd(sc, in);
6740 		if (err) {
6741 			printf("%s: could not remove STA (error %d)\n",
6742 			    DEVNAME(sc), err);
6743 			return err;
6744 		}
6745 		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
6746 	}
6747 
6748 	return 0;
6749 }
6750 
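/*
 * Final configuration once we are associated (or once monitor mode
 * starts): switch to MIMO where possible, push updated STA, MAC and
 * quota settings to the firmware, and kick off rate adaptation.
 */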
6751 int
6752 iwm_run(struct iwm_softc *sc)
6753 {
6754 	struct ieee80211com *ic = &sc->sc_ic;
6755 	struct iwm_node *in = (void *)ic->ic_bss;
6756 	int err;
6757 
6758 	splassert(IPL_NET);
6759 
6760 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6761 		/* Add a MAC context and a sniffing STA. */
6762 		err = iwm_auth(sc);
6763 		if (err)
6764 			return err;
6765 	}
6766 
6767 	/* Configure Rx chains for MIMO. */
6768 	if ((ic->ic_opmode == IEEE80211_M_MONITOR ||
6769 	    (in->in_ni.ni_flags & IEEE80211_NODE_HT)) &&
6770 	    iwm_mimo_enabled(sc)) {
6771 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0],
6772 		    2, 2, IWM_FW_CTXT_ACTION_MODIFY, 0);
6773 		if (err) {
6774 			printf("%s: failed to update PHY\n",
6775 			    DEVNAME(sc));
6776 			return err;
6777 		}
6778 	}
6779 
6780 	/* Update STA again, for HT-related settings such as MIMO. */
6781 	err = iwm_add_sta_cmd(sc, in, 1);
6782 	if (err) {
6783 		printf("%s: could not update STA (error %d)\n",
6784 		    DEVNAME(sc), err);
6785 		return err;
6786 	}
6787 
6788 	/* We have now been assigned an associd by the AP. */
6789 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
6790 	if (err) {
6791 		printf("%s: failed to update MAC\n", DEVNAME(sc));
6792 		return err;
6793 	}
6794 
6795 	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
6796 	if (err) {
6797 		printf("%s: could not set sf full on (error %d)\n",
6798 		    DEVNAME(sc), err);
6799 		return err;
6800 	}
6801 
6802 	err = iwm_allow_mcast(sc);
6803 	if (err) {
6804 		printf("%s: could not allow mcast (error %d)\n",
6805 		    DEVNAME(sc), err);
6806 		return err;
6807 	}
6808 
6809 	err = iwm_power_update_device(sc);
6810 	if (err) {
6811 		printf("%s: could not send power command (error %d)\n",
6812 		    DEVNAME(sc), err);
6813 		return err;
6814 	}
6815 #ifdef notyet
6816 	/*
6817 	 * Disabled for now. Default beacon filter settings
6818 	 * prevent net80211 from getting ERP and HT protection
6819 	 * updates from beacons.
6820 	 */
6821 	err = iwm_enable_beacon_filter(sc, in);
6822 	if (err) {
6823 		printf("%s: could not enable beacon filter\n",
6824 		    DEVNAME(sc));
6825 		return err;
6826 	}
6827 #endif
6828 	err = iwm_power_mac_update_mode(sc, in);
6829 	if (err) {
6830 		printf("%s: could not update MAC power (error %d)\n",
6831 		    DEVNAME(sc), err);
6832 		return err;
6833 	}
6834 
6835 	err = iwm_update_quotas(sc, in, 1);
6836 	if (err) {
6837 		printf("%s: could not update quotas (error %d)\n",
6838 		    DEVNAME(sc), err);
6839 		return err;
6840 	}
6841 
6842 	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
6843 	ieee80211_mira_node_init(&in->in_mn);
6844 
6845 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6846 		iwm_led_blink_start(sc);
6847 		return 0;
6848 	}
6849 
6850 	/* Start at lowest available bit-rate, AMRR will raise. */
6851 	in->in_ni.ni_txrate = 0;
6852 	in->in_ni.ni_txmcs = 0;
6853 	in->chosen_txrate = 0;
6854 	in->chosen_txmcs = 0;
6855 	iwm_setrates(in, 0);
6856 
6857 	timeout_add_msec(&sc->sc_calib_to, 500);
6858 	iwm_led_enable(sc);
6859 
6860 	return 0;
6861 }
6862 
6863 int
6864 iwm_run_stop(struct iwm_softc *sc)
6865 {
6866 	struct ieee80211com *ic = &sc->sc_ic;
6867 	struct iwm_node *in = (void *)ic->ic_bss;
6868 	int err;
6869 
6870 	splassert(IPL_NET);
6871 
6872 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6873 		iwm_led_blink_stop(sc);
6874 
6875 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
6876 	if (err)
6877 		return err;
6878 
6879 	iwm_disable_beacon_filter(sc);
6880 
6881 	err = iwm_update_quotas(sc, in, 0);
6882 	if (err) {
6883 		printf("%s: could not update quotas (error %d)\n",
6884 		    DEVNAME(sc), err);
6885 		return err;
6886 	}
6887 
6888 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
6889 	if (err) {
6890 		printf("%s: failed to update MAC\n", DEVNAME(sc));
6891 		return err;
6892 	}
6893 
6894 	/* Reset Tx chains in case MIMO was enabled. */
6895 	if ((in->in_ni.ni_flags & IEEE80211_NODE_HT) &&
6896 	    iwm_mimo_enabled(sc)) {
6897 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
6898 		    IWM_FW_CTXT_ACTION_MODIFY, 0);
6899 		if (err) {
6900 			printf("%s: failed to update PHY\n", DEVNAME(sc));
6901 			return err;
6902 		}
6903 	}
6904 
6905 	return 0;
6906 }
6907 
6908 struct ieee80211_node *
6909 iwm_node_alloc(struct ieee80211com *ic)
6910 {
6911 	return malloc(sizeof (struct iwm_node), M_DEVBUF, M_NOWAIT | M_ZERO);
6912 }
6913 
6914 int
6915 iwm_set_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
6916     struct ieee80211_key *k)
6917 {
6918 	struct iwm_softc *sc = ic->ic_softc;
6919 	struct iwm_add_sta_key_cmd_v1 cmd;
6920 
6921 	memset(&cmd, 0, sizeof(cmd));
6922 
6923 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
6924 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
6925 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
6926 	    IWM_STA_KEY_FLG_KEYID_MSK));
6927 	if (k->k_flags & IEEE80211_KEY_GROUP)
6928 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
6929 
6930 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
6931 	cmd.common.key_offset = 0;
6932 	cmd.common.sta_id = IWM_STATION_ID;
6933 
6934 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
6935 	    sizeof(cmd), &cmd);
6936 }
6937 
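/*
 * Only pairwise CCMP keys are programmed into the hardware;
 * group keys and all other ciphers use net80211's software crypto.
 */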
6938 int
6939 iwm_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
6940     struct ieee80211_key *k)
6941 {
6942 	struct iwm_softc *sc = ic->ic_softc;
6943 	struct iwm_add_sta_key_cmd cmd;
6944 
6945 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
6946 	    k->k_cipher != IEEE80211_CIPHER_CCMP) {
6947 		/* Fall back to software crypto for other ciphers. */
6948 		return (ieee80211_set_key(ic, ni, k));
6949 	}
6950 
6951 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
6952 		return iwm_set_key_v1(ic, ni, k);
6953 
6954 	memset(&cmd, 0, sizeof(cmd));
6955 
6956 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
6957 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
6958 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
6959 	    IWM_STA_KEY_FLG_KEYID_MSK));
6960 	if (k->k_flags & IEEE80211_KEY_GROUP)
6961 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
6962 
6963 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
6964 	cmd.common.key_offset = 0;
6965 	cmd.common.sta_id = IWM_STATION_ID;
6966 
6967 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
6968 
6969 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
6970 	    sizeof(cmd), &cmd);
6971 }
6972 
6973 void
6974 iwm_delete_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
6975     struct ieee80211_key *k)
6976 {
6977 	struct iwm_softc *sc = ic->ic_softc;
6978 	struct iwm_add_sta_key_cmd_v1 cmd;
6979 
6980 	memset(&cmd, 0, sizeof(cmd));
6981 
6982 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
6983 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
6984 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
6985 	    IWM_STA_KEY_FLG_KEYID_MSK));
6986 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
6987 	cmd.common.key_offset = 0;
6988 	cmd.common.sta_id = IWM_STATION_ID;
6989 
6990 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
6991 }
6992 
6993 void
6994 iwm_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
6995     struct ieee80211_key *k)
6996 {
6997 	struct iwm_softc *sc = ic->ic_softc;
6998 	struct iwm_add_sta_key_cmd cmd;
6999 
7000 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
7001 	    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
7002 		/* Group keys and other ciphers use software crypto. */
7003 		ieee80211_delete_key(ic, ni, k);
7004 		return;
7005 	}
7006 
7007 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
7008 		return iwm_delete_key_v1(ic, ni, k);
7009 
7010 	memset(&cmd, 0, sizeof(cmd));
7011 
7012 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
7013 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
7014 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
7015 	    IWM_STA_KEY_FLG_KEYID_MSK));
7016 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
7017 	cmd.common.key_offset = 0;
7018 	cmd.common.sta_id = IWM_STATION_ID;
7019 
7020 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
7021 }
7022 
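/*
 * Periodic timer (every 500ms) which drives AMRR rate adaptation
 * while associated at legacy (non-HT) rates without a fixed rate.
 */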
7023 void
7024 iwm_calib_timeout(void *arg)
7025 {
7026 	struct iwm_softc *sc = arg;
7027 	struct ieee80211com *ic = &sc->sc_ic;
7028 	struct iwm_node *in = (void *)ic->ic_bss;
7029 	struct ieee80211_node *ni = &in->in_ni;
7030 	int s;
7031 
7032 	s = splnet();
7033 	if ((ic->ic_fixed_rate == -1 || ic->ic_fixed_mcs == -1) &&
7034 	    (ni->ni_flags & IEEE80211_NODE_HT) == 0 &&
7035 	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
7036 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
7037 		/*
7038 		 * If AMRR has chosen a new TX rate we must update
7039 		 * the firmware's LQ rate table from process context.
7040 		 * ni_txrate may change again before the task runs so
7041 		 * cache the chosen rate in the iwm_node structure.
7042 		 */
7043 		if (ni->ni_txrate != in->chosen_txrate) {
7044 			in->chosen_txrate = ni->ni_txrate;
7045 			iwm_setrates(in, 1);
7046 		}
7047 	}
7048 
7049 	splx(s);
7050 
7051 	timeout_add_msec(&sc->sc_calib_to, 500);
7052 }
7053 
7054 void
7055 iwm_setrates(struct iwm_node *in, int async)
7056 {
7057 	struct ieee80211_node *ni = &in->in_ni;
7058 	struct ieee80211com *ic = ni->ni_ic;
7059 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
7060 	struct iwm_lq_cmd lqcmd;
7061 	struct ieee80211_rateset *rs = &ni->ni_rates;
7062 	int i, ridx, ridx_min, ridx_max, j, sgi_ok = 0, mimo, tab = 0;
7063 	struct iwm_host_cmd cmd = {
7064 		.id = IWM_LQ_CMD,
7065 		.len = { sizeof(lqcmd), },
7066 	};
7067 
7068 	cmd.flags = async ? IWM_CMD_ASYNC : 0;
7069 
7070 	memset(&lqcmd, 0, sizeof(lqcmd));
7071 	lqcmd.sta_id = IWM_STATION_ID;
7072 
7073 	if (ic->ic_flags & IEEE80211_F_USEPROT)
7074 		lqcmd.flags |= IWM_LQ_FLAG_USE_RTS_MSK;
7075 
7076 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
7077 	    ieee80211_node_supports_ht_sgi20(ni)) {
7078 		ni->ni_flags |= IEEE80211_NODE_HT_SGI20;
7079 		sgi_ok = 1;
7080 	}
7081 
7082 	/*
7083 	 * Fill the LQ rate selection table with legacy and/or HT rates
7084 	 * in descending order, i.e. with the node's current TX rate first.
7085 	 * In cases where throughput of an HT rate corresponds to a legacy
7086 	 * rate it makes no sense to add both. We rely on the fact that
7087 	 * iwm_rates is laid out such that equivalent HT/legacy rates share
7088 	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
7089 	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
7090 	 */
7091 	j = 0;
7092 	ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
7093 	mimo = iwm_is_mimo_mcs(in->chosen_txmcs);
7094 	ridx_max = (mimo ? IWM_RIDX_MAX : IWM_LAST_HT_SISO_RATE);
7095 	for (ridx = ridx_max; ridx >= ridx_min; ridx--) {
7096 		uint8_t plcp = iwm_rates[ridx].plcp;
7097 		uint8_t ht_plcp = iwm_rates[ridx].ht_plcp;
7098 
7099 		if (j >= nitems(lqcmd.rs_table))
7100 			break;
7101 		tab = 0;
7102 		if (ni->ni_flags & IEEE80211_NODE_HT) {
7103 			if (ht_plcp == IWM_RATE_HT_SISO_MCS_INV_PLCP)
7104 				continue;
7105 			/* Do not mix SISO and MIMO HT rates. */
7106 			if ((mimo && !iwm_is_mimo_ht_plcp(ht_plcp)) ||
7107 			    (!mimo && iwm_is_mimo_ht_plcp(ht_plcp)))
7108 				continue;
7109 			for (i = in->chosen_txmcs; i >= 0; i--) {
7110 				if (isclr(ni->ni_rxmcs, i))
7111 					continue;
7112 				if (ridx == iwm_mcs2ridx[i]) {
7113 					tab = ht_plcp;
7114 					tab |= IWM_RATE_MCS_HT_MSK;
7115 					if (sgi_ok)
7116 						tab |= IWM_RATE_MCS_SGI_MSK;
7117 					break;
7118 				}
7119 			}
7120 		} else if (plcp != IWM_RATE_INVM_PLCP) {
7121 			for (i = in->chosen_txrate; i >= 0; i--) {
7122 				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
7123 				    IEEE80211_RATE_VAL)) {
7124 					tab = plcp;
7125 					break;
7126 				}
7127 			}
7128 		}
7129 
7130 		if (tab == 0)
7131 			continue;
7132 
7133 		if (iwm_is_mimo_ht_plcp(ht_plcp))
7134 			tab |= IWM_RATE_MCS_ANT_AB_MSK;
7135 		else
7136 			tab |= IWM_RATE_MCS_ANT_A_MSK;
7137 
7138 		if (IWM_RIDX_IS_CCK(ridx))
7139 			tab |= IWM_RATE_MCS_CCK_MSK;
7140 		lqcmd.rs_table[j++] = htole32(tab);
7141 	}
7142 
7143 	lqcmd.mimo_delim = (mimo ? j : 0);
7144 
7145 	/* Fill the rest with the lowest possible rate */
7146 	while (j < nitems(lqcmd.rs_table)) {
7147 		tab = iwm_rates[ridx_min].plcp;
7148 		if (IWM_RIDX_IS_CCK(ridx_min))
7149 			tab |= IWM_RATE_MCS_CCK_MSK;
7150 		tab |= IWM_RATE_MCS_ANT_A_MSK;
7151 		lqcmd.rs_table[j++] = htole32(tab);
7152 	}
7153 
7154 	lqcmd.single_stream_ant_msk = IWM_ANT_A;
7155 	lqcmd.dual_stream_ant_msk = IWM_ANT_AB;
7156 
7157 	lqcmd.agg_time_limit = htole16(4000);	/* 4ms */
7158 	lqcmd.agg_disable_start_th = 3;
7159 #ifdef notyet
7160 	lqcmd.agg_frame_cnt_limit = 0x3f;
7161 #else
7162 	lqcmd.agg_frame_cnt_limit = 1; /* tx agg disabled */
7163 #endif
7164 
7165 	cmd.data[0] = &lqcmd;
7166 	iwm_send_cmd(sc, &cmd);
7167 }
7168 
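/*
 * ifmedia change callback: map a user-selected fixed rate or MCS to
 * a hardware rate index, then restart the interface to apply it.
 */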
7169 int
7170 iwm_media_change(struct ifnet *ifp)
7171 {
7172 	struct iwm_softc *sc = ifp->if_softc;
7173 	struct ieee80211com *ic = &sc->sc_ic;
7174 	uint8_t rate, ridx;
7175 	int err;
7176 
7177 	err = ieee80211_media_change(ifp);
7178 	if (err != ENETRESET)
7179 		return err;
7180 
7181 	if (ic->ic_fixed_mcs != -1)
7182 		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
7183 	else if (ic->ic_fixed_rate != -1) {
7184 		rate = ic->ic_sup_rates[ic->ic_curmode].
7185 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
7186 		/* Map 802.11 rate to HW rate index. */
7187 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
7188 			if (iwm_rates[ridx].rate == rate)
7189 				break;
7190 		sc->sc_fixed_ridx = ridx;
7191 	}
7192 
7193 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
7194 	    (IFF_UP | IFF_RUNNING)) {
7195 		iwm_stop(ifp);
7196 		err = iwm_init(ifp);
7197 	}
7198 	return err;
7199 }
7200 
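/*
 * Task which carries out 802.11 state transitions on behalf of
 * iwm_newstate(). When moving to a lower state, the intermediate
 * states are stepped through in order (RUN -> ASSOC -> AUTH) so
 * that firmware state is torn down cleanly.
 */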
7201 void
7202 iwm_newstate_task(void *psc)
7203 {
7204 	struct iwm_softc *sc = (struct iwm_softc *)psc;
7205 	struct ieee80211com *ic = &sc->sc_ic;
7206 	enum ieee80211_state nstate = sc->ns_nstate;
7207 	enum ieee80211_state ostate = ic->ic_state;
7208 	int arg = sc->ns_arg;
7209 	int err = 0, s = splnet();
7210 
7211 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
7212 		/* iwm_stop() is waiting for us. */
7213 		refcnt_rele_wake(&sc->task_refs);
7214 		splx(s);
7215 		return;
7216 	}
7217 
7218 	if (ostate == IEEE80211_S_SCAN) {
7219 		if (nstate == ostate) {
7220 			if (sc->sc_flags & IWM_FLAG_SCANNING) {
7221 				refcnt_rele_wake(&sc->task_refs);
7222 				splx(s);
7223 				return;
7224 			}
7225 			/* Firmware is no longer scanning. Do another scan. */
7226 			goto next_scan;
7227 		} else
7228 			iwm_led_blink_stop(sc);
7229 	}
7230 
7231 	if (nstate <= ostate) {
7232 		switch (ostate) {
7233 		case IEEE80211_S_RUN:
7234 			err = iwm_run_stop(sc);
7235 			if (err)
7236 				goto out;
7237 			/* FALLTHROUGH */
7238 		case IEEE80211_S_ASSOC:
7239 			if (nstate <= IEEE80211_S_ASSOC) {
7240 				err = iwm_disassoc(sc);
7241 				if (err)
7242 					goto out;
7243 			}
7244 			/* FALLTHROUGH */
7245 		case IEEE80211_S_AUTH:
7246 			if (nstate <= IEEE80211_S_AUTH) {
7247 				err = iwm_deauth(sc);
7248 				if (err)
7249 					goto out;
7250 			}
7251 			/* FALLTHROUGH */
7252 		case IEEE80211_S_SCAN:
7253 		case IEEE80211_S_INIT:
7254 			break;
7255 		}
7256 
7257 		/* Die now if iwm_stop() was called while we were sleeping. */
7258 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
7259 			refcnt_rele_wake(&sc->task_refs);
7260 			splx(s);
7261 			return;
7262 		}
7263 	}
7264 
7265 	switch (nstate) {
7266 	case IEEE80211_S_INIT:
7267 		break;
7268 
7269 	case IEEE80211_S_SCAN:
7270 next_scan:
7271 		err = iwm_scan(sc);
7272 		if (err)
7273 			break;
7274 		refcnt_rele_wake(&sc->task_refs);
7275 		splx(s);
7276 		return;
7277 
7278 	case IEEE80211_S_AUTH:
7279 		err = iwm_auth(sc);
7280 		break;
7281 
7282 	case IEEE80211_S_ASSOC:
7283 		err = iwm_assoc(sc);
7284 		break;
7285 
7286 	case IEEE80211_S_RUN:
7287 		err = iwm_run(sc);
7288 		break;
7289 	}
7290 
7291 out:
7292 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
7293 		if (err)
7294 			task_add(systq, &sc->init_task);
7295 		else
7296 			sc->sc_newstate(ic, nstate, arg);
7297 	}
7298 	refcnt_rele_wake(&sc->task_refs);
7299 	splx(s);
7300 }
7301 
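/*
 * ic_newstate handler. The transition itself is deferred to
 * iwm_newstate_task() on sc_nswq; the calibration timeout and any
 * pending Block Ack and HT-protection tasks are cancelled first
 * when leaving RUN state.
 */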
7302 int
7303 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
7304 {
7305 	struct ifnet *ifp = IC2IFP(ic);
7306 	struct iwm_softc *sc = ifp->if_softc;
7307 	struct iwm_node *in = (void *)ic->ic_bss;
7308 
7309 	if (ic->ic_state == IEEE80211_S_RUN) {
7310 		timeout_del(&sc->sc_calib_to);
7311 		ieee80211_mira_cancel_timeouts(&in->in_mn);
7312 		iwm_del_task(sc, systq, &sc->ba_task);
7313 		iwm_del_task(sc, systq, &sc->htprot_task);
7314 	}
7315 
7316 	sc->ns_nstate = nstate;
7317 	sc->ns_arg = arg;
7318 
7319 	iwm_add_task(sc, sc->sc_nswq, &sc->newstate_task);
7320 
7321 	return 0;
7322 }
7323 
7324 void
7325 iwm_endscan(struct iwm_softc *sc)
7326 {
7327 	struct ieee80211com *ic = &sc->sc_ic;
7328 
7329 	if ((sc->sc_flags & (IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN)) == 0)
7330 		return;
7331 
7332 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
7333 	ieee80211_end_scan(&ic->ic_if);
7334 }
7335 
7336 /*
7337  * Aging and idle timeouts for the different possible scenarios
7338  * in default configuration
7339  */
7340 static const uint32_t
7341 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
7342 	{
7343 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
7344 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
7345 	},
7346 	{
7347 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
7348 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
7349 	},
7350 	{
7351 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
7352 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
7353 	},
7354 	{
7355 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
7356 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
7357 	},
7358 	{
7359 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
7360 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
7361 	},
7362 };
7363 
7364 /*
7365  * Aging and idle timeouts for the different possible scenarios
7366  * in single BSS MAC configuration.
7367  */
7368 static const uint32_t
7369 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
7370 	{
7371 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
7372 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
7373 	},
7374 	{
7375 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
7376 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
7377 	},
7378 	{
7379 		htole32(IWM_SF_MCAST_AGING_TIMER),
7380 		htole32(IWM_SF_MCAST_IDLE_TIMER)
7381 	},
7382 	{
7383 		htole32(IWM_SF_BA_AGING_TIMER),
7384 		htole32(IWM_SF_BA_IDLE_TIMER)
7385 	},
7386 	{
7387 		htole32(IWM_SF_TX_RE_AGING_TIMER),
7388 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
7389 	},
7390 };
7391 
7392 void
7393 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
7394     struct ieee80211_node *ni)
7395 {
7396 	int i, j, watermark;
7397 
7398 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
7399 
7400 	/*
7401 	 * If we are in association flow - check antenna configuration
7402 	 * capabilities of the AP station, and choose the watermark accordingly.
7403 	 */
7404 	if (ni) {
7405 		if (ni->ni_flags & IEEE80211_NODE_HT) {
7406 			if (ni->ni_rxmcs[1] != 0)
7407 				watermark = IWM_SF_W_MARK_MIMO2;
7408 			else
7409 				watermark = IWM_SF_W_MARK_SISO;
7410 		} else {
7411 			watermark = IWM_SF_W_MARK_LEGACY;
7412 		}
7413 	} else {
7414 		/* Default watermark value for unassociated mode. */
7415 		watermark = IWM_SF_W_MARK_MIMO2;
7416 	}
7417 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
7418 
7419 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
7420 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
7421 			sf_cmd->long_delay_timeouts[i][j] =
7422 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
7423 		}
7424 	}
7425 
7426 	if (ni) {
7427 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
7428 		       sizeof(iwm_sf_full_timeout));
7429 	} else {
7430 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
7431 		       sizeof(iwm_sf_full_timeout_def));
7432 	}
7434 }
7435 
7436 int
7437 iwm_sf_config(struct iwm_softc *sc, int new_state)
7438 {
7439 	struct ieee80211com *ic = &sc->sc_ic;
7440 	struct iwm_sf_cfg_cmd sf_cmd = {
7441 		.state = htole32(new_state),
7442 	};
7443 	int err = 0;
7444 
7445 #if 0	/* only used for models with sdio interface, in iwlwifi */
7446 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
7447 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
7448 #endif
7449 
7450 	switch (new_state) {
7451 	case IWM_SF_UNINIT:
7452 	case IWM_SF_INIT_OFF:
7453 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
7454 		break;
7455 	case IWM_SF_FULL_ON:
7456 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
7457 		break;
7458 	default:
7459 		return EINVAL;
7460 	}
7461 
7462 	err = iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
7463 				   sizeof(sf_cmd), &sf_cmd);
7464 	return err;
7465 }
7466 
7467 int
7468 iwm_send_bt_init_conf(struct iwm_softc *sc)
7469 {
7470 	struct iwm_bt_coex_cmd bt_cmd;
7471 
7472 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
7473 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
7474 
7475 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
7476 	    &bt_cmd);
7477 }
7478 
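/*
 * Send a regulatory domain update to the firmware. The argument is
 * a two-letter country code; iwm_init_hw() passes "ZZ" (the
 * world-wide domain) when LAR support is enabled.
 */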
7479 int
7480 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
7481 {
7482 	struct iwm_mcc_update_cmd mcc_cmd;
7483 	struct iwm_host_cmd hcmd = {
7484 		.id = IWM_MCC_UPDATE_CMD,
7485 		.flags = IWM_CMD_WANT_RESP,
7486 		.data = { &mcc_cmd },
7487 	};
7488 	int err;
7489 	int resp_v2 = isset(sc->sc_enabled_capa,
7490 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
7491 
7492 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000 &&
7493 	    !sc->sc_nvm.lar_enabled) {
7494 		return 0;
7495 	}
7496 
7497 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
7498 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
7499 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
7500 	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
7501 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
7502 	else
7503 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
7504 
7505 	if (resp_v2) {
7506 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
7507 		hcmd.resp_pkt_len = sizeof(struct iwm_rx_packet) +
7508 		    sizeof(struct iwm_mcc_update_resp);
7509 	} else {
7510 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
7511 		hcmd.resp_pkt_len = sizeof(struct iwm_rx_packet) +
7512 		    sizeof(struct iwm_mcc_update_resp_v1);
7513 	}
7514 
7515 	err = iwm_send_cmd(sc, &hcmd);
7516 	if (err)
7517 		return err;
7518 
7519 	iwm_free_resp(sc, &hcmd);
7520 
7521 	return 0;
7522 }
7523 
7524 void
7525 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
7526 {
7527 	struct iwm_host_cmd cmd = {
7528 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
7529 		.len = { sizeof(uint32_t), },
7530 		.data = { &backoff, },
7531 	};
7532 
7533 	iwm_send_cmd(sc, &cmd);
7534 }
7535 
7536 void
7537 iwm_free_fw_paging(struct iwm_softc *sc)
7538 {
7539 	int i;
7540 
7541 	if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
7542 		return;
7543 
7544 	for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
7545 		iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
7546 	}
7547 
7548 	memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
7549 }
7550 
7551 int
7552 iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
7553 {
7554 	int sec_idx, idx;
7555 	uint32_t offset = 0;
7556 
7557 	/*
7558 	 * Find the start of the paging image.
7559 	 * If CPU2 exists and uses paging, the image is laid out like this:
7560 	 * CPU1 sections (2 or more)
7561 	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
7562 	 * CPU2 sections (not paged)
7563 	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
7564 	 * CPU2 sections from the CPU2 paging sections
7565 	 * CPU2 paging CSS
7566 	 * CPU2 paging image (including instructions and data)
7567 	 */
7568 	for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
7569 		if (image->fw_sect[sec_idx].fws_devoff ==
7570 		    IWM_PAGING_SEPARATOR_SECTION) {
7571 			sec_idx++;
7572 			break;
7573 		}
7574 	}
7575 
7576 	/*
7577 	 * If paging is enabled there should be at least 2 more sections left
7578 	 * (one for CSS and one for Paging data)
7579 	 */
7580 	if (sec_idx >= nitems(image->fw_sect) - 1) {
7581 		printf("%s: Paging: Missing CSS and/or paging sections\n",
7582 		    DEVNAME(sc));
7583 		iwm_free_fw_paging(sc);
7584 		return EINVAL;
7585 	}
7586 
7587 	/* Copy the CSS block to DRAM. */
7588 	DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n",
7589 	    DEVNAME(sc), sec_idx));
7590 
7591 	memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
7592 	    image->fw_sect[sec_idx].fws_data,
7593 	    sc->fw_paging_db[0].fw_paging_size);
7594 
7595 	DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
7596 	    DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
7597 
7598 	sec_idx++;
7599 
7600 	/*
7601 	 * Copy the paging blocks to DRAM.
7602 	 * The loop index starts at 1 because the CSS block (index 0)
7603 	 * has already been copied above.
7604 	 * The loop stops early because the last block may not be full.
7605 	 */
7606 	for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
7607 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
7608 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
7609 		    sc->fw_paging_db[idx].fw_paging_size);
7610 
7611 		DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
7612 		    DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
7613 
7614 		offset += sc->fw_paging_db[idx].fw_paging_size;
7615 	}
7616 
7617 	/* copy the last paging block */
7618 	if (sc->num_of_pages_in_last_blk > 0) {
7619 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
7620 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
7621 		    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
7622 
7623 		DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
7624 		    DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
7625 	}
7626 
7627 	return 0;
7628 }
7629 
7630 int
7631 iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
7632 {
7633 	int blk_idx = 0;
7634 	int error, num_of_pages;
7635 
7636 	if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
7637 		int i;
7638 		/* Device got reset, and we setup firmware paging again */
7639 		bus_dmamap_sync(sc->sc_dmat,
7640 		    sc->fw_paging_db[0].fw_paging_block.map,
7641 		    0, IWM_FW_PAGING_SIZE,
7642 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
7643 		for (i = 1; i < sc->num_of_paging_blk + 1; i++) {
7644 			bus_dmamap_sync(sc->sc_dmat,
7645 			    sc->fw_paging_db[i].fw_paging_block.map,
7646 			    0, IWM_PAGING_BLOCK_SIZE,
7647 			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
7648 		}
7649 		return 0;
7650 	}
7651 
7652 	/* Ensure IWM_BLOCK_2_EXP_SIZE is log2(IWM_PAGING_BLOCK_SIZE). */
7653 #if (1 << IWM_BLOCK_2_EXP_SIZE) != IWM_PAGING_BLOCK_SIZE
7654 #error IWM_BLOCK_2_EXP_SIZE must be log2 of IWM_PAGING_BLOCK_SIZE
7655 #endif
7656 
7657 	num_of_pages = image->paging_mem_size / IWM_FW_PAGING_SIZE;
7658 	sc->num_of_paging_blk =
7659 	    ((num_of_pages - 1) / IWM_NUM_OF_PAGE_PER_GROUP) + 1;
7660 
7661 	sc->num_of_pages_in_last_blk =
7662 		num_of_pages -
7663 		IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
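	/*
	 * Worked example (assuming 4KB pages and 8 pages per block):
	 * a 340KB paging image yields 85 pages, which need 11 blocks,
	 * with the last block holding 5 pages.
	 */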
7664 
7665 	DPRINTF(("%s: Paging: allocating mem for %d paging blocks, each block"
7666 	    " holds 8 pages, last block holds %d pages\n", DEVNAME(sc),
7667 	    sc->num_of_paging_blk,
7668 	    sc->num_of_pages_in_last_blk));
7669 
7670 	/* allocate block of 4Kbytes for paging CSS */
7671 	error = iwm_dma_contig_alloc(sc->sc_dmat,
7672 	    &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
7673 	    4096);
7674 	if (error) {
7675 		/* free all the previous pages since we failed */
7676 		iwm_free_fw_paging(sc);
7677 		return ENOMEM;
7678 	}
7679 
7680 	sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
7681 
7682 	DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
7683 	    DEVNAME(sc)));
7684 
7685 	/*
7686 	 * allocate blocks in dram.
7687 	 * Allocate paging blocks in DRAM. The CSS block occupies
7688 	 * fw_paging_db[0], so the loop starts at index 1.
7689 	for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
7690 		/* allocate block of IWM_PAGING_BLOCK_SIZE (32K) */
7691 		/* XXX Use iwm_dma_contig_alloc for allocating */
7692 		error = iwm_dma_contig_alloc(sc->sc_dmat,
7693 		     &sc->fw_paging_db[blk_idx].fw_paging_block,
7694 		    IWM_PAGING_BLOCK_SIZE, 4096);
7695 		if (error) {
7696 			/* free all the previous pages since we failed */
7697 			iwm_free_fw_paging(sc);
7698 			return ENOMEM;
7699 		}
7700 
7701 		sc->fw_paging_db[blk_idx].fw_paging_size =
7702 		    IWM_PAGING_BLOCK_SIZE;
7703 
7704 		DPRINTF((
7705 		    "%s: Paging: allocated 32K bytes for firmware paging.\n",
7706 		    DEVNAME(sc)));
7707 	}
7708 
7709 	return 0;
7710 }
7711 
7712 int
7713 iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
7714 {
7715 	int ret;
7716 
7717 	ret = iwm_alloc_fw_paging_mem(sc, fw);
7718 	if (ret)
7719 		return ret;
7720 
7721 	return iwm_fill_paging_mem(sc, fw);
7722 }
7723 
7724 /* send paging cmd to FW in case CPU2 has paging image */
7725 int
7726 iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
7727 {
7728 	int blk_idx;
7729 	uint32_t dev_phy_addr;
7730 	struct iwm_fw_paging_cmd fw_paging_cmd = {
7731 		.flags =
7732 			htole32(IWM_PAGING_CMD_IS_SECURED |
7733 				IWM_PAGING_CMD_IS_ENABLED |
7734 				(sc->num_of_pages_in_last_blk <<
7735 				IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
7736 		.block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
7737 		.block_num = htole32(sc->num_of_paging_blk),
7738 	};
7739 
7740 	/* Loop over all paging blocks + the CSS block. */
7741 	for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
7742 		dev_phy_addr = htole32(
7743 		    sc->fw_paging_db[blk_idx].fw_paging_block.paddr >>
7744 		    IWM_PAGE_2_EXP_SIZE);
7745 		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
7746 		bus_dmamap_sync(sc->sc_dmat,
7747 		    sc->fw_paging_db[blk_idx].fw_paging_block.map, 0,
7748 		    blk_idx == 0 ? IWM_FW_PAGING_SIZE : IWM_PAGING_BLOCK_SIZE,
7749 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
7750 	}
7751 
7752 	return iwm_send_cmd_pdu(sc, iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD,
7753 					       IWM_LONG_GROUP, 0),
7754 	    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
7755 }
7756 
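/*
 * Bring the device up: run the INIT firmware image once for NVM
 * access and calibration, restart with the regular runtime image,
 * then configure PHY contexts, Tx queues, and power settings.
 */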
7757 int
7758 iwm_init_hw(struct iwm_softc *sc)
7759 {
7760 	struct ieee80211com *ic = &sc->sc_ic;
7761 	int err, i, ac, qid;
7762 
7763 	err = iwm_preinit(sc);
7764 	if (err)
7765 		return err;
7766 
7767 	err = iwm_start_hw(sc);
7768 	if (err) {
7769 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
7770 		return err;
7771 	}
7772 
7773 	err = iwm_run_init_mvm_ucode(sc, 0);
7774 	if (err)
7775 		return err;
7776 
7777 	/* Should stop and start HW since INIT image just loaded. */
7778 	iwm_stop_device(sc);
7779 	err = iwm_start_hw(sc);
7780 	if (err) {
7781 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
7782 		return err;
7783 	}
7784 
7785 	/* Restart, this time with the regular firmware */
7786 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
7787 	if (err) {
7788 		printf("%s: could not load firmware\n", DEVNAME(sc));
7789 		goto err;
7790 	}
7791 
7792 	if (!iwm_nic_lock(sc))
7793 		return EBUSY;
7794 
7795 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
7796 	if (err) {
7797 		printf("%s: could not init tx ant config (error %d)\n",
7798 		    DEVNAME(sc), err);
7799 		goto err;
7800 	}
7801 
7802 	err = iwm_send_phy_db_data(sc);
7803 	if (err) {
7804 		printf("%s: could not init phy db (error %d)\n",
7805 		    DEVNAME(sc), err);
7806 		goto err;
7807 	}
7808 
7809 	err = iwm_send_phy_cfg_cmd(sc);
7810 	if (err) {
7811 		printf("%s: could not send phy config (error %d)\n",
7812 		    DEVNAME(sc), err);
7813 		goto err;
7814 	}
7815 
7816 	err = iwm_send_bt_init_conf(sc);
7817 	if (err) {
7818 		printf("%s: could not init bt coex (error %d)\n",
7819 		    DEVNAME(sc), err);
7820 		goto err;
7821 	}
7822 
7823 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
7824 		err = iwm_send_dqa_cmd(sc);
7825 		if (err)
7826 			goto err;
7827 	}
7828 
7829 	/* Add auxiliary station for scanning */
7830 	err = iwm_add_aux_sta(sc);
7831 	if (err) {
7832 		printf("%s: could not add aux station (error %d)\n",
7833 		    DEVNAME(sc), err);
7834 		goto err;
7835 	}
7836 
7837 	for (i = 0; i < 1; i++) {
7838 		/*
7839 		 * The channel used here isn't relevant as it's
7840 		 * going to be overwritten in the other flows.
7841 		 * For now use the first channel we have.
7842 		 */
7843 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
7844 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
7845 		    IWM_FW_CTXT_ACTION_ADD, 0);
7846 		if (err) {
7847 			printf("%s: could not add phy context %d (error %d)\n",
7848 			    DEVNAME(sc), i, err);
7849 			goto err;
7850 		}
7851 	}
7852 
7853 	/* Initialize tx backoffs to the minimum. */
7854 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
7855 		iwm_tt_tx_backoff(sc, 0);
7856 
7858 	err = iwm_config_ltr(sc);
7859 	if (err) {
7860 		printf("%s: PCIe LTR configuration failed (error %d)\n",
7861 		    DEVNAME(sc), err);
7862 	}
7863 
7864 	err = iwm_power_update_device(sc);
7865 	if (err) {
7866 		printf("%s: could not send power command (error %d)\n",
7867 		    DEVNAME(sc), err);
7868 		goto err;
7869 	}
7870 
7871 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
7872 		err = iwm_send_update_mcc_cmd(sc, "ZZ");
7873 		if (err) {
7874 			printf("%s: could not init LAR (error %d)\n",
7875 			    DEVNAME(sc), err);
7876 			goto err;
7877 		}
7878 	}
7879 
7880 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
7881 		err = iwm_config_umac_scan(sc);
7882 		if (err) {
7883 			printf("%s: could not configure scan (error %d)\n",
7884 			    DEVNAME(sc), err);
7885 			goto err;
7886 		}
7887 	}
7888 
7889 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7890 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7891 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
7892 		else
7893 			qid = IWM_AUX_QUEUE;
7894 		err = iwm_enable_txq(sc, IWM_MONITOR_STA_ID, qid,
7895 		    iwm_ac_to_tx_fifo[EDCA_AC_BE]);
7896 		if (err) {
7897 			printf("%s: could not enable monitor inject Tx queue "
7898 			    "(error %d)\n", DEVNAME(sc), err);
7899 			goto err;
7900 		}
7901 	} else {
7902 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
7903 			if (isset(sc->sc_enabled_capa,
7904 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7905 				qid = ac + IWM_DQA_MIN_MGMT_QUEUE;
7906 			else
7907 				qid = ac;
7908 			err = iwm_enable_txq(sc, IWM_STATION_ID, qid,
7909 			    iwm_ac_to_tx_fifo[ac]);
7910 			if (err) {
7911 				printf("%s: could not enable Tx queue %d "
7912 				    "(error %d)\n", DEVNAME(sc), ac, err);
7913 				goto err;
7914 			}
7915 		}
7916 	}
7917 
7918 	err = iwm_disable_beacon_filter(sc);
7919 	if (err) {
7920 		printf("%s: could not disable beacon filter (error %d)\n",
7921 		    DEVNAME(sc), err);
7922 		goto err;
7923 	}
7924 
7925 err:
7926 	iwm_nic_unlock(sc);
7927 	return err;
7928 }
7929 
7930 /* Allow multicast from our BSSID. */
7931 int
7932 iwm_allow_mcast(struct iwm_softc *sc)
7933 {
7934 	struct ieee80211com *ic = &sc->sc_ic;
7935 	struct ieee80211_node *ni = ic->ic_bss;
7936 	struct iwm_mcast_filter_cmd *cmd;
7937 	size_t size;
7938 	int err;
7939 
7940 	size = roundup(sizeof(*cmd), 4);
7941 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
7942 	if (cmd == NULL)
7943 		return ENOMEM;
7944 	cmd->filter_own = 1;
7945 	cmd->port_id = 0;
7946 	cmd->count = 0;
7947 	cmd->pass_all = 1;
7948 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
7949 
7950 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
7951 	    0, size, cmd);
7952 	free(cmd, M_DEVBUF, size);
7953 	return err;
7954 }
7955 
7956 int
7957 iwm_init(struct ifnet *ifp)
7958 {
7959 	struct iwm_softc *sc = ifp->if_softc;
7960 	struct ieee80211com *ic = &sc->sc_ic;
7961 	int err, generation;
7962 
7963 	rw_assert_wrlock(&sc->ioctl_rwl);
7964 
7965 	generation = ++sc->sc_generation;
7966 
7967 	KASSERT(sc->task_refs.refs == 0);
7968 	refcnt_init(&sc->task_refs);
7969 
7970 	err = iwm_init_hw(sc);
7971 	if (err) {
7972 		if (generation == sc->sc_generation)
7973 			iwm_stop(ifp);
7974 		return err;
7975 	}
7976 
7977 	if (sc->sc_nvm.sku_cap_11n_enable)
7978 		iwm_setup_ht_rates(sc);
7979 
7980 	ifq_clr_oactive(&ifp->if_snd);
7981 	ifp->if_flags |= IFF_RUNNING;
7982 
7983 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7984 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
7985 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
7986 		return 0;
7987 	}
7988 
7989 	ieee80211_begin_scan(ifp);
7990 
7991 	/*
7992 	 * ieee80211_begin_scan() ends up scheduling iwm_newstate_task().
7993 	 * Wait until the transition to SCAN state has completed.
7994 	 */
7995 	do {
7996 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwminit",
7997 		    SEC_TO_NSEC(1));
7998 		if (generation != sc->sc_generation)
7999 			return ENXIO;
8000 		if (err)
8001 			return err;
8002 	} while (ic->ic_state != IEEE80211_S_SCAN);
8003 
8004 	return 0;
8005 }
8006 
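/*
 * ifnet start routine. Management frames queued by net80211 are
 * always serviced first; data frames are dequeued only while in
 * RUN state and while the Tx rings have room.
 */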
8007 void
8008 iwm_start(struct ifnet *ifp)
8009 {
8010 	struct iwm_softc *sc = ifp->if_softc;
8011 	struct ieee80211com *ic = &sc->sc_ic;
8012 	struct ieee80211_node *ni;
8013 	struct ether_header *eh;
8014 	struct mbuf *m;
8015 	int ac = EDCA_AC_BE; /* XXX */
8016 
8017 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
8018 		return;
8019 
8020 	for (;;) {
8021 		/* why isn't this done per-queue? */
8022 		if (sc->qfullmsk != 0) {
8023 			ifq_set_oactive(&ifp->if_snd);
8024 			break;
8025 		}
8026 
8027 		/* need to send management frames even if we're not RUNning */
8028 		m = mq_dequeue(&ic->ic_mgtq);
8029 		if (m) {
8030 			ni = m->m_pkthdr.ph_cookie;
8031 			goto sendit;
8032 		}
8033 
8034 		if (ic->ic_state != IEEE80211_S_RUN ||
8035 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
8036 			break;
8037 
8038 		m = ifq_dequeue(&ifp->if_snd);
8039 		if (!m)
8040 			break;
8041 		if (m->m_len < sizeof (*eh) &&
8042 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
8043 			ifp->if_oerrors++;
8044 			continue;
8045 		}
8046 #if NBPFILTER > 0
8047 		if (ifp->if_bpf != NULL)
8048 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
8049 #endif
8050 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
8051 			ifp->if_oerrors++;
8052 			continue;
8053 		}
8054 
8055  sendit:
8056 #if NBPFILTER > 0
8057 		if (ic->ic_rawbpf != NULL)
8058 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
8059 #endif
8060 		if (iwm_tx(sc, m, ni, ac) != 0) {
8061 			ieee80211_release_node(ic, ni);
8062 			ifp->if_oerrors++;
8063 			continue;
8064 		}
8065 
8066 		if (ifp->if_flags & IFF_UP) {
8067 			sc->sc_tx_timer = 15;
8068 			ifp->if_timer = 1;
8069 		}
8070 	}
8071 
8072 	return;
8073 }
8074 
8075 void
8076 iwm_stop(struct ifnet *ifp)
8077 {
8078 	struct iwm_softc *sc = ifp->if_softc;
8079 	struct ieee80211com *ic = &sc->sc_ic;
8080 	struct iwm_node *in = (void *)ic->ic_bss;
8081 	int i, s = splnet();
8082 
8083 	rw_assert_wrlock(&sc->ioctl_rwl);
8084 
8085 	sc->sc_flags |= IWM_FLAG_SHUTDOWN; /* Disallow new tasks. */
8086 
8087 	/* Cancel scheduled tasks and let any stale tasks finish up. */
8088 	task_del(systq, &sc->init_task);
8089 	iwm_del_task(sc, sc->sc_nswq, &sc->newstate_task);
8090 	iwm_del_task(sc, systq, &sc->ba_task);
8091 	iwm_del_task(sc, systq, &sc->htprot_task);
8092 	KASSERT(sc->task_refs.refs >= 1);
8093 	refcnt_finalize(&sc->task_refs, "iwmstop");
8094 
8095 	iwm_stop_device(sc);
8096 
8097 	/* Reset soft state. */
8098 
8099 	sc->sc_generation++;
8100 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
8101 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
8102 		sc->sc_cmd_resp_pkt[i] = NULL;
8103 		sc->sc_cmd_resp_len[i] = 0;
8104 	}
8105 	ifp->if_flags &= ~IFF_RUNNING;
8106 	ifq_clr_oactive(&ifp->if_snd);
8107 
8108 	in->in_phyctxt = NULL;
8109 	if (ic->ic_state == IEEE80211_S_RUN)
8110 		ieee80211_mira_cancel_timeouts(&in->in_mn); /* XXX refcount? */
8111 
8112 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
8113 	sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8114 	sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8115 	sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
8116 	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
8117 	sc->sc_flags &= ~IWM_FLAG_HW_ERR;
8118 	sc->sc_flags &= ~IWM_FLAG_SHUTDOWN;
8119 
8120 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
8121 
8122 	timeout_del(&sc->sc_calib_to); /* XXX refcount? */
8123 	iwm_led_blink_stop(sc);
8124 	ifp->if_timer = sc->sc_tx_timer = 0;
8125 
8126 	splx(s);
8127 }
8128 
8129 void
8130 iwm_watchdog(struct ifnet *ifp)
8131 {
8132 	struct iwm_softc *sc = ifp->if_softc;
8133 
8134 	ifp->if_timer = 0;
8135 	if (sc->sc_tx_timer > 0) {
8136 		if (--sc->sc_tx_timer == 0) {
8137 			printf("%s: device timeout\n", DEVNAME(sc));
8138 #ifdef IWM_DEBUG
8139 			iwm_nic_error(sc);
8140 #endif
8141 			if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
8142 				task_add(systq, &sc->init_task);
8143 			ifp->if_oerrors++;
8144 			return;
8145 		}
8146 		ifp->if_timer = 1;
8147 	}
8148 
8149 	ieee80211_watchdog(ifp);
8150 }
8151 
8152 int
8153 iwm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
8154 {
8155 	struct iwm_softc *sc = ifp->if_softc;
8156 	int s, err = 0, generation = sc->sc_generation;
8157 
8158 	/*
8159 	 * Prevent processes from entering this function while another
8160 	 * process is tsleep'ing in it.
8161 	 */
8162 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
8163 	if (err == 0 && generation != sc->sc_generation) {
8164 		rw_exit(&sc->ioctl_rwl);
8165 		return ENXIO;
8166 	}
8167 	if (err)
8168 		return err;
8169 	s = splnet();
8170 
8171 	switch (cmd) {
8172 	case SIOCSIFADDR:
8173 		ifp->if_flags |= IFF_UP;
8174 		/* FALLTHROUGH */
8175 	case SIOCSIFFLAGS:
8176 		if (ifp->if_flags & IFF_UP) {
8177 			if (!(ifp->if_flags & IFF_RUNNING)) {
8178 				err = iwm_init(ifp);
8179 			}
8180 		} else {
8181 			if (ifp->if_flags & IFF_RUNNING)
8182 				iwm_stop(ifp);
8183 		}
8184 		break;
8185 
8186 	default:
8187 		err = ieee80211_ioctl(ifp, cmd, data);
8188 	}
8189 
8190 	if (err == ENETRESET) {
8191 		err = 0;
8192 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
8193 		    (IFF_UP | IFF_RUNNING)) {
8194 			iwm_stop(ifp);
8195 			err = iwm_init(ifp);
8196 		}
8197 	}
8198 
8199 	splx(s);
8200 	rw_exit(&sc->ioctl_rwl);
8201 
8202 	return err;
8203 }
8204 
8205 #ifdef IWM_DEBUG
8206 /*
8207  * Note: This structure is read from the device with IO accesses,
8208  * and the reading already does the endian conversion. As it is
8209  * read with uint32_t-sized accesses, any members with a different size
8210  * need to be ordered correctly though!
8211  */
8212 struct iwm_error_event_table {
8213 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
8214 	uint32_t error_id;		/* type of error */
8215 	uint32_t trm_hw_status0;	/* TRM HW status */
8216 	uint32_t trm_hw_status1;	/* TRM HW status */
8217 	uint32_t blink2;		/* branch link */
8218 	uint32_t ilink1;		/* interrupt link */
8219 	uint32_t ilink2;		/* interrupt link */
8220 	uint32_t data1;		/* error-specific data */
8221 	uint32_t data2;		/* error-specific data */
8222 	uint32_t data3;		/* error-specific data */
8223 	uint32_t bcon_time;		/* beacon timer */
8224 	uint32_t tsf_low;		/* network timestamp function timer */
8225 	uint32_t tsf_hi;		/* network timestamp function timer */
8226 	uint32_t gp1;		/* GP1 timer register */
8227 	uint32_t gp2;		/* GP2 timer register */
8228 	uint32_t fw_rev_type;	/* firmware revision type */
8229 	uint32_t major;		/* uCode version major */
8230 	uint32_t minor;		/* uCode version minor */
8231 	uint32_t hw_ver;		/* HW Silicon version */
8232 	uint32_t brd_ver;		/* HW board version */
8233 	uint32_t log_pc;		/* log program counter */
8234 	uint32_t frame_ptr;		/* frame pointer */
8235 	uint32_t stack_ptr;		/* stack pointer */
8236 	uint32_t hcmd;		/* last host command header */
8237 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
8238 				 * rxtx_flag */
8239 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
8240 				 * host_flag */
8241 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
8242 				 * enc_flag */
8243 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
8244 				 * time_flag */
8245 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
8246 				 * wico interrupt */
8247 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
8248 	uint32_t wait_event;		/* wait event() caller address */
8249 	uint32_t l2p_control;	/* L2pControlField */
8250 	uint32_t l2p_duration;	/* L2pDurationField */
8251 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
8252 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
8253 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
8254 				 * (LMPM_PMG_SEL) */
8255 	uint32_t u_timestamp;	/* date and time of the
8256 				 * compilation */
8257 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
8258 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
8259 
8260 /*
8261  * UMAC error struct - relevant starting from family 8000 chip.
8262  * Note: This structure is read from the device with IO accesses,
8263  * and the reading already does the endian conversion. As it is
8264  * read with u32-sized accesses, any members with a different size
8265  * need to be ordered correctly though!
8266  */
8267 struct iwm_umac_error_event_table {
8268 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
8269 	uint32_t error_id;	/* type of error */
8270 	uint32_t blink1;	/* branch link */
8271 	uint32_t blink2;	/* branch link */
8272 	uint32_t ilink1;	/* interrupt link */
8273 	uint32_t ilink2;	/* interrupt link */
8274 	uint32_t data1;		/* error-specific data */
8275 	uint32_t data2;		/* error-specific data */
8276 	uint32_t data3;		/* error-specific data */
8277 	uint32_t umac_major;
8278 	uint32_t umac_minor;
8279 	uint32_t frame_pointer;	/* core register 27 */
8280 	uint32_t stack_pointer;	/* core register 28 */
8281 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
8282 	uint32_t nic_isr_pref;	/* ISR status register */
8283 } __packed;
8284 
8285 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
8286 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
8287 
8288 void
8289 iwm_nic_umac_error(struct iwm_softc *sc)
8290 {
8291 	struct iwm_umac_error_event_table table;
8292 	uint32_t base;
8293 
8294 	base = sc->sc_uc.uc_umac_error_event_table;
8295 
8296 	if (base < 0x800000) {
8297 		printf("%s: Invalid error log pointer 0x%08x\n",
8298 		    DEVNAME(sc), base);
8299 		return;
8300 	}
8301 
8302 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8303 		printf("%s: reading errlog failed\n", DEVNAME(sc));
8304 		return;
8305 	}
8306 
8307 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8308 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
8309 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8310 			sc->sc_flags, table.valid);
8311 	}
8312 
8313 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
8314 		iwm_desc_lookup(table.error_id));
8315 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
8316 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
8317 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
8318 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
8319 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
8320 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
8321 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
8322 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
8323 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
8324 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
8325 	    table.frame_pointer);
8326 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
8327 	    table.stack_pointer);
8328 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
8329 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
8330 	    table.nic_isr_pref);
8331 }
8332 
8333 #define IWM_FW_SYSASSERT_CPU_MASK 0xf0000000
8334 static struct {
8335 	const char *name;
8336 	uint8_t num;
8337 } advanced_lookup[] = {
8338 	{ "NMI_INTERRUPT_WDG", 0x34 },
8339 	{ "SYSASSERT", 0x35 },
8340 	{ "UCODE_VERSION_MISMATCH", 0x37 },
8341 	{ "BAD_COMMAND", 0x38 },
8342 	{ "BAD_COMMAND", 0x39 },
8343 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
8344 	{ "FATAL_ERROR", 0x3D },
8345 	{ "NMI_TRM_HW_ERR", 0x46 },
8346 	{ "NMI_INTERRUPT_TRM", 0x4C },
8347 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
8348 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
8349 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
8350 	{ "NMI_INTERRUPT_HOST", 0x66 },
8351 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
8352 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
8353 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
8354 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
8355 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
8356 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
8357 	{ "ADVANCED_SYSASSERT", 0 },
8358 };
8359 
8360 const char *
8361 iwm_desc_lookup(uint32_t num)
8362 {
8363 	int i;
8364 
8365 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
8366 		if (advanced_lookup[i].num ==
8367 		    (num & ~IWM_FW_SYSASSERT_CPU_MASK))
8368 			return advanced_lookup[i].name;
8369 
8370 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
8371 	return advanced_lookup[i].name;
8372 }
8373 
8374 /*
8375  * Support for dumping the error log seemed like a good idea ...
8376  * but it's mostly hex junk and the only sensible thing is the
8377  * hw/ucode revision (which we know anyway).  Since it's here,
8378  * I'll just leave it in, just in case e.g. the Intel guys want to
8379  * help us decipher some "ADVANCED_SYSASSERT" later.
8380  */
8381 void
8382 iwm_nic_error(struct iwm_softc *sc)
8383 {
8384 	struct iwm_error_event_table table;
8385 	uint32_t base;
8386 
8387 	printf("%s: dumping device error log\n", DEVNAME(sc));
8388 	base = sc->sc_uc.uc_error_event_table;
8389 	if (base < 0x800000) {
8390 		printf("%s: Invalid error log pointer 0x%08x\n",
8391 		    DEVNAME(sc), base);
8392 		return;
8393 	}
8394 
8395 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8396 		printf("%s: reading errlog failed\n", DEVNAME(sc));
8397 		return;
8398 	}
8399 
8400 	if (!table.valid) {
8401 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
8402 		return;
8403 	}
8404 
8405 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8406 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
8407 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8408 		    sc->sc_flags, table.valid);
8409 	}
8410 
8411 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
8412 	    iwm_desc_lookup(table.error_id));
8413 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
8414 	    table.trm_hw_status0);
8415 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
8416 	    table.trm_hw_status1);
8417 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
8418 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
8419 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
8420 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
8421 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
8422 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
8423 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
8424 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
8425 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
8426 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
8427 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
8428 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
8429 	    table.fw_rev_type);
8430 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
8431 	    table.major);
8432 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
8433 	    table.minor);
8434 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
8435 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
8436 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
8437 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
8438 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
8439 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
8440 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
8441 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
8442 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
8443 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
8444 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
8445 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
8446 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
8447 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
8448 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
8449 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
8450 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
8451 
8452 	if (sc->sc_uc.uc_umac_error_event_table)
8453 		iwm_nic_umac_error(sc);
8454 }
8455 #endif
8456 
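/*
 * Helpers for reading command responses: sync the part of the Rx
 * buffer which follows the packet header, then point at the payload.
 * Typical usage (sketch; iwm_foo_notif is a placeholder):
 *
 *	struct iwm_foo_notif *notif;
 *	SYNC_RESP_STRUCT(notif, pkt);
 */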
8457 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
8458 do {									\
8459 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
8460 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
8461 	_var_ = (void *)((_pkt_)+1);					\
8462 } while (/*CONSTCOND*/0)
8463 
8464 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
8465 do {									\
8466 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
8467 	    (_len_), BUS_DMASYNC_POSTREAD);				\
8468 	_ptr_ = (void *)((_pkt_)+1);					\
8469 } while (/*CONSTCOND*/0)
8470 
8471 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % count);
8472 
8473 int
8474 iwm_rx_pkt_valid(struct iwm_rx_packet *pkt)
8475 {
8476 	int qid, idx, code;
8477 
8478 	qid = pkt->hdr.qid & ~0x80;
8479 	idx = pkt->hdr.idx;
8480 	code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8481 
8482 	return (!(qid == 0 && idx == 0 && code == 0) &&
8483 	    pkt->len_n_flags != htole32(IWM_FH_RSCSR_FRAME_INVALID));
8484 }
8485 
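/*
 * Process one Rx buffer, which may contain several concatenated
 * firmware packets when multi-queue Rx is in use. Received frames
 * are appended to the caller's mbuf list; synchronous command
 * responses are copied out for iwm_send_cmd() waiters.
 */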
8486 void
8487 iwm_rx_pkt(struct iwm_softc *sc, struct iwm_rx_data *data, struct mbuf_list *ml)
8488 {
8489 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8490 	struct iwm_rx_packet *pkt, *nextpkt;
8491 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
8492 	struct mbuf *m0, *m;
8493 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
8494 	size_t remain = IWM_RBUF_SIZE;
8495 	int qid, idx, code, handled = 1;
8496 
8497 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
8498 	    BUS_DMASYNC_POSTREAD);
8499 
8500 	m0 = data->m;
8501 	while (m0 && offset + minsz < IWM_RBUF_SIZE) {
8502 		pkt = (struct iwm_rx_packet *)(m0->m_data + offset);
8503 		qid = pkt->hdr.qid;
8504 		idx = pkt->hdr.idx;
8505 
8506 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8507 
8508 		if (!iwm_rx_pkt_valid(pkt))
8509 			break;
8510 
8511 		len = sizeof(pkt->len_n_flags) + iwm_rx_packet_len(pkt);
8512 		if (len < sizeof(pkt->hdr) ||
8513 		    len > (IWM_RBUF_SIZE - offset - minsz))
8514 			break;
8515 
8516 		if (code == IWM_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
8517 			/* Take mbuf m0 off the RX ring. */
8518 			if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur)) {
8519 				ifp->if_ierrors++;
8520 				break;
8521 			}
8522 			KASSERT(data->m != m0);
8523 		}
8524 
8525 		switch (code) {
8526 		case IWM_REPLY_RX_PHY_CMD:
8527 			iwm_rx_rx_phy_cmd(sc, pkt, data);
8528 			break;
8529 
8530 		case IWM_REPLY_RX_MPDU_CMD: {
8531 			size_t maxlen = remain - minsz;
8532 			nextoff = offset +
8533 			    roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
8534 			nextpkt = (struct iwm_rx_packet *)
8535 			    (m0->m_data + nextoff);
8536 			if (nextoff + minsz >= IWM_RBUF_SIZE ||
8537 			    !iwm_rx_pkt_valid(nextpkt)) {
8538 				/* No need to copy last frame in buffer. */
8539 				if (offset > 0)
8540 					m_adj(m0, offset);
8541 				if (sc->sc_mqrx_supported)
8542 					iwm_rx_mpdu_mq(sc, m0, pkt->data,
8543 					    maxlen, ml);
8544 				else
8545 					iwm_rx_mpdu(sc, m0, pkt->data,
8546 					    maxlen, ml);
8547 				m0 = NULL; /* stack owns m0 now; abort loop */
8548 			} else {
8549 				/*
8550 				 * Create an mbuf which points to the current
8551 				 * packet. Always copy from offset zero to
8552 				 * preserve m_pkthdr.
8553 				 */
8554 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
8555 				if (m == NULL) {
8556 					ifp->if_ierrors++;
8557 					m_freem(m0);
8558 					m0 = NULL;
8559 					break;
8560 				}
8561 				m_adj(m, offset);
8562 				if (sc->sc_mqrx_supported)
8563 					iwm_rx_mpdu_mq(sc, m, pkt->data,
8564 					    maxlen, ml);
8565 				else
8566 					iwm_rx_mpdu(sc, m, pkt->data,
8567 					    maxlen, ml);
8568 			}
8569 
8570 			if (offset + minsz < remain)
8571 				remain -= offset;
8572 			else
8573 				remain = minsz;
8574 			break;
8575 		}
8576 
8577 		case IWM_TX_CMD:
8578 			iwm_rx_tx_cmd(sc, pkt, data);
8579 			break;
8580 
8581 		case IWM_MISSED_BEACONS_NOTIFICATION:
8582 			iwm_rx_bmiss(sc, pkt, data);
8583 			break;
8584 
8585 		case IWM_MFUART_LOAD_NOTIFICATION:
8586 			break;
8587 
8588 		case IWM_ALIVE: {
8589 			struct iwm_alive_resp_v1 *resp1;
8590 			struct iwm_alive_resp_v2 *resp2;
8591 			struct iwm_alive_resp_v3 *resp3;
8592 
8593 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
8594 				SYNC_RESP_STRUCT(resp1, pkt);
8595 				sc->sc_uc.uc_error_event_table
8596 				    = le32toh(resp1->error_event_table_ptr);
8597 				sc->sc_uc.uc_log_event_table
8598 				    = le32toh(resp1->log_event_table_ptr);
8599 				sc->sched_base = le32toh(resp1->scd_base_ptr);
8600 				if (resp1->status == IWM_ALIVE_STATUS_OK)
8601 					sc->sc_uc.uc_ok = 1;
8602 				else
8603 					sc->sc_uc.uc_ok = 0;
8604 			}
8605 
8606 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
8607 				SYNC_RESP_STRUCT(resp2, pkt);
8608 				sc->sc_uc.uc_error_event_table
8609 				    = le32toh(resp2->error_event_table_ptr);
8610 				sc->sc_uc.uc_log_event_table
8611 				    = le32toh(resp2->log_event_table_ptr);
8612 				sc->sched_base = le32toh(resp2->scd_base_ptr);
8613 				sc->sc_uc.uc_umac_error_event_table
8614 				    = le32toh(resp2->error_info_addr);
8615 				if (resp2->status == IWM_ALIVE_STATUS_OK)
8616 					sc->sc_uc.uc_ok = 1;
8617 				else
8618 					sc->sc_uc.uc_ok = 0;
8619 			}
8620 
8621 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
8622 				SYNC_RESP_STRUCT(resp3, pkt);
8623 				sc->sc_uc.uc_error_event_table
8624 				    = le32toh(resp3->error_event_table_ptr);
8625 				sc->sc_uc.uc_log_event_table
8626 				    = le32toh(resp3->log_event_table_ptr);
8627 				sc->sched_base = le32toh(resp3->scd_base_ptr);
8628 				sc->sc_uc.uc_umac_error_event_table
8629 				    = le32toh(resp3->error_info_addr);
8630 				if (resp3->status == IWM_ALIVE_STATUS_OK)
8631 					sc->sc_uc.uc_ok = 1;
8632 				else
8633 					sc->sc_uc.uc_ok = 0;
8634 			}
8635 
8636 			sc->sc_uc.uc_intr = 1;
8637 			wakeup(&sc->sc_uc);
8638 			break;
8639 		}
8640 
8641 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
8642 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
8643 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
8644 			iwm_phy_db_set_section(sc, phy_db_notif);
8645 			sc->sc_init_complete |= IWM_CALIB_COMPLETE;
8646 			wakeup(&sc->sc_init_complete);
8647 			break;
8648 		}
8649 
8650 		case IWM_STATISTICS_NOTIFICATION: {
8651 			struct iwm_notif_statistics *stats;
8652 			SYNC_RESP_STRUCT(stats, pkt);
8653 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
8654 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
8655 			break;
8656 		}
8657 
8658 		case IWM_MCC_CHUB_UPDATE_CMD: {
8659 			struct iwm_mcc_chub_notif *notif;
8660 			SYNC_RESP_STRUCT(notif, pkt);
8661 
8662 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
8663 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
8664 			sc->sc_fw_mcc[2] = '\0';
			break;
8665 		}
8666 
8667 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
8668 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
8669 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE):
8670 			break;
8671 
8672 		case IWM_ADD_STA_KEY:
8673 		case IWM_PHY_CONFIGURATION_CMD:
8674 		case IWM_TX_ANT_CONFIGURATION_CMD:
8675 		case IWM_ADD_STA:
8676 		case IWM_MAC_CONTEXT_CMD:
8677 		case IWM_REPLY_SF_CFG_CMD:
8678 		case IWM_POWER_TABLE_CMD:
8679 		case IWM_LTR_CONFIG:
8680 		case IWM_PHY_CONTEXT_CMD:
8681 		case IWM_BINDING_CONTEXT_CMD:
8682 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_CFG_CMD):
8683 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_REQ_UMAC):
8684 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
8685 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
8686 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
8687 		case IWM_REPLY_BEACON_FILTERING_CMD:
8688 		case IWM_MAC_PM_POWER_TABLE:
8689 		case IWM_TIME_QUOTA_CMD:
8690 		case IWM_REMOVE_STA:
8691 		case IWM_TXPATH_FLUSH:
8692 		case IWM_LQ_CMD:
8693 		case IWM_WIDE_ID(IWM_LONG_GROUP,
8694 				 IWM_FW_PAGING_BLOCK_CMD):
8695 		case IWM_BT_CONFIG:
8696 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
8697 		case IWM_NVM_ACCESS_CMD:
8698 		case IWM_MCC_UPDATE_CMD:
8699 		case IWM_TIME_EVENT_CMD: {
8700 			size_t pkt_len;
8701 
8702 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
8703 				break;
8704 
8705 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
8706 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
8707 
8708 			pkt_len = sizeof(pkt->len_n_flags) +
8709 			    iwm_rx_packet_len(pkt);
8710 
8711 			if ((pkt->hdr.flags & IWM_CMD_FAILED_MSK) ||
8712 			    pkt_len < sizeof(*pkt) ||
8713 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
8714 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
8715 				    sc->sc_cmd_resp_len[idx]);
8716 				sc->sc_cmd_resp_pkt[idx] = NULL;
8717 				break;
8718 			}
8719 
8720 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
8721 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
8722 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
8723 			break;
8724 		}
8725 
8726 		/* ignore */
8727 		case IWM_PHY_DB_CMD:
8728 			break;
8729 
8730 		case IWM_INIT_COMPLETE_NOTIF:
8731 			sc->sc_init_complete |= IWM_INIT_COMPLETE;
8732 			wakeup(&sc->sc_init_complete);
8733 			break;
8734 
8735 		case IWM_SCAN_OFFLOAD_COMPLETE: {
8736 			struct iwm_periodic_scan_complete *notif;
8737 			SYNC_RESP_STRUCT(notif, pkt);
8738 			break;
8739 		}
8740 
8741 		case IWM_SCAN_ITERATION_COMPLETE: {
8742 			struct iwm_lmac_scan_complete_notif *notif;
8743 			SYNC_RESP_STRUCT(notif, pkt);
8744 			iwm_endscan(sc);
8745 			break;
8746 		}
8747 
8748 		case IWM_SCAN_COMPLETE_UMAC: {
8749 			struct iwm_umac_scan_complete *notif;
8750 			SYNC_RESP_STRUCT(notif, pkt);
8751 			iwm_endscan(sc);
8752 			break;
8753 		}
8754 
8755 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
8756 			struct iwm_umac_scan_iter_complete_notif *notif;
8757 			SYNC_RESP_STRUCT(notif, pkt);
8758 			iwm_endscan(sc);
8759 			break;
8760 		}
8761 
8762 		case IWM_REPLY_ERROR: {
8763 			struct iwm_error_resp *resp;
8764 			SYNC_RESP_STRUCT(resp, pkt);
8765 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
8766 				DEVNAME(sc), le32toh(resp->error_type),
8767 				resp->cmd_id);
8768 			break;
8769 		}
8770 
8771 		case IWM_TIME_EVENT_NOTIFICATION: {
8772 			struct iwm_time_event_notif *notif;
8773 			uint32_t action;
8774 			SYNC_RESP_STRUCT(notif, pkt);
8775 
8776 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
8777 				break;
8778 			action = le32toh(notif->action);
8779 			if (action & IWM_TE_V2_NOTIF_HOST_EVENT_END)
8780 				sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
8781 			break;
8782 		}
8783 
8784 		case IWM_WIDE_ID(IWM_SYSTEM_GROUP,
8785 		    IWM_FSEQ_VER_MISMATCH_NOTIFICATION):
8786 		    break;
8787 
8788 		/*
8789 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
8790 		 * messages. Just ignore them for now.
8791 		 */
8792 		case IWM_DEBUG_LOG_MSG:
8793 			break;
8794 
8795 		case IWM_MCAST_FILTER_CMD:
8796 			break;
8797 
8798 		case IWM_SCD_QUEUE_CFG: {
8799 			struct iwm_scd_txq_cfg_rsp *rsp;
8800 			SYNC_RESP_STRUCT(rsp, pkt);
8801 
8802 			break;
8803 		}
8804 
8805 		case IWM_WIDE_ID(IWM_DATA_PATH_GROUP, IWM_DQA_ENABLE_CMD):
8806 			break;
8807 
8808 		default:
8809 			handled = 0;
8810 			printf("%s: unhandled firmware response 0x%x/0x%x "
8811 			    "rx ring %d[%d]\n",
8812 			    DEVNAME(sc), code, pkt->len_n_flags,
8813 			    (qid & ~0x80), idx);
8814 			break;
8815 		}
8816 
8817 		/*
8818 		 * uCode sets bit 0x80 when it originates the notification,
8819 		 * i.e. when the notification is not a direct response to a
8820 		 * command sent by the driver.
8821 		 * For example, uCode issues IWM_REPLY_RX when it sends a
8822 		 * received frame to the driver.
8823 		 */
8824 		if (handled && !(qid & (1 << 7))) {
8825 			iwm_cmd_done(sc, qid, idx, code);
8826 		}
8827 
8828 		offset += roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
8829 	}
8830 
8831 	if (m0 && m0 != data->m)
8832 		m_freem(m0);
8833 }
8834 
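/*
 * Process all packets the firmware has sent us since the last interrupt,
 * then tell the hardware which RX ring entries we have consumed.
 */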
8835 void
8836 iwm_notif_intr(struct iwm_softc *sc)
8837 {
8838 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
8839 	uint32_t wreg;
8840 	uint16_t hw;
8841 	int count;
8842 
8843 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
8844 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
8845 
8846 	if (sc->sc_mqrx_supported) {
8847 		count = IWM_RX_MQ_RING_COUNT;
8848 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
8849 	} else {
8850 		count = IWM_RX_RING_COUNT;
8851 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
8852 	}
8853 
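	/*
	 * closed_rb_num is the hardware's index of the most recently
	 * closed receive buffer; process ring entries until our index
	 * catches up with it.
	 */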
8854 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
8855 	hw &= (count - 1);
8856 	while (sc->rxq.cur != hw) {
8857 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
8858 		iwm_rx_pkt(sc, data, &ml);
8859 		ADVANCE_RXQ(sc);
8860 	}
8861 	if_input(&sc->sc_ic.ic_if, &ml);
8862 
	/*
	 * Tell the firmware what we have processed.
	 * The hardware appears to require this write pointer to be
	 * aligned to 8; unaligned writes seem to upset it.
	 */
8867 	hw = (hw == 0) ? count - 1 : hw - 1;
8868 	IWM_WRITE(sc, wreg, hw & ~7);
8869 }
8870 
8871 int
8872 iwm_intr(void *arg)
8873 {
8874 	struct iwm_softc *sc = arg;
8875 	int handled = 0;
8876 	int rv = 0;
8877 	uint32_t r1, r2;
8878 
8879 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
8880 
8881 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
8882 		uint32_t *ict = sc->ict_dma.vaddr;
8883 		int tmp;
8884 
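		/*
		 * With ICT, the device DMAs interrupt causes into a table
		 * in host memory, sparing us a slow register read here.
		 */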
		tmp = le32toh(ict[sc->ict_cur]);
8886 		if (!tmp)
8887 			goto out_ena;
8888 
		/*
		 * There was at least one pending entry; keep reading ICT
		 * entries until we have collected them all.
		 */
8892 		r1 = r2 = 0;
8893 		while (tmp) {
8894 			r1 |= tmp;
8895 			ict[sc->ict_cur] = 0;
8896 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = le32toh(ict[sc->ict_cur]);
8898 		}
8899 
		/* An all-ones value is bogus (e.g. the device is gone); ignore it. */
8901 		if (r1 == 0xffffffff)
8902 			r1 = 0;
8903 
8904 		/*
8905 		 * Workaround for hardware bug where bits are falsely cleared
8906 		 * when using interrupt coalescing.  Bit 15 should be set if
8907 		 * bits 18 and 19 are set.
8908 		 */
8909 		if (r1 & 0xc0000)
8910 			r1 |= 0x8000;
8911 
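		/* Convert the compacted ICT bit layout back to CSR_INT format. */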
8912 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
8913 	} else {
8914 		r1 = IWM_READ(sc, IWM_CSR_INT);
8915 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
8916 	}
8917 	if (r1 == 0 && r2 == 0) {
8918 		goto out_ena;
8919 	}
8920 	if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
8921 		goto out;
8922 
8923 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
8924 
8925 	/* ignored */
8926 	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
8927 
8928 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
8929 		handled |= IWM_CSR_INT_BIT_RF_KILL;
8930 		iwm_check_rfkill(sc);
8931 		task_add(systq, &sc->init_task);
8932 		rv = 1;
8933 		goto out_ena;
8934 	}
8935 
8936 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
8937 #ifdef IWM_DEBUG
8938 		int i;
8939 
8940 		iwm_nic_error(sc);
8941 
8942 		/* Dump driver status (TX and RX rings) while we're here. */
8943 		DPRINTF(("driver status:\n"));
8944 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
8945 			struct iwm_tx_ring *ring = &sc->txq[i];
8946 			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
8947 			    "queued=%-3d\n",
8948 			    i, ring->qid, ring->cur, ring->queued));
8949 		}
8950 		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
8951 		DPRINTF(("  802.11 state %s\n",
8952 		    ieee80211_state_name[sc->sc_ic.ic_state]));
8953 #endif
8954 
8955 		printf("%s: fatal firmware error\n", DEVNAME(sc));
8956 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
8957 			task_add(systq, &sc->init_task);
8958 		rv = 1;
8959 		goto out;
8960 
8961 	}
8962 
8963 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
8964 		handled |= IWM_CSR_INT_BIT_HW_ERR;
		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
8966 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
8967 			sc->sc_flags |= IWM_FLAG_HW_ERR;
8968 			task_add(systq, &sc->init_task);
8969 		}
8970 		rv = 1;
8971 		goto out;
8972 	}
8973 
8974 	/* firmware chunk loaded */
8975 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
8976 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
8977 		handled |= IWM_CSR_INT_BIT_FH_TX;
8978 
8979 		sc->sc_fw_chunk_done = 1;
8980 		wakeup(&sc->sc_fw);
8981 	}
8982 
8983 	if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX |
8984 	    IWM_CSR_INT_BIT_RX_PERIODIC)) {
8985 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) {
8986 			handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
8987 			IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
8988 		}
8989 		if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
8990 			handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
8991 			IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
8992 		}
8993 
8994 		/* Disable periodic interrupt; we use it as just a one-shot. */
8995 		IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
8996 
		/*
		 * Re-enable the periodic interrupt (fires in 8 msec) only if
		 * we received a real RX interrupt (instead of just the
		 * periodic one), to catch any dangling RX interrupt.  If it
		 * was just the periodic interrupt, there was no dangling RX
		 * activity, and there is no need to extend it; one-shot is
		 * enough.
		 */
9004 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX))
9005 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
9006 			    IWM_CSR_INT_PERIODIC_ENA);
9007 
9008 		iwm_notif_intr(sc);
9009 	}
9010 
9011 	rv = 1;
9012 
9013  out_ena:
9014 	iwm_restore_interrupts(sc);
9015  out:
9016 	return rv;
9017 }
9018 
9019 int
9020 iwm_intr_msix(void *arg)
9021 {
9022 	struct iwm_softc *sc = arg;
9023 	uint32_t inta_fh, inta_hw;
9024 	int vector = 0;
9025 
9026 	inta_fh = IWM_READ(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD);
9027 	inta_hw = IWM_READ(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD);
9028 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
9029 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
9030 	inta_fh &= sc->sc_fh_mask;
9031 	inta_hw &= sc->sc_hw_mask;
9032 
9033 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_Q0 ||
9034 	    inta_fh & IWM_MSIX_FH_INT_CAUSES_Q1) {
9035 		iwm_notif_intr(sc);
9036 	}
9037 
9038 	/* firmware chunk loaded */
9039 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
9040 		sc->sc_fw_chunk_done = 1;
9041 		wakeup(&sc->sc_fw);
9042 	}
9043 
9044 	if ((inta_fh & IWM_MSIX_FH_INT_CAUSES_FH_ERR) ||
9045 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
9046 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
9047 #ifdef IWM_DEBUG
9048 		int i;
9049 
9050 		iwm_nic_error(sc);
9051 
9052 		/* Dump driver status (TX and RX rings) while we're here. */
9053 		DPRINTF(("driver status:\n"));
9054 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
9055 			struct iwm_tx_ring *ring = &sc->txq[i];
9056 			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
9057 			    "queued=%-3d\n",
9058 			    i, ring->qid, ring->cur, ring->queued));
9059 		}
9060 		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
9061 		DPRINTF(("  802.11 state %s\n",
9062 		    ieee80211_state_name[sc->sc_ic.ic_state]));
9063 #endif
9064 
9065 		printf("%s: fatal firmware error\n", DEVNAME(sc));
9066 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
9067 			task_add(systq, &sc->init_task);
9068 		return 1;
9069 	}
9070 
9071 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
9072 		iwm_check_rfkill(sc);
9073 		task_add(systq, &sc->init_task);
9074 	}
9075 
9076 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
9078 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
9079 			sc->sc_flags |= IWM_FLAG_HW_ERR;
9080 			task_add(systq, &sc->init_task);
9081 		}
9082 		return 1;
9083 	}
9084 
	/*
	 * Before raising the interrupt, the hardware disables it to
	 * prevent a nested interrupt, by setting the corresponding bit
	 * in the automask register.  After the interrupt has been
	 * handled it must be re-enabled by clearing that bit.  The
	 * register is write-1-clear (W1C): a bit is cleared by
	 * writing 1 to it.
	 */
9093 	IWM_WRITE(sc, IWM_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
9094 	return 1;
9095 }
9096 
9097 typedef void *iwm_match_t;
9098 
9099 static const struct pci_matchid iwm_devices[] = {
9100 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_1 },
9101 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_2 },
9102 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_1 },
9103 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_2 },
9104 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3168_1 },
9105 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_1 },
9106 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_2 },
9107 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_1 },
9108 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_2 },
9109 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_1 },
9110 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_2 },
9111 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8265_1 },
9112 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9260_1 },
9113 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_1 },
9114 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_2 },
9115 };
9116 
9117 int
9118 iwm_match(struct device *parent, iwm_match_t match __unused, void *aux)
9119 {
9120 	return pci_matchbyid((struct pci_attach_args *)aux, iwm_devices,
9121 	    nitems(iwm_devices));
9122 }
9123 
9124 int
9125 iwm_preinit(struct iwm_softc *sc)
9126 {
9127 	struct ieee80211com *ic = &sc->sc_ic;
9128 	struct ifnet *ifp = IC2IFP(ic);
9129 	int err;
9130 	static int attached;
9131 
9132 	err = iwm_prepare_card_hw(sc);
9133 	if (err) {
9134 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9135 		return err;
9136 	}
9137 
9138 	if (attached) {
9139 		/* Update MAC in case the upper layers changed it. */
9140 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
9141 		    ((struct arpcom *)ifp)->ac_enaddr);
9142 		return 0;
9143 	}
9144 
9145 	err = iwm_start_hw(sc);
9146 	if (err) {
9147 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9148 		return err;
9149 	}
9150 
9151 	err = iwm_run_init_mvm_ucode(sc, 1);
9152 	iwm_stop_device(sc);
9153 	if (err)
9154 		return err;
9155 
9156 	/* Print version info and MAC address on first successful fw load. */
9157 	attached = 1;
9158 	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
9159 	    DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
9160 	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
9161 
9162 	if (sc->sc_nvm.sku_cap_11n_enable)
9163 		iwm_setup_ht_rates(sc);
9164 
9165 	/* not all hardware can do 5GHz band */
9166 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
9167 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
9168 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
9169 
9170 	/* Configure channel information obtained from firmware. */
9171 	ieee80211_channel_init(ifp);
9172 
9173 	/* Configure MAC address. */
9174 	err = if_setlladdr(ifp, ic->ic_myaddr);
9175 	if (err)
9176 		printf("%s: could not set MAC address (error %d)\n",
9177 		    DEVNAME(sc), err);
9178 
9179 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
9180 
9181 	return 0;
9182 }
9183 
9184 void
9185 iwm_attach_hook(struct device *self)
9186 {
9187 	struct iwm_softc *sc = (void *)self;
9188 
9189 	KASSERT(!cold);
9190 
9191 	iwm_preinit(sc);
9192 }
9193 
9194 void
9195 iwm_attach(struct device *parent, struct device *self, void *aux)
9196 {
9197 	struct iwm_softc *sc = (void *)self;
9198 	struct pci_attach_args *pa = aux;
9199 	pci_intr_handle_t ih;
9200 	pcireg_t reg, memtype;
9201 	struct ieee80211com *ic = &sc->sc_ic;
9202 	struct ifnet *ifp = &ic->ic_if;
9203 	const char *intrstr;
9204 	int err;
9205 	int txq_i, i;
9206 
9207 	sc->sc_pct = pa->pa_pc;
9208 	sc->sc_pcitag = pa->pa_tag;
9209 	sc->sc_dmat = pa->pa_dmat;
9210 
9211 	rw_init(&sc->ioctl_rwl, "iwmioctl");
9212 
9213 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
9214 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
9215 	if (err == 0) {
9216 		printf("%s: PCIe capability structure not found!\n",
9217 		    DEVNAME(sc));
9218 		return;
9219 	}
9220 
9221 	/* Clear device-specific "PCI retry timeout" register (41h). */
9222 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
9223 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
9224 
9225 	/* Enable bus-mastering and hardware bug workaround. */
9226 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
9227 	reg |= PCI_COMMAND_MASTER_ENABLE;
	/* Ensure legacy INTx is enabled in case MSI/MSI-X is unavailable. */
	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
	}
9232 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
9233 
9234 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
9235 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
9236 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
9237 	if (err) {
9238 		printf("%s: can't map mem space\n", DEVNAME(sc));
9239 		return;
9240 	}
9241 
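	/* Prefer MSI-X, then MSI, then legacy INTx. */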
9242 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
9243 		sc->sc_msix = 1;
9244 	} else if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
9245 		printf("%s: can't map interrupt\n", DEVNAME(sc));
9246 		return;
9247 	}
9248 
9249 	intrstr = pci_intr_string(sc->sc_pct, ih);
9250 	if (sc->sc_msix)
9251 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
9252 		    iwm_intr_msix, sc, DEVNAME(sc));
9253 	else
9254 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
9255 		    iwm_intr, sc, DEVNAME(sc));
9256 
9257 	if (sc->sc_ih == NULL) {
9258 		printf("\n");
9259 		printf("%s: can't establish interrupt", DEVNAME(sc));
9260 		if (intrstr != NULL)
9261 			printf(" at %s", intrstr);
9262 		printf("\n");
9263 		return;
9264 	}
9265 	printf(", %s\n", intrstr);
9266 
9267 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
9268 	switch (PCI_PRODUCT(pa->pa_id)) {
9269 	case PCI_PRODUCT_INTEL_WL_3160_1:
9270 	case PCI_PRODUCT_INTEL_WL_3160_2:
9271 		sc->sc_fwname = "iwm-3160-17";
9272 		sc->host_interrupt_operation_mode = 1;
9273 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
9274 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
9275 		sc->sc_nvm_max_section_size = 16384;
9276 		sc->nvm_type = IWM_NVM;
9277 		break;
9278 	case PCI_PRODUCT_INTEL_WL_3165_1:
9279 	case PCI_PRODUCT_INTEL_WL_3165_2:
9280 		sc->sc_fwname = "iwm-7265-17";
9281 		sc->host_interrupt_operation_mode = 0;
9282 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
9283 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
9284 		sc->sc_nvm_max_section_size = 16384;
9285 		sc->nvm_type = IWM_NVM;
9286 		break;
9287 	case PCI_PRODUCT_INTEL_WL_3168_1:
9288 		sc->sc_fwname = "iwm-3168-29";
9289 		sc->host_interrupt_operation_mode = 0;
9290 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
9291 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
9292 		sc->sc_nvm_max_section_size = 16384;
9293 		sc->nvm_type = IWM_NVM_SDP;
9294 		break;
9295 	case PCI_PRODUCT_INTEL_WL_7260_1:
9296 	case PCI_PRODUCT_INTEL_WL_7260_2:
9297 		sc->sc_fwname = "iwm-7260-17";
9298 		sc->host_interrupt_operation_mode = 1;
9299 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
9300 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
9301 		sc->sc_nvm_max_section_size = 16384;
9302 		sc->nvm_type = IWM_NVM;
9303 		break;
9304 	case PCI_PRODUCT_INTEL_WL_7265_1:
9305 	case PCI_PRODUCT_INTEL_WL_7265_2:
9306 		sc->sc_fwname = "iwm-7265-17";
9307 		sc->host_interrupt_operation_mode = 0;
9308 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
9309 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
9310 		sc->sc_nvm_max_section_size = 16384;
9311 		sc->nvm_type = IWM_NVM;
9312 		break;
9313 	case PCI_PRODUCT_INTEL_WL_8260_1:
9314 	case PCI_PRODUCT_INTEL_WL_8260_2:
9315 		sc->sc_fwname = "iwm-8000C-34";
9316 		sc->host_interrupt_operation_mode = 0;
9317 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
9318 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
9319 		sc->sc_nvm_max_section_size = 32768;
9320 		sc->nvm_type = IWM_NVM_EXT;
9321 		break;
9322 	case PCI_PRODUCT_INTEL_WL_8265_1:
9323 		sc->sc_fwname = "iwm-8265-34";
9324 		sc->host_interrupt_operation_mode = 0;
9325 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
9326 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
9327 		sc->sc_nvm_max_section_size = 32768;
9328 		sc->nvm_type = IWM_NVM_EXT;
9329 		break;
9330 	case PCI_PRODUCT_INTEL_WL_9260_1:
9331 		sc->sc_fwname = "iwm-9260-34";
9332 		sc->host_interrupt_operation_mode = 0;
9333 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
9334 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
9335 		sc->sc_nvm_max_section_size = 32768;
9336 		sc->sc_mqrx_supported = 1;
9337 		break;
9338 	case PCI_PRODUCT_INTEL_WL_9560_1:
9339 	case PCI_PRODUCT_INTEL_WL_9560_2:
9340 		sc->sc_fwname = "iwm-9000-34";
9341 		sc->host_interrupt_operation_mode = 0;
9342 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
9343 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
9344 		sc->sc_nvm_max_section_size = 32768;
9345 		sc->sc_mqrx_supported = 1;
9346 		sc->sc_integrated = 1;
9347 		break;
9348 	default:
9349 		printf("%s: unknown adapter type\n", DEVNAME(sc));
9350 		return;
9351 	}
9352 
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed, and the revision step now also includes bits 0-1 (there
	 * is no more "dash" value). To keep hw_rev backwards compatible, we
	 * store it in the old format.
	 */
9359 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
9360 		uint32_t hw_step;
9361 
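		/*
		 * Keep bits 4-15 and move the new two-bit step (bits 0-1)
		 * into the legacy step field at bits 2-3.
		 */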
9362 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
9363 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
9364 
9365 		if (iwm_prepare_card_hw(sc) != 0) {
9366 			printf("%s: could not initialize hardware\n",
9367 			    DEVNAME(sc));
9368 			return;
9369 		}
9370 
9371 		/*
9372 		 * In order to recognize C step the driver should read the
9373 		 * chip version id located at the AUX bus MISC address.
9374 		 */
9375 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
9376 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
9377 		DELAY(2);
9378 
9379 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
9380 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
9381 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
9382 				   25000);
9383 		if (!err) {
			printf("%s: failed to wake up NIC\n", DEVNAME(sc));
9385 			return;
9386 		}
9387 
9388 		if (iwm_nic_lock(sc)) {
9389 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
9390 			hw_step |= IWM_ENABLE_WFPM;
9391 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
9392 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
9393 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
9394 			if (hw_step == 0x3)
9395 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
9396 						(IWM_SILICON_C_STEP << 2);
9397 			iwm_nic_unlock(sc);
9398 		} else {
			printf("%s: failed to lock NIC\n", DEVNAME(sc));
9400 			return;
9401 		}
9402 	}
9403 
9404 	/*
9405 	 * Allocate DMA memory for firmware transfers.
9406 	 * Must be aligned on a 16-byte boundary.
9407 	 */
9408 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
9409 	    sc->sc_fwdmasegsz, 16);
9410 	if (err) {
9411 		printf("%s: could not allocate memory for firmware\n",
9412 		    DEVNAME(sc));
9413 		return;
9414 	}
9415 
9416 	/* Allocate "Keep Warm" page, used internally by the card. */
9417 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
9418 	if (err) {
9419 		printf("%s: could not allocate keep warm page\n", DEVNAME(sc));
9420 		goto fail1;
9421 	}
9422 
	/* Allocate interrupt cause table (ICT). */
9424 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
9425 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
9426 	if (err) {
9427 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
9428 		goto fail2;
9429 	}
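	/*
	 * The ICT base-address register holds only the bits above
	 * IWM_ICT_PADDR_SHIFT, hence the alignment requirement above.
	 */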
9430 
9431 	/* TX scheduler rings must be aligned on a 1KB boundary. */
9432 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
9433 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
9434 	if (err) {
9435 		printf("%s: could not allocate TX scheduler rings\n",
9436 		    DEVNAME(sc));
9437 		goto fail3;
9438 	}
9439 
9440 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
9441 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
9442 		if (err) {
9443 			printf("%s: could not allocate TX ring %d\n",
9444 			    DEVNAME(sc), txq_i);
9445 			goto fail4;
9446 		}
9447 	}
9448 
9449 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
9450 	if (err) {
9451 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
9452 		goto fail4;
9453 	}
9454 
9455 	sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET, 0);
9456 	if (sc->sc_nswq == NULL)
9457 		goto fail4;
9458 
9459 	/* Clear pending interrupts. */
9460 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
9461 
9462 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
9463 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
9464 	ic->ic_state = IEEE80211_S_INIT;
9465 
9466 	/* Set device capabilities. */
9467 	ic->ic_caps =
9468 	    IEEE80211_C_WEP |		/* WEP */
9469 	    IEEE80211_C_RSN |		/* WPA/RSN */
9470 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
9471 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
9472 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
9473 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
9474 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
9475 
9476 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
9477 	ic->ic_htcaps |=
9478 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
9479 	ic->ic_htxcaps = 0;
9480 	ic->ic_txbfcaps = 0;
9481 	ic->ic_aselcaps = 0;
9482 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
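	/*
	 * Minimum MPDU start spacing of 4 usec; the low two bits select
	 * a maximum A-MPDU length exponent of 3 (2^(13+3) = 64 kB).
	 */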
9483 
9484 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
9485 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
9486 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
9487 
9488 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
9489 		sc->sc_phyctxt[i].id = i;
9490 	}
9491 
9492 	sc->sc_amrr.amrr_min_success_threshold =  1;
9493 	sc->sc_amrr.amrr_max_success_threshold = 15;
9494 
9495 	/* IBSS channel undefined for now. */
9496 	ic->ic_ibss_chan = &ic->ic_channels[1];
9497 
9498 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
9499 
9500 	ifp->if_softc = sc;
9501 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
9502 	ifp->if_ioctl = iwm_ioctl;
9503 	ifp->if_start = iwm_start;
9504 	ifp->if_watchdog = iwm_watchdog;
9505 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
9506 
9507 	if_attach(ifp);
9508 	ieee80211_ifattach(ifp);
9509 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
9510 
9511 #if NBPFILTER > 0
9512 	iwm_radiotap_attach(sc);
9513 #endif
9514 	timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
9515 	timeout_set(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
9516 	task_set(&sc->init_task, iwm_init_task, sc);
9517 	task_set(&sc->newstate_task, iwm_newstate_task, sc);
9518 	task_set(&sc->ba_task, iwm_ba_task, sc);
9519 	task_set(&sc->htprot_task, iwm_htprot_task, sc);
9520 
9521 	ic->ic_node_alloc = iwm_node_alloc;
9522 	ic->ic_bgscan_start = iwm_bgscan;
9523 	ic->ic_set_key = iwm_set_key;
9524 	ic->ic_delete_key = iwm_delete_key;
9525 
9526 	/* Override 802.11 state transition machine. */
9527 	sc->sc_newstate = ic->ic_newstate;
9528 	ic->ic_newstate = iwm_newstate;
9529 	ic->ic_update_htprot = iwm_update_htprot;
9530 	ic->ic_ampdu_rx_start = iwm_ampdu_rx_start;
9531 	ic->ic_ampdu_rx_stop = iwm_ampdu_rx_stop;
9532 #ifdef notyet
9533 	ic->ic_ampdu_tx_start = iwm_ampdu_tx_start;
9534 	ic->ic_ampdu_tx_stop = iwm_ampdu_tx_stop;
9535 #endif
9536 	/*
9537 	 * We cannot read the MAC address without loading the
9538 	 * firmware from disk. Postpone until mountroot is done.
9539 	 */
9540 	config_mountroot(self, iwm_attach_hook);
9541 
9542 	return;
9543 
9544 fail4:	while (--txq_i >= 0)
9545 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
9546 	iwm_free_rx_ring(sc, &sc->rxq);
9547 	iwm_dma_contig_free(&sc->sched_dma);
9548 fail3:	if (sc->ict_dma.vaddr != NULL)
9549 		iwm_dma_contig_free(&sc->ict_dma);
9550 
9551 fail2:	iwm_dma_contig_free(&sc->kw_dma);
9552 fail1:	iwm_dma_contig_free(&sc->fw_dma);
9553 	return;
9554 }
9555 
9556 #if NBPFILTER > 0
9557 void
9558 iwm_radiotap_attach(struct iwm_softc *sc)
9559 {
9560 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
9561 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
9562 
9563 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
9564 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
9565 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
9566 
9567 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
9568 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
9569 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
9570 }
9571 #endif
9572 
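/*
 * This task restarts the device after a fatal firmware or hardware error
 * and handles rfkill state changes; it is also used to re-initialize the
 * device after resume.
 */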
9573 void
9574 iwm_init_task(void *arg1)
9575 {
9576 	struct iwm_softc *sc = arg1;
9577 	struct ifnet *ifp = &sc->sc_ic.ic_if;
9578 	int s = splnet();
9579 	int generation = sc->sc_generation;
9580 	int fatal = (sc->sc_flags & (IWM_FLAG_HW_ERR | IWM_FLAG_RFKILL));
9581 
9582 	rw_enter_write(&sc->ioctl_rwl);
9583 	if (generation != sc->sc_generation) {
9584 		rw_exit(&sc->ioctl_rwl);
9585 		splx(s);
9586 		return;
9587 	}
9588 
9589 	if (ifp->if_flags & IFF_RUNNING)
9590 		iwm_stop(ifp);
9591 	else
9592 		sc->sc_flags &= ~IWM_FLAG_HW_ERR;
9593 
9594 	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
9595 		iwm_init(ifp);
9596 
9597 	rw_exit(&sc->ioctl_rwl);
9598 	splx(s);
9599 }
9600 
9601 int
9602 iwm_resume(struct iwm_softc *sc)
9603 {
9604 	pcireg_t reg;
9605 
9606 	/* Clear device-specific "PCI retry timeout" register (41h). */
9607 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
9608 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
9609 
9610 	/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
9611 	iwm_conf_msix_hw(sc, 0);
9612 
9613 	iwm_enable_rfkill_int(sc);
9614 	iwm_check_rfkill(sc);
9615 
9616 	return iwm_prepare_card_hw(sc);
9617 }
9618 
9619 int
9620 iwm_activate(struct device *self, int act)
9621 {
9622 	struct iwm_softc *sc = (struct iwm_softc *)self;
9623 	struct ifnet *ifp = &sc->sc_ic.ic_if;
9624 	int err = 0;
9625 
9626 	switch (act) {
9627 	case DVACT_QUIESCE:
9628 		if (ifp->if_flags & IFF_RUNNING) {
9629 			rw_enter_write(&sc->ioctl_rwl);
9630 			iwm_stop(ifp);
9631 			rw_exit(&sc->ioctl_rwl);
9632 		}
9633 		break;
9634 	case DVACT_RESUME:
9635 		err = iwm_resume(sc);
9636 		if (err)
9637 			printf("%s: could not initialize hardware\n",
9638 			    DEVNAME(sc));
9639 		break;
9640 	case DVACT_WAKEUP:
9641 		/* Hardware should be up at this point. */
9642 		if (iwm_set_hw_ready(sc))
9643 			task_add(systq, &sc->init_task);
9644 		break;
9645 	}
9646 
9647 	return 0;
9648 }
9649 
9650 struct cfdriver iwm_cd = {
9651 	NULL, "iwm", DV_IFNET
9652 };
9653 
9654 struct cfattach iwm_ca = {
9655 	sizeof(struct iwm_softc), iwm_match, iwm_attach,
9656 	NULL, iwm_activate
9657 };
9658