/*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 *				DragonFly work
 *
 * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
 *	 changes to remove per-device network interface (DragonFly has not
 *	 caught up to that yet on the WLAN side).
 *
 * Comprehensive list of adjustments for DragonFly not #ifdef'd:
 *	malloc -> kmalloc	(in particular, changing improper M_NOWAIT
 *				specifications to M_INTWAIT.  We still don't
 *				understand why FreeBSD uses M_NOWAIT for
 *				critical must-not-fail kmalloc()s).
 *	free -> kfree
 *	printf -> kprintf
 *	(bug fix) memset in iwm_reset_rx_ring.
 *	(debug)   added several kprintf()s on error
 *
 *	header file paths (DFly allows localized path specifications).
 *	minor header file differences.
 *
 * Comprehensive list of adjustments for DragonFly #ifdef'd:
 *	(safety)  added register read-back serialization in iwm_reset_rx_ring().
 *	packet counters
 *	msleep -> lksleep
 *	mtx -> lk  (mtx functions -> lockmgr functions)
 *	callout differences
 *	taskqueue differences
 *	MSI differences
 *	bus_setup_intr() differences
 *	minor PCI config register naming differences
 */
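
/*
 * Illustrative sketch only (not compiled): the malloc -> kmalloc and
 * free -> kfree mappings listed above, with M_INTWAIT substituted for
 * must-not-fail allocations.  The buffer and length are hypothetical.
 */
#if 0
	uint8_t *buf;

	/* FreeBSD: buf = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); */
	buf = kmalloc(len, M_DEVBUF, M_INTWAIT | M_ZERO);
	/* FreeBSD: free(buf, M_DEVBUF); */
	kfree(buf, M_DEVBUF);
#endif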
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/endian.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#include <netproto/802_11/ieee80211_ratectl.h>
#include <netproto/802_11/ieee80211_radiotap.h>

#include "if_iwmreg.h"
#include "if_iwmvar.h"
#include "if_iwm_config.h"
#include "if_iwm_debug.h"
#include "if_iwm_notif_wait.h"
#include "if_iwm_util.h"
#include "if_iwm_binding.h"
#include "if_iwm_phy_db.h"
#include "if_iwm_mac_ctxt.h"
#include "if_iwm_phy_ctxt.h"
#include "if_iwm_time_event.h"
#include "if_iwm_power.h"
#include "if_iwm_scan.h"
#include "if_iwm_sf.h"
#include "if_iwm_sta.h"
#include "if_iwm_pcie_trans.h"
#include "if_iwm_led.h"
#include "if_iwm_fw.h"

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)

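/*
 * Illustrative sketch only (not compiled): mapping an 802.11 rate in
 * 500 kbps units to its PLCP code is a linear search of iwm_rates[];
 * the CCK fallback chosen here is an assumption for the example.
 */
#if 0
static uint8_t
iwm_rate2plcp_example(uint8_t rate)
{
	int i;

	for (i = 0; i <= IWM_RIDX_MAX; i++) {
		if (iwm_rates[i].rate == rate)
			return iwm_rates[i].plcp;
	}
	return iwm_rates[IWM_RIDX_CCK].plcp;
}
#endif
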
struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

struct iwm_mvm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
                                           enum iwm_ucode_type,
                                           const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *);
#if !defined(__DragonFly__)
static void	iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
#endif
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
                                  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_mvm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
                                   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
					       struct iwm_nvm_data *,
					       const uint16_t *,
					       const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
				      const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
					     bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
						const struct iwm_fw_img *,
						int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
					   const struct iwm_fw_img *,
					   int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
					       const struct iwm_fw_img *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
					  const struct iwm_fw_img *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
                                              enum iwm_ucode_type);
static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int	iwm_mvm_config_ltr(struct iwm_softc *sc);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
					    struct iwm_rx_phy_info *);
static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
                                      struct iwm_rx_packet *);
static int	iwm_get_noise(struct iwm_softc *,
		    const struct iwm_mvm_statistics_rx_non_phy *);
static void	iwm_mvm_handle_rx_statistics(struct iwm_softc *,
		    struct iwm_rx_packet *);
static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
				    uint32_t, boolean_t);
static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
                                         struct iwm_rx_packet *,
				         struct iwm_node *);
static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
                                 uint16_t);
#endif
static uint8_t	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct mbuf *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
                       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static struct ieee80211_node *
		iwm_node_alloc(struct ieee80211vap *,
		               const uint8_t[IEEE80211_ADDR_LEN]);
static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_media_change(struct ifnet *);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
		iwm_vap_create(struct ieee80211com *,
		               const char [IFNAMSIZ], int,
		               enum ieee80211_opmode, int,
		               const uint8_t [IEEE80211_ADDR_LEN],
		               const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_xmit_queue_drain(struct iwm_softc *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

#if defined(__DragonFly__)
static int	iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
#endif

static int	iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_img *fws;
	struct iwm_fw_desc *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.img[type];
	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
		return EINVAL;

	fwone = &fws->sec[fws->fw_count];

	/* the first 32 bits are the device load offset */
	memcpy(&fwone->offset, data, sizeof(uint32_t));

	/* rest is data */
	fwone->data = data + sizeof(uint32_t);
	fwone->len = dlen - sizeof(uint32_t);

	fws->fw_count++;

	return 0;
}

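/*
 * Layout of one firmware section TLV as consumed by
 * iwm_firmware_store_section() above (sketch, derived from the code):
 *
 *	byte:   0               4                            dlen
 *	        +---------------+----------------------------+
 *	        | load offset   | section data (dlen - 4)    |
 *	        +---------------+----------------------------+
 */
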
481 
482 struct iwm_tlv_calib_data {
483 	uint32_t ucode_type;
484 	struct iwm_tlv_calib_ctrl calib;
485 } __packed;
486 
487 static int
488 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
489 {
490 	const struct iwm_tlv_calib_data *def_calib = data;
491 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
492 
493 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
494 		device_printf(sc->sc_dev,
495 		    "Wrong ucode_type %u for default "
496 		    "calibration.\n", ucode_type);
497 		return EINVAL;
498 	}
499 
500 	sc->sc_default_calib[ucode_type].flow_trigger =
501 	    def_calib->calib.flow_trigger;
502 	sc->sc_default_calib[ucode_type].event_trigger =
503 	    def_calib->calib.event_trigger;
504 
505 	return 0;
506 }
507 
508 static int
509 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
510 			struct iwm_ucode_capabilities *capa)
511 {
512 	const struct iwm_ucode_api *ucode_api = (const void *)data;
513 	uint32_t api_index = le32toh(ucode_api->api_index);
514 	uint32_t api_flags = le32toh(ucode_api->api_flags);
515 	int i;
516 
517 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
518 		device_printf(sc->sc_dev,
519 		    "api flags index %d larger than supported by driver\n",
520 		    api_index);
521 		/* don't return an error so we can load FW that has more bits */
522 		return 0;
523 	}
524 
525 	for (i = 0; i < 32; i++) {
526 		if (api_flags & (1U << i))
527 			setbit(capa->enabled_api, i + 32 * api_index);
528 	}
529 
530 	return 0;
531 }
532 
533 static int
534 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
535 			   struct iwm_ucode_capabilities *capa)
536 {
537 	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
538 	uint32_t api_index = le32toh(ucode_capa->api_index);
539 	uint32_t api_flags = le32toh(ucode_capa->api_capa);
540 	int i;
541 
542 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
543 		device_printf(sc->sc_dev,
544 		    "capa flags index %d larger than supported by driver\n",
545 		    api_index);
546 		/* don't return an error so we can load FW that has more bits */
547 		return 0;
548 	}
549 
550 	for (i = 0; i < 32; i++) {
551 		if (api_flags & (1U << i))
552 			setbit(capa->enabled_capa, i + 32 * api_index);
553 	}
554 
555 	return 0;
556 }
557 
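/*
 * Illustrative sketch only (not compiled): once the bitmaps above are
 * populated, individual bits can be tested with isset(); the
 * capability constant is just an example.
 */
#if 0
	if (isset(capa->enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT))
		kprintf("firmware supports location-aware regulatory\n");
#endif
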
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	memset(fw->img, 0, sizeof(fw->img));
}

static int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	const struct iwm_ucode_tlv *tlv;
	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	uint32_t tlv_len;
	uint32_t usniffer_img;
	const uint8_t *tlv_data;
	uint32_t paging_mem_size;
	int num_of_cpus;
	int error = 0;
	size_t len;

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	fwp = firmware_get(sc->cfg->fw_name);
	if (fwp == NULL) {
		error = ENOENT;
		device_printf(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->cfg->fw_name, error);
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	capa->flags = 0;
	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}

	ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

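	/*
	 * The image body is a stream of TLVs, each padded to a 4-byte
	 * boundary (sketch, matching the walk below):
	 *
	 *	+------+--------+---------------+------+--------+----
	 *	| type | length | data (padded) | type | length | ...
	 *	+------+--------+---------------+------+--------+----
	 */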
626 		len -= sizeof(*tlv);
627 		tlv = (const void *)data;
628 
629 		tlv_len = le32toh(tlv->length);
630 		tlv_type = le32toh(tlv->type);
631 		tlv_data = tlv->data;
632 
633 		if (len < tlv_len) {
634 			device_printf(sc->sc_dev,
635 			    "firmware too short: %zu bytes\n",
636 			    len);
637 			error = EINVAL;
638 			goto parse_out;
639 		}
		len -= roundup2(tlv_len, 4);
		data += sizeof(*tlv) + roundup2(tlv_len, 4);

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->max_probe_length =
			    le32_to_cpup((const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (capa->max_probe_length >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
			if (num_of_cpus == 2) {
				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_INIT].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
					TRUE;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
765 				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%u) < sizeof(iwm_tlv_calib_data) (%zu)\n",
				    __func__, tlv_len,
				    sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
782 				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) < sizeof(uint32_t)\n",
				    __func__, tlv_len);
				goto parse_out;
			}
			sc->sc_fw.phy_config =
			    le32_to_cpup((const uint32_t *)tlv_data);
			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_TX_CHAIN) >>
						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_RX_CHAIN) >>
						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			if (tlv_len != sizeof(struct iwm_ucode_api)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case 48: /* undocumented TLV */
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "%s: Paging: paging enabled (size = %u bytes)\n",
			    __func__, paging_mem_size);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				device_printf(sc->sc_dev,
					"%s: Paging: driver supports up to %u bytes for paging image\n",
					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
				error = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				device_printf(sc->sc_dev,
852 				    "%s: Paging: image isn't multiple %u\n",
				    __func__, IWM_FW_PAGING_SIZE);
				error = EINVAL;
				goto out;
			}

			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
			sc->sc_fw.img[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			capa->n_scan_channels =
			    le32_to_cpup((const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

 out:
	if (error) {
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	}

	return error;
}

/*
 * DMA resource routines
 */

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/*
 * TX scheduler byte count tables; the base address is handed to the
 * firmware in iwm_trans_pcie_fw_alive().
 */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	if (sc->rxq.stat)
		memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   maxsize, nsegments, maxsize,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
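
/*
 * Illustrative note: the ICT table is allocated with
 * (1 << IWM_ICT_PADDR_SHIFT) alignment (see iwm_alloc_ict()) so that
 * the low bits of IWM_CSR_DRAM_INT_TBL_REG stay free to carry the
 * enable/wrap flags OR'd in above.  A paranoid sketch of the invariant
 * (not compiled):
 */
#if 0
	KASSERT((sc->ict_dma.paddr & ((1 << IWM_ICT_PADDR_SHIFT) - 1)) == 0,
	    ("ICT table is not sufficiently aligned"));
#endif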

/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}
	sc->sc_firmware_state = 0;
	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(1000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending the HW RF kill interrupt
	 * continually unless it is ACKed, even when the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwm_disable_interrupts(sc);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}

static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_mvm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * Workaround: the NIC is stuck in a reset state after early PCIe
	 * power off (PCIe power is lost before PERST# is asserted), which
	 * causes the ME firmware to lose ownership and be unable to
	 * obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* deactivate before configuration */
1570 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1571 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1572 		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1573 
1574 		iwm_nic_unlock(sc);
1575 
1576 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1577 
1578 		if (!iwm_nic_lock(sc)) {
1579 			device_printf(sc->sc_dev,
1580 			    "%s: cannot enable txq %d\n", __func__, qid);
1581 			return EBUSY;
1582 		}
1583 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1584 		iwm_nic_unlock(sc);
1585 
1586 		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1587 		/* Set scheduler window size and frame limit. */
1588 		iwm_write_mem32(sc,
1589 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1590 		    sizeof(uint32_t),
1591 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1592 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1593 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1594 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1595 
1596 		if (!iwm_nic_lock(sc)) {
1597 			device_printf(sc->sc_dev,
1598 			    "%s: cannot enable txq %d\n", __func__, qid);
1599 			return EBUSY;
1600 		}
1601 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1602 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1603 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1604 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1605 		    IWM_SCD_QUEUE_STTS_REG_MSK);
1606 	} else {
1607 		struct iwm_scd_txq_cfg_cmd cmd;
1608 		int error;
1609 
1610 		iwm_nic_unlock(sc);
1611 
1612 		memset(&cmd, 0, sizeof(cmd));
1613 		cmd.scd_queue = qid;
1614 		cmd.enable = 1;
1615 		cmd.sta_id = sta_id;
1616 		cmd.tx_fifo = fifo;
1617 		cmd.aggregate = 0;
1618 		cmd.window = IWM_FRAME_LIMIT;
1619 
1620 		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1621 		    sizeof(cmd), &cmd);
1622 		if (error) {
1623 			device_printf(sc->sc_dev,
1624 			    "cannot enable txq %d\n", qid);
1625 			return error;
1626 		}
1627 
1628 		if (!iwm_nic_lock(sc))
1629 			return EBUSY;
1630 	}
1631 
1632 	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1633 	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1634 
1635 	iwm_nic_unlock(sc);
1636 
1637 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1638 	    __func__, qid, fifo);
1639 
1640 	return 0;
1641 }
1642 
1643 static int
1644 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1645 {
1646 	int error, chnl;
1647 
1648 	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1649 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1650 
1651 	if (!iwm_nic_lock(sc))
1652 		return EBUSY;
1653 
1654 	iwm_ict_reset(sc);
1655 
1656 	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1657 	if (scd_base_addr != 0 &&
1658 	    scd_base_addr != sc->scd_base_addr) {
1659 		device_printf(sc->sc_dev,
1660 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1661 		    __func__, scd_base_addr, sc->scd_base_addr);
1662 	}
1663 
1664 	iwm_nic_unlock(sc);
1665 
1666 	/* reset context data, TX status and translation data */
1667 	error = iwm_write_mem(sc,
1668 	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1669 	    NULL, clear_dwords);
1670 	if (error)
1671 		return EBUSY;
1672 
1673 	if (!iwm_nic_lock(sc))
1674 		return EBUSY;
1675 
1676 	/* Set physical address of TX scheduler rings (1KB aligned). */
1677 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1678 
1679 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1680 
1681 	iwm_nic_unlock(sc);
1682 
1683 	/* enable command channel */
1684 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1685 	if (error)
1686 		return error;
1687 
1688 	if (!iwm_nic_lock(sc))
1689 		return EBUSY;
1690 
1691 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1692 
1693 	/* Enable DMA channels. */
1694 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1695 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1696 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1697 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1698 	}
1699 
1700 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1701 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1702 
1703 	iwm_nic_unlock(sc);
1704 
1705 	/* Enable L1-Active */
1706 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1707 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1708 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1709 	}
1710 
1711 	return error;
1712 }
1713 
1714 /*
1715  * NVM read access and content parsing.  We do not support
1716  * external NVM or writing NVM.
1717  * iwlwifi/mvm/nvm.c
1718  */
1719 
1720 /* Default NVM chunk size to read, in bytes */
1721 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1722 
1723 #define IWM_NVM_WRITE_OPCODE 1
1724 #define IWM_NVM_READ_OPCODE 0
1725 
1726 /* load nvm chunk response */
1727 enum {
1728 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1729 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1730 };
1731 
1732 static int
1733 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1734 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1735 {
1736 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1737 		.offset = htole16(offset),
1738 		.length = htole16(length),
1739 		.type = htole16(section),
1740 		.op_code = IWM_NVM_READ_OPCODE,
1741 	};
1742 	struct iwm_nvm_access_resp *nvm_resp;
1743 	struct iwm_rx_packet *pkt;
1744 	struct iwm_host_cmd cmd = {
1745 		.id = IWM_NVM_ACCESS_CMD,
1746 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1747 		.data = { &nvm_access_cmd, },
1748 	};
1749 	int ret, bytes_read, offset_read;
1750 	uint8_t *resp_data;
1751 
1752 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1753 
1754 	ret = iwm_send_cmd(sc, &cmd);
1755 	if (ret) {
1756 		device_printf(sc->sc_dev,
1757 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1758 		return ret;
1759 	}
1760 
1761 	pkt = cmd.resp_pkt;
1762 
1763 	/* Extract NVM response */
1764 	nvm_resp = (void *)pkt->data;
1765 	ret = le16toh(nvm_resp->status);
1766 	bytes_read = le16toh(nvm_resp->length);
1767 	offset_read = le16toh(nvm_resp->offset);
1768 	resp_data = nvm_resp->data;
1769 	if (ret) {
1770 		if ((offset != 0) &&
1771 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1772 			/*
1773 			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
1774 			 * read a chunk from an address that is a multiple of
1775 			 * 2K and got an error because that address is empty.
1776 			 * Meaning of (offset != 0): the driver has already
1777 			 * read valid data from another chunk, so this case
1778 			 * is not an error.
1779 			 */
1780 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1781 				    "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
1782 				    offset);
1783 			*len = 0;
1784 			ret = 0;
1785 		} else {
1786 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1787 				    "NVM access command failed with status %d\n", ret);
1788 			ret = EIO;
1789 		}
1790 		goto exit;
1791 	}
1792 
1793 	if (offset_read != offset) {
1794 		device_printf(sc->sc_dev,
1795 		    "NVM ACCESS response with invalid offset %d\n",
1796 		    offset_read);
1797 		ret = EINVAL;
1798 		goto exit;
1799 	}
1800 
1801 	if (bytes_read > length) {
1802 		device_printf(sc->sc_dev,
1803 		    "NVM ACCESS response with too much data "
1804 		    "(%d bytes requested, %d bytes received)\n",
1805 		    length, bytes_read);
1806 		ret = EINVAL;
1807 		goto exit;
1808 	}
1809 
1810 	/* Write data to NVM */
1811 	memcpy(data + offset, resp_data, bytes_read);
1812 	*len = bytes_read;
1813 
1814  exit:
1815 	iwm_free_resp(sc, &cmd);
1816 	return ret;
1817 }
1818 
1819 /*
1820  * Reads an NVM section completely.
1821  * NICs prior to the 7000 family don't have a real NVM, but just read
1822  * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
1823  * by the uCode, we must check manually in this case that we don't
1824  * overflow and try to read more than the EEPROM size.
1825  * For 7000-family NICs, we supply the maximal size we can read, and
1826  * the uCode fills the response with as much data as fits without
1827  * overflowing, so no check is needed.
1828  */
1829 static int
1830 iwm_nvm_read_section(struct iwm_softc *sc,
1831 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1832 {
1833 	uint16_t seglen, length, offset = 0;
1834 	int ret;
1835 
1836 	/* Set nvm section read length */
1837 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1838 
1839 	seglen = length;
1840 
1841 	/* Read the NVM until exhausted (reading less than requested) */
1842 	while (seglen == length) {
1843 		/* Check no memory assumptions fail and cause an overflow */
1844 		if ((size_read + offset + length) >
1845 		    sc->cfg->eeprom_size) {
1846 			device_printf(sc->sc_dev,
1847 			    "EEPROM size is too small for NVM\n");
1848 			return ENOBUFS;
1849 		}
1850 
1851 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1852 		if (ret) {
1853 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1854 				    "Cannot read NVM from section %d offset %d, length %d\n",
1855 				    section, offset, length);
1856 			return ret;
1857 		}
1858 		offset += seglen;
1859 	}
1860 
1861 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1862 		    "NVM section %d read completed\n", section);
1863 	*len = offset;
1864 	return 0;
1865 }
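
/*
 * Illustrative sketch (not compiled in): a section is read in
 * IWM_NVM_DEFAULT_CHUNK_SIZE (2K) chunks until a short read marks the
 * end; a hypothetical 5000-byte section would arrive as 2048 + 2048 +
 * 904 bytes, leaving *len == 5000.  The buffer below is made up for
 * the example.
 */
#if 0
	uint8_t buf[8192];
	uint16_t seclen = 0;

	if (iwm_nvm_read_section(sc, IWM_NVM_SECTION_TYPE_SW, buf,
	    &seclen, 0) == 0)
		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "read %u bytes\n", seclen);
#endif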
1866 
1867 /* NVM offsets (in words) definitions */
1868 enum iwm_nvm_offsets {
1869 	/* NVM HW-Section offset (in words) definitions */
1870 	IWM_HW_ADDR = 0x15,
1871 
1872 /* NVM SW-Section offset (in words) definitions */
1873 	IWM_NVM_SW_SECTION = 0x1C0,
1874 	IWM_NVM_VERSION = 0,
1875 	IWM_RADIO_CFG = 1,
1876 	IWM_SKU = 2,
1877 	IWM_N_HW_ADDRS = 3,
1878 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1879 
1880 /* NVM calibration section offset (in words) definitions */
1881 	IWM_NVM_CALIB_SECTION = 0x2B8,
1882 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1883 };
1884 
1885 enum iwm_8000_nvm_offsets {
1886 	/* NVM HW-Section offset (in words) definitions */
1887 	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1888 	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1889 	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1890 	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1891 	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1892 
1893 	/* NVM SW-Section offset (in words) definitions */
1894 	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1895 	IWM_NVM_VERSION_8000 = 0,
1896 	IWM_RADIO_CFG_8000 = 0,
1897 	IWM_SKU_8000 = 2,
1898 	IWM_N_HW_ADDRS_8000 = 3,
1899 
1900 	/* NVM REGULATORY -Section offset (in words) definitions */
1901 	IWM_NVM_CHANNELS_8000 = 0,
1902 	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1903 	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1904 	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1905 
1906 	/* NVM calibration section offset (in words) definitions */
1907 	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1908 	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1909 };
1910 
1911 /* SKU Capabilities (actual values from NVM definition) */
1912 enum nvm_sku_bits {
1913 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1914 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1915 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1916 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1917 };
1918 
1919 /* radio config bits (actual values from NVM definition) */
1920 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1921 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1922 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1923 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1924 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1925 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1926 
1927 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1928 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1929 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1930 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1931 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1932 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
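
/*
 * Worked example (hypothetical value): a 7000-family radio_cfg word of
 * 0x0321 decodes via the macros above to DASH = 1, STEP = 0, TYPE = 2,
 * PNUM = 0, TX_ANT = 0x3 (an antenna bitmask), RX_ANT = 0x0.
 */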
1933 
1934 /**
1935  * enum iwm_nvm_channel_flags - channel flags in NVM
1936  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1937  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1938  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1939  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1940  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1941  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1942  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1943  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1944  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1945  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1946  */
1947 enum iwm_nvm_channel_flags {
1948 	IWM_NVM_CHANNEL_VALID = (1 << 0),
1949 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1950 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1951 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1952 	IWM_NVM_CHANNEL_DFS = (1 << 7),
1953 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1954 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1955 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1956 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1957 };
1958 
1959 /*
1960  * Translate EEPROM flags to net80211.
1961  */
1962 static uint32_t
1963 iwm_eeprom_channel_flags(uint16_t ch_flags)
1964 {
1965 	uint32_t nflags;
1966 
1967 	nflags = 0;
1968 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1969 		nflags |= IEEE80211_CHAN_PASSIVE;
1970 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1971 		nflags |= IEEE80211_CHAN_NOADHOC;
1972 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1973 		nflags |= IEEE80211_CHAN_DFS;
1974 		/* Just in case. */
1975 		nflags |= IEEE80211_CHAN_NOADHOC;
1976 	}
1977 
1978 	return (nflags);
1979 }
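
/*
 * Worked example (hypothetical flags): an NVM channel marked
 * IWM_NVM_CHANNEL_VALID | IWM_NVM_CHANNEL_RADAR, with neither IBSS nor
 * ACTIVE set, translates to IEEE80211_CHAN_PASSIVE |
 * IEEE80211_CHAN_NOADHOC | IEEE80211_CHAN_DFS.
 */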
1980 
1981 static void
1982 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1983     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1984     const uint8_t bands[])
1985 {
1986 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1987 	uint32_t nflags;
1988 	uint16_t ch_flags;
1989 	uint8_t ieee;
1990 	int error;
1991 
1992 	for (; ch_idx < ch_num; ch_idx++) {
1993 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1994 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1995 			ieee = iwm_nvm_channels[ch_idx];
1996 		else
1997 			ieee = iwm_nvm_channels_8000[ch_idx];
1998 
1999 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2000 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2001 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
2002 			    ieee, ch_flags,
2003 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2004 			    "5.2" : "2.4");
2005 			continue;
2006 		}
2007 
2008 		nflags = iwm_eeprom_channel_flags(ch_flags);
2009 		error = ieee80211_add_channel(chans, maxchans, nchans,
2010 		    ieee, 0, 0, nflags, bands);
2011 		if (error != 0)
2012 			break;
2013 
2014 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2015 		    "Ch. %d Flags %x [%sGHz] - Added\n",
2016 		    ieee, ch_flags,
2017 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2018 		    "5.2" : "2.4");
2019 	}
2020 }
2021 
2022 static void
2023 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2024     struct ieee80211_channel chans[])
2025 {
2026 	struct iwm_softc *sc = ic->ic_softc;
2027 	struct iwm_nvm_data *data = sc->nvm_data;
2028 	uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
2029 	size_t ch_num;
2030 
2031 	memset(bands, 0, sizeof(bands));
2032 	/* 1-13: 11b/g channels. */
2033 	setbit(bands, IEEE80211_MODE_11B);
2034 	setbit(bands, IEEE80211_MODE_11G);
2035 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2036 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2037 
2038 	/* 14: 11b channel only. */
2039 	clrbit(bands, IEEE80211_MODE_11G);
2040 	iwm_add_channel_band(sc, chans, maxchans, nchans,
2041 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2042 
2043 	if (data->sku_cap_band_52GHz_enable) {
2044 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2045 			ch_num = nitems(iwm_nvm_channels);
2046 		else
2047 			ch_num = nitems(iwm_nvm_channels_8000);
2048 		memset(bands, 0, sizeof(bands));
2049 		setbit(bands, IEEE80211_MODE_11A);
2050 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2051 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2052 	}
2053 }
2054 
2055 static void
2056 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2057 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2058 {
2059 	const uint8_t *hw_addr;
2060 
2061 	if (mac_override) {
2062 		static const uint8_t reserved_mac[] = {
2063 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2064 		};
2065 
2066 		hw_addr = (const uint8_t *)(mac_override +
2067 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2068 
2069 		/*
2070 		 * Store the MAC address from the MAO (MAC address
2071 		 * override) section.  No byte swapping is required there.
2072 		 */
2073 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2074 
2075 		/*
2076 		 * Force the use of the OTP MAC address in case of reserved MAC
2077 		 * address in the NVM, or if address is given but invalid.
2078 		 */
2079 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2080 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2081 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2082 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2083 			return;
2084 
2085 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2086 		    "%s: mac address from nvm override section invalid\n",
2087 		    __func__);
2088 	}
2089 
2090 	if (nvm_hw) {
2091 		/* read the mac address from WFMP registers */
2092 		uint32_t mac_addr0 =
2093 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2094 		uint32_t mac_addr1 =
2095 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2096 
2097 		hw_addr = (const uint8_t *)&mac_addr0;
2098 		data->hw_addr[0] = hw_addr[3];
2099 		data->hw_addr[1] = hw_addr[2];
2100 		data->hw_addr[2] = hw_addr[1];
2101 		data->hw_addr[3] = hw_addr[0];
2102 
2103 		hw_addr = (const uint8_t *)&mac_addr1;
2104 		data->hw_addr[4] = hw_addr[1];
2105 		data->hw_addr[5] = hw_addr[0];
2106 
2107 		return;
2108 	}
2109 
2110 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2111 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2112 }
2113 
2114 static int
2115 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2116 	    const uint16_t *phy_sku)
2117 {
2118 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2119 		return le16_to_cpup(nvm_sw + IWM_SKU);
2120 
2121 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2122 }
2123 
2124 static int
2125 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2126 {
2127 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2128 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2129 	else
2130 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2131 						IWM_NVM_VERSION_8000));
2132 }
2133 
2134 static int
2135 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2136 		  const uint16_t *phy_sku)
2137 {
2138 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2139 		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2140 
2141 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2142 }
2143 
2144 static int
2145 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2146 {
2147 	int n_hw_addr;
2148 
2149 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2150 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2151 
2152 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2153 
2154 	return n_hw_addr & IWM_N_HW_ADDR_MASK;
2155 }
2156 
2157 static void
2158 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2159 		  uint32_t radio_cfg)
2160 {
2161 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2162 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2163 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2164 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2165 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2166 		return;
2167 	}
2168 
2169 	/* set the radio configuration for family 8000 */
2170 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2171 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2172 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2173 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2174 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2175 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2176 }
2177 
2178 static int
2179 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2180 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2181 {
2182 #ifdef notyet /* for FAMILY 9000 */
2183 	if (cfg->mac_addr_from_csr) {
2184 		iwm_set_hw_address_from_csr(sc, data);
2185 	} else
2186 #endif
2187 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2188 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2189 
2190 		/* The byte order is little endian 16 bit, meaning 214365 */
2191 		data->hw_addr[0] = hw_addr[1];
2192 		data->hw_addr[1] = hw_addr[0];
2193 		data->hw_addr[2] = hw_addr[3];
2194 		data->hw_addr[3] = hw_addr[2];
2195 		data->hw_addr[4] = hw_addr[5];
2196 		data->hw_addr[5] = hw_addr[4];
2197 	} else {
2198 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2199 	}
2200 
2201 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2202 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2203 		return EINVAL;
2204 	}
2205 
2206 	return 0;
2207 }
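
/*
 * Worked example (hypothetical OTP contents): with the pre-8000 16-bit
 * little-endian layout handled above, NVM bytes 00:a0:c9:11:22:33 at
 * IWM_HW_ADDR yield the MAC address a0:00:11:c9:33:22 after the 214365
 * swap.
 */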
2208 
2209 static struct iwm_nvm_data *
2210 iwm_parse_nvm_data(struct iwm_softc *sc,
2211 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2212 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2213 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2214 {
2215 	struct iwm_nvm_data *data;
2216 	uint32_t sku, radio_cfg;
2217 	uint16_t lar_config;
2218 
2219 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2220 		data = kmalloc(sizeof(*data) +
2221 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2222 		    M_DEVBUF, M_WAITOK | M_ZERO);
2223 	} else {
2224 		data = kmalloc(sizeof(*data) +
2225 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2226 		    M_DEVBUF, M_WAITOK | M_ZERO);
2227 	}
2228 	if (!data)
2229 		return NULL;
2230 
2231 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2232 
2233 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2234 	iwm_set_radio_cfg(sc, data, radio_cfg);
2235 
2236 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2237 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2238 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2239 	data->sku_cap_11n_enable = 0;
2240 
2241 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2242 
2243 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2244 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
2245 				       IWM_NVM_LAR_OFFSET_8000_OLD :
2246 				       IWM_NVM_LAR_OFFSET_8000;
2247 
2248 		lar_config = le16_to_cpup(regulatory + lar_offset);
2249 		data->lar_enabled = !!(lar_config &
2250 				       IWM_NVM_LAR_ENABLED_8000);
2251 	}
2252 
2253 	/* If no valid mac address was found - bail out */
2254 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2255 		kfree(data, M_DEVBUF);
2256 		return NULL;
2257 	}
2258 
2259 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2260 		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2261 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2262 	} else {
2263 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2264 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2265 	}
2266 
2267 	return data;
2268 }
2269 
2270 static void
2271 iwm_free_nvm_data(struct iwm_nvm_data *data)
2272 {
2273 	if (data != NULL)
2274 		kfree(data, M_DEVBUF);
2275 }
2276 
2277 static struct iwm_nvm_data *
2278 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2279 {
2280 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2281 
2282 	/* Checking for required sections */
2283 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2284 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2285 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2286 			device_printf(sc->sc_dev,
2287 			    "Can't parse empty OTP/NVM sections\n");
2288 			return NULL;
2289 		}
2290 	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2291 		/* SW and REGULATORY sections are mandatory */
2292 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2293 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2294 			device_printf(sc->sc_dev,
2295 			    "Can't parse empty OTP/NVM sections\n");
2296 			return NULL;
2297 		}
2298 		/* MAC_OVERRIDE or at least HW section must exist */
2299 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2300 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2301 			device_printf(sc->sc_dev,
2302 			    "Can't parse mac_address, empty sections\n");
2303 			return NULL;
2304 		}
2305 
2306 		/* PHY_SKU section is mandatory in B0 */
2307 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2308 			device_printf(sc->sc_dev,
2309 			    "Can't parse phy_sku in B0, empty sections\n");
2310 			return NULL;
2311 		}
2312 	} else {
2313 		panic("unknown device family %d\n", sc->cfg->device_family);
2314 	}
2315 
2316 	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2317 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2318 	calib = (const uint16_t *)
2319 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2320 	regulatory = (const uint16_t *)
2321 	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2322 	mac_override = (const uint16_t *)
2323 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2324 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2325 
2326 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2327 	    phy_sku, regulatory);
2328 }
2329 
2330 static int
2331 iwm_nvm_init(struct iwm_softc *sc)
2332 {
2333 	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2334 	int i, ret, section;
2335 	uint32_t size_read = 0;
2336 	uint8_t *nvm_buffer, *temp;
2337 	uint16_t len;
2338 
2339 	memset(nvm_sections, 0, sizeof(nvm_sections));
2340 
2341 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2342 		return EINVAL;
2343 
2344 	/* Load NVM values from the NIC via firmware NVM-access commands. */
2346 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2347 
2348 	nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF,
2349 	    M_INTWAIT | M_ZERO);
2350 	if (!nvm_buffer)
2351 		return ENOMEM;
2352 	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2353 		/* we override the constness for initial read */
2354 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2355 					   &len, size_read);
2356 		if (ret)
2357 			continue;
2358 		size_read += len;
2359 		temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
2360 		if (!temp) {
2361 			ret = ENOMEM;
2362 			break;
2363 		}
2364 		memcpy(temp, nvm_buffer, len);
2365 
2366 		nvm_sections[section].data = temp;
2367 		nvm_sections[section].length = len;
2368 	}
2369 	if (!size_read)
2370 		device_printf(sc->sc_dev, "OTP is blank\n");
2371 	kfree(nvm_buffer, M_DEVBUF);
2372 
2373 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2374 	if (!sc->nvm_data)
2375 		return EINVAL;
2376 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2377 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2378 
2379 	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2380 		if (nvm_sections[i].data != NULL)
2381 			kfree(nvm_sections[i].data, M_DEVBUF);
2382 	}
2383 
2384 	return 0;
2385 }
2386 
2387 static int
2388 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2389 	const struct iwm_fw_desc *section)
2390 {
2391 	struct iwm_dma_info *dma = &sc->fw_dma;
2392 	uint8_t *v_addr;
2393 	bus_addr_t p_addr;
2394 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2395 	int ret = 0;
2396 
2397 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2398 		    "%s: [%d] uCode section being loaded...\n",
2399 		    __func__, section_num);
2400 
2401 	v_addr = dma->vaddr;
2402 	p_addr = dma->paddr;
2403 
2404 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2405 		uint32_t copy_size, dst_addr;
2406 		int extended_addr = FALSE;
2407 
2408 		copy_size = MIN(chunk_sz, section->len - offset);
2409 		dst_addr = section->offset + offset;
2410 
2411 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2412 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2413 			extended_addr = TRUE;
2414 
2415 		if (extended_addr)
2416 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2417 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2418 
2419 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2420 		    copy_size);
2421 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2422 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2423 						   copy_size);
2424 
2425 		if (extended_addr)
2426 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2427 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2428 
2429 		if (ret) {
2430 			device_printf(sc->sc_dev,
2431 			    "%s: Could not load the [%d] uCode section\n",
2432 			    __func__, section_num);
2433 			break;
2434 		}
2435 	}
2436 
2437 	return ret;
2438 }
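
/*
 * Worked example (hypothetical section; assumes IWM_FH_MEM_TB_MAX_LENGTH
 * is 128 KiB): a 300000-byte section would be pushed as three DMA chunks
 * of 131072, 131072 and 37856 bytes, each staged through the single
 * fw_dma bounce buffer used above.
 */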
2439 
2440 /*
2441  * ucode
2442  */
2443 static int
2444 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2445 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2446 {
2447 	int ret;
2448 
2449 	sc->sc_fw_chunk_done = 0;
2450 
2451 	if (!iwm_nic_lock(sc))
2452 		return EBUSY;
2453 
2454 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2455 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2456 
2457 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2458 	    dst_addr);
2459 
2460 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2461 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2462 
2463 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2464 	    (iwm_get_dma_hi_addr(phy_addr)
2465 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2466 
2467 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2468 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2469 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2470 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2471 
2472 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2473 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2474 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2475 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2476 
2477 	iwm_nic_unlock(sc);
2478 
2479 	/* wait up to 5s for this segment to load */
2480 	ret = 0;
2481 	while (!sc->sc_fw_chunk_done) {
2482 #if defined(__DragonFly__)
2483 		ret = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", 5 * hz);
2484 #else
2485 		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", 5 * hz);
2486 #endif
2487 		if (ret)
2488 			break;
2489 	}
2490 
2491 	if (ret != 0) {
2492 		device_printf(sc->sc_dev,
2493 		    "fw chunk addr 0x%x len %d failed to load\n",
2494 		    dst_addr, byte_cnt);
2495 		return ETIMEDOUT;
2496 	}
2497 
2498 	return 0;
2499 }
2500 
2501 static int
2502 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2503 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2504 {
2505 	int shift_param;
2506 	int i, ret = 0, sec_num = 0x1;
2507 	uint32_t val, last_read_idx = 0;
2508 
2509 	if (cpu == 1) {
2510 		shift_param = 0;
2511 		*first_ucode_section = 0;
2512 	} else {
2513 		shift_param = 16;
2514 		(*first_ucode_section)++;
2515 	}
2516 
2517 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2518 		last_read_idx = i;
2519 
2520 		/*
2521 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2522 		 * CPU1 sections from the CPU2 sections.
2523 		 * The PAGING_SEPARATOR_SECTION delimiter separates the
2524 		 * CPU2 non-paged sections from the CPU2 paging sections.
2525 		 */
2526 		if (!image->sec[i].data ||
2527 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2528 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2529 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2530 				    "Break since Data not valid or Empty section, sec = %d\n",
2531 				    i);
2532 			break;
2533 		}
2534 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2535 		if (ret)
2536 			return ret;
2537 
2538 		/* Notify the ucode of the loaded section number and status */
2539 		if (iwm_nic_lock(sc)) {
2540 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2541 			val = val | (sec_num << shift_param);
2542 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2543 			sec_num = (sec_num << 1) | 0x1;
2544 			iwm_nic_unlock(sc);
2545 		}
2546 	}
2547 
2548 	*first_ucode_section = last_read_idx;
2549 
2550 	iwm_enable_interrupts(sc);
2551 
2552 	if (iwm_nic_lock(sc)) {
2553 		if (cpu == 1)
2554 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2555 		else
2556 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2557 		iwm_nic_unlock(sc);
2558 	}
2559 
2560 	return 0;
2561 }
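
/*
 * Worked example of the load-status handshake above: sec_num starts at
 * 0x1 and the running OR grows 0x1, 0x3, 0x7, ... as sections load, so
 * after three CPU1 sections the low half of IWM_FH_UCODE_LOAD_STATUS
 * reads 0x7; for CPU2 the same pattern lands in the upper 16 bits.
 * Writing 0xFFFF (CPU1) or 0xFFFFFFFF (CPU2) signals load completion.
 */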
2562 
2563 static int
2564 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2565 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2566 {
2567 	int shift_param;
2568 	int i, ret = 0;
2569 	uint32_t last_read_idx = 0;
2570 
2571 	if (cpu == 1) {
2572 		shift_param = 0;
2573 		*first_ucode_section = 0;
2574 	} else {
2575 		shift_param = 16;
2576 		(*first_ucode_section)++;
2577 	}
2578 
2579 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2580 		last_read_idx = i;
2581 
2582 		/*
2583 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2584 		 * CPU1 sections from the CPU2 sections.
2585 		 * The PAGING_SEPARATOR_SECTION delimiter separates the
2586 		 * CPU2 non-paged sections from the CPU2 paging sections.
2587 		 */
2588 		if (!image->sec[i].data ||
2589 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2590 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2591 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2592 				    "Break since Data not valid or Empty section, sec = %d\n",
2593 				     i);
2594 			break;
2595 		}
2596 
2597 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2598 		if (ret)
2599 			return ret;
2600 	}
2601 
2602 	*first_ucode_section = last_read_idx;
2603 
2604 	return 0;
2606 }
2607 
2608 static int
2609 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2610 {
2611 	int ret = 0;
2612 	int first_ucode_section;
2613 
2614 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2615 		     image->is_dual_cpus ? "Dual" : "Single");
2616 
2617 	/* load to FW the binary non secured sections of CPU1 */
2618 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2619 	if (ret)
2620 		return ret;
2621 
2622 	if (image->is_dual_cpus) {
2623 		/* set CPU2 header address */
2624 		if (iwm_nic_lock(sc)) {
2625 			iwm_write_prph(sc,
2626 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2627 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2628 			iwm_nic_unlock(sc);
2629 		}
2630 
2631 		/* load to FW the binary sections of CPU2 */
2632 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2633 						 &first_ucode_section);
2634 		if (ret)
2635 			return ret;
2636 	}
2637 
2638 	iwm_enable_interrupts(sc);
2639 
2640 	/* release CPU reset */
2641 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2642 
2643 	return 0;
2644 }
2645 
2646 int
2647 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2648 	const struct iwm_fw_img *image)
2649 {
2650 	int ret = 0;
2651 	int first_ucode_section;
2652 
2653 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2654 		    image->is_dual_cpus ? "Dual" : "Single");
2655 
2656 	/* configure the ucode to be ready to get the secured image */
2657 	/* release CPU reset */
2658 	if (iwm_nic_lock(sc)) {
2659 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2660 		    IWM_RELEASE_CPU_RESET_BIT);
2661 		iwm_nic_unlock(sc);
2662 	}
2663 
2664 	/* load to FW the binary Secured sections of CPU1 */
2665 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2666 	    &first_ucode_section);
2667 	if (ret)
2668 		return ret;
2669 
2670 	/* load to FW the binary sections of CPU2 */
2671 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2672 	    &first_ucode_section);
2673 }
2674 
2675 /* XXX Get rid of this definition */
2676 static inline void
2677 iwm_enable_fw_load_int(struct iwm_softc *sc)
2678 {
2679 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2680 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2681 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2682 }
2683 
2684 /* XXX Add proper rfkill support code */
2685 static int
2686 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2687 {
2688 	int ret;
2689 
2690 	/* This may fail if AMT took ownership of the device */
2691 	if (iwm_prepare_card_hw(sc)) {
2692 		device_printf(sc->sc_dev,
2693 		    "%s: Exit HW not ready\n", __func__);
2694 		ret = EIO;
2695 		goto out;
2696 	}
2697 
2698 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2699 
2700 	iwm_disable_interrupts(sc);
2701 
2702 	/* make sure rfkill handshake bits are cleared */
2703 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2704 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2705 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2706 
2707 	/* clear (again), then enable host interrupts */
2708 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2709 
2710 	ret = iwm_nic_init(sc);
2711 	if (ret) {
2712 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2713 		goto out;
2714 	}
2715 
2716 	/*
2717 	 * Now, we load the firmware and don't want to be interrupted, even
2718 	 * by the RF-Kill interrupt (hence mask all interrupts besides the
2719 	 * FH_TX interrupt, which is needed to load the firmware). If the
2720 	 * RF-Kill switch is toggled, we will find out after having loaded
2721 	 * the firmware and return the proper value to the caller.
2722 	 */
2723 	iwm_enable_fw_load_int(sc);
2724 
2725 	/* really make sure rfkill handshake bits are cleared */
2726 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2727 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2728 
2729 	/* Load the given image to the HW */
2730 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2731 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2732 	else
2733 		ret = iwm_pcie_load_given_ucode(sc, fw);
2734 
2735 	/* XXX re-check RF-Kill state */
2736 
2737 out:
2738 	return ret;
2739 }
2740 
2741 static int
2742 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2743 {
2744 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2745 		.valid = htole32(valid_tx_ant),
2746 	};
2747 
2748 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2749 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2750 }
2751 
2752 static int
2753 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2754 {
2755 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2756 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2757 
2758 	/* Set parameters */
2759 	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2760 	phy_cfg_cmd.calib_control.event_trigger =
2761 	    sc->sc_default_calib[ucode_type].event_trigger;
2762 	phy_cfg_cmd.calib_control.flow_trigger =
2763 	    sc->sc_default_calib[ucode_type].flow_trigger;
2764 
2765 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2766 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2767 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2768 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2769 }
2770 
2771 static int
2772 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2773 {
2774 	struct iwm_mvm_alive_data *alive_data = data;
2775 	struct iwm_mvm_alive_resp_v3 *palive3;
2776 	struct iwm_mvm_alive_resp *palive;
2777 	struct iwm_umac_alive *umac;
2778 	struct iwm_lmac_alive *lmac1;
2779 	struct iwm_lmac_alive *lmac2 = NULL;
2780 	uint16_t status;
2781 
2782 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2783 		palive = (void *)pkt->data;
2784 		umac = &palive->umac_data;
2785 		lmac1 = &palive->lmac_data[0];
2786 		lmac2 = &palive->lmac_data[1];
2787 		status = le16toh(palive->status);
2788 	} else {
2789 		palive3 = (void *)pkt->data;
2790 		umac = &palive3->umac_data;
2791 		lmac1 = &palive3->lmac_data;
2792 		status = le16toh(palive3->status);
2793 	}
2794 
2795 	sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
2796 	if (lmac2)
2797 		sc->error_event_table[1] =
2798 			le32toh(lmac2->error_event_table_ptr);
2799 	sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
2800 	sc->umac_error_event_table = le32toh(umac->error_info_addr);
2801 	alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
2802 	alive_data->valid = status == IWM_ALIVE_STATUS_OK;
2803 	if (sc->umac_error_event_table)
2804 		sc->support_umac_log = TRUE;
2805 
2806 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2807 		    "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
2808 		    status, lmac1->ver_type, lmac1->ver_subtype);
2809 
2810 	if (lmac2)
2811 		IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");
2812 
2813 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2814 		    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2815 		    le32toh(umac->umac_major),
2816 		    le32toh(umac->umac_minor));
2817 
2818 	return TRUE;
2819 }
2820 
2821 static int
2822 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2823 	struct iwm_rx_packet *pkt, void *data)
2824 {
2825 	struct iwm_phy_db *phy_db = data;
2826 
2827 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2828 		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2829 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2830 			    __func__, pkt->hdr.code);
2831 		}
2832 		return TRUE;
2833 	}
2834 
2835 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2836 		device_printf(sc->sc_dev,
2837 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2838 	}
2839 
2840 	return FALSE;
2841 }
2842 
2843 static int
2844 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2845 	enum iwm_ucode_type ucode_type)
2846 {
2847 	struct iwm_notification_wait alive_wait;
2848 	struct iwm_mvm_alive_data alive_data;
2849 	const struct iwm_fw_img *fw;
2850 	enum iwm_ucode_type old_type = sc->cur_ucode;
2851 	int error;
2852 	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2853 
2854 	fw = &sc->sc_fw.img[ucode_type];
2855 	sc->cur_ucode = ucode_type;
2856 	sc->ucode_loaded = FALSE;
2857 
2858 	memset(&alive_data, 0, sizeof(alive_data));
2859 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2860 				   alive_cmd, NELEM(alive_cmd),
2861 				   iwm_alive_fn, &alive_data);
2862 
2863 	error = iwm_start_fw(sc, fw);
2864 	if (error) {
2865 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2866 		sc->cur_ucode = old_type;
2867 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2868 		return error;
2869 	}
2870 
2871 	/*
2872 	 * Some things may run in the background now, but we
2873 	 * just wait for the ALIVE notification here.
2874 	 */
2875 	IWM_UNLOCK(sc);
2876 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2877 				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
2878 	IWM_LOCK(sc);
2879 	if (error) {
2880 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2881 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2882 			if (iwm_nic_lock(sc)) {
2883 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2884 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2885 				iwm_nic_unlock(sc);
2886 			}
2887 			device_printf(sc->sc_dev,
2888 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2889 			    a, b);
2890 		}
2891 		sc->cur_ucode = old_type;
2892 		return error;
2893 	}
2894 
2895 	if (!alive_data.valid) {
2896 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2897 		    __func__);
2898 		sc->cur_ucode = old_type;
2899 		return EIO;
2900 	}
2901 
2902 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2903 
2904 	/*
2905 	 * Configure and operate the firmware paging mechanism.
2906 	 * The driver configures the paging flow only once; the CPU2 paging
2907 	 * image is included in the IWM_UCODE_INIT image.
2908 	 */
2909 	if (fw->paging_mem_size) {
2910 		error = iwm_save_fw_paging(sc, fw);
2911 		if (error) {
2912 			device_printf(sc->sc_dev,
2913 			    "%s: failed to save the FW paging image\n",
2914 			    __func__);
2915 			return error;
2916 		}
2917 
2918 		error = iwm_send_paging_cmd(sc, fw);
2919 		if (error) {
2920 			device_printf(sc->sc_dev,
2921 			    "%s: failed to send the paging cmd\n", __func__);
2922 			iwm_free_fw_paging(sc);
2923 			return error;
2924 		}
2925 	}
2926 
2927 	if (!error)
2928 		sc->ucode_loaded = TRUE;
2929 	return error;
2930 }
2931 
2932 /*
2933  * mvm misc bits
2934  */
2935 
2936 static int
2937 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2938 {
2939 	struct iwm_notification_wait calib_wait;
2940 	static const uint16_t init_complete[] = {
2941 		IWM_INIT_COMPLETE_NOTIF,
2942 		IWM_CALIB_RES_NOTIF_PHY_DB
2943 	};
2944 	int ret;
2945 
2946 	/* do not operate with rfkill switch turned on */
2947 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2948 		device_printf(sc->sc_dev,
2949 		    "radio is disabled by hardware switch\n");
2950 		return EPERM;
2951 	}
2952 
2953 	iwm_init_notification_wait(sc->sc_notif_wait,
2954 				   &calib_wait,
2955 				   init_complete,
2956 				   NELEM(init_complete),
2957 				   iwm_wait_phy_db_entry,
2958 				   sc->sc_phy_db);
2959 
2960 	/* Will also start the device */
2961 	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2962 	if (ret) {
2963 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2964 		    ret);
2965 		goto error;
2966 	}
2967 
2968 	if (justnvm) {
2969 		/* Read nvm */
2970 		ret = iwm_nvm_init(sc);
2971 		if (ret) {
2972 			device_printf(sc->sc_dev, "failed to read nvm\n");
2973 			goto error;
2974 		}
2975 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2976 		goto error;	/* not an error; just removes calib_wait */
2977 	}
2978 
2979 	ret = iwm_send_bt_init_conf(sc);
2980 	if (ret) {
2981 		device_printf(sc->sc_dev,
2982 		    "failed to send bt coex configuration: %d\n", ret);
2983 		goto error;
2984 	}
2985 
2986 	/* Send TX valid antennas before triggering calibrations */
2987 	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
2988 	if (ret) {
2989 		device_printf(sc->sc_dev,
2990 		    "failed to send antennas before calibration: %d\n", ret);
2991 		goto error;
2992 	}
2993 
2994 	/*
2995 	 * Send the phy configuration command to the init uCode
2996 	 * to start the 16.0 uCode init image internal calibrations.
2997 	 */
2998 	ret = iwm_send_phy_cfg_cmd(sc);
2999 	if (ret) {
3000 		device_printf(sc->sc_dev,
3001 		    "%s: Failed to run INIT calibrations: %d\n",
3002 		    __func__, ret);
3003 		goto error;
3004 	}
3005 
3006 	/*
3007 	 * Nothing to do but wait for the init complete notification
3008 	 * from the firmware.
3009 	 */
3010 	IWM_UNLOCK(sc);
3011 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3012 	    IWM_MVM_UCODE_CALIB_TIMEOUT);
3013 	IWM_LOCK(sc);
3014 
3016 	goto out;
3017 
3018 error:
3019 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3020 out:
3021 	return ret;
3022 }
3023 
3024 static int
3025 iwm_mvm_config_ltr(struct iwm_softc *sc)
3026 {
3027 	struct iwm_ltr_config_cmd cmd = {
3028 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
3029 	};
3030 
3031 	if (!sc->sc_ltr_enabled)
3032 		return 0;
3033 
3034 	return iwm_mvm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3035 }
3036 
3037 /*
3038  * receive side
3039  */
3040 
3041 /* (re)stock rx ring, called at init-time and at runtime */
3042 static int
3043 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3044 {
3045 	struct iwm_rx_ring *ring = &sc->rxq;
3046 	struct iwm_rx_data *data = &ring->data[idx];
3047 	struct mbuf *m;
3048 	bus_dmamap_t dmamap;
3049 	bus_dma_segment_t seg;
3050 	int nsegs, error;
3051 
3052 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3053 	if (m == NULL)
3054 		return ENOBUFS;
3055 
3056 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3057 #if defined(__DragonFly__)
3058 	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
3059 	    m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
3060 #else
3061 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3062 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3063 #endif
3064 	if (error != 0) {
3065 		device_printf(sc->sc_dev,
3066 		    "%s: can't map mbuf, error %d\n", __func__, error);
3067 		m_freem(m);
3068 		return error;
3069 	}
3070 
3071 	if (data->m != NULL)
3072 		bus_dmamap_unload(ring->data_dmat, data->map);
3073 
3074 	/* Swap ring->spare_map with data->map */
3075 	dmamap = data->map;
3076 	data->map = ring->spare_map;
3077 	ring->spare_map = dmamap;
3078 
3079 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3080 	data->m = m;
3081 
3082 	/* Update RX descriptor. */
3083 	KKASSERT((seg.ds_addr & 255) == 0);
3084 	ring->desc[idx] = htole32(seg.ds_addr >> 8);
3085 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3086 	    BUS_DMASYNC_PREWRITE);
3087 
3088 	return 0;
3089 }
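
/*
 * Worked example (hypothetical address): RX buffers must be 256-byte
 * aligned because the descriptor holds the DMA address shifted right by
 * 8; a buffer at physical address 0x12345600 goes into the ring as
 * htole32(0x123456).
 */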
3090 
3091 /*
3092  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3093  * Energy values are reported by the firmware as positive; negate them
3094  * to obtain dBm.  Account for missing antennas by replacing 0 values
3095  * with -256 dBm: practically zero power and an infeasible 8-bit value.
3096  */
3097 static int
3098 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3099 {
3100 	int energy_a, energy_b, energy_c, max_energy;
3101 	uint32_t val;
3102 
3103 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3104 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3105 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3106 	energy_a = energy_a ? -energy_a : -256;
3107 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3108 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3109 	energy_b = energy_b ? -energy_b : -256;
3110 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3111 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3112 	energy_c = energy_c ? -energy_c : -256;
3113 	max_energy = MAX(energy_a, energy_b);
3114 	max_energy = MAX(max_energy, energy_c);
3115 
3116 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3117 	    "energy In A %d B %d C %d , and max %d\n",
3118 	    energy_a, energy_b, energy_c, max_energy);
3119 
3120 	return max_energy;
3121 }
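
/*
 * Worked example (hypothetical readings): antenna energies of 50, 0 and
 * 40 become -50, -256 and -40 dBm, so the reported signal strength is
 * the maximum, -40 dBm; a missing antenna can never win the comparison.
 */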
3122 
3123 static void
3124 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3125 {
3126 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3127 
3128 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3129 
3130 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3131 }
3132 
3133 /*
3134  * Retrieve the average noise (in dBm) among receivers.
3135  */
3136 static int
3137 iwm_get_noise(struct iwm_softc *sc,
3138 	const struct iwm_mvm_statistics_rx_non_phy *stats)
3139 {
3140 	int i, total, nbant, noise;
3141 
3142 	total = nbant = noise = 0;
3143 	for (i = 0; i < 3; i++) {
3144 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3145 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3146 		    __func__, i, noise);
3147 
3148 		if (noise) {
3149 			total += noise;
3150 			nbant++;
3151 		}
3152 	}
3153 
3154 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3155 	    __func__, nbant, total);
3156 #if 0
3157 	/* There should be at least one antenna but check anyway. */
3158 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3159 #else
3160 	/* For now, just hard-code it to -96 to be safe */
3161 	return (-96);
3162 #endif
3163 }
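
/*
 * Worked example for the disabled average above (hypothetical numbers):
 * beacon silence RSSI values of 30, 28 and 0 give total = 58 across
 * nbant = 2 reporting antennas, i.e. (58 / 2) - 107 = -78 dBm.
 */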
3164 
3165 static void
3166 iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3167 {
3168 	struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3169 
3170 	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3171 	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3172 }
3173 
3174 /*
3175  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3176  *
3177  * Handles the actual data of the Rx packet from the fw
3178  */
3179 static boolean_t
3180 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3181 	boolean_t stolen)
3182 {
3183 	struct ieee80211com *ic = &sc->sc_ic;
3184 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3185 	struct ieee80211_frame *wh;
3186 	struct ieee80211_node *ni;
3187 	struct ieee80211_rx_stats rxs;
3188 	struct iwm_rx_phy_info *phy_info;
3189 	struct iwm_rx_mpdu_res_start *rx_res;
3190 	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3191 	uint32_t len;
3192 	uint32_t rx_pkt_status;
3193 	int rssi;
3194 
3195 	phy_info = &sc->sc_last_phy_info;
3196 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3197 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3198 	len = le16toh(rx_res->byte_count);
3199 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3200 
3201 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3202 		device_printf(sc->sc_dev,
3203 		    "dsp size out of range [0,20]: %d\n",
3204 		    phy_info->cfg_phy_cnt);
3205 		return FALSE;
3206 	}
3207 
3208 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3209 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3210 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3211 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3212 		return FALSE; /* drop */
3213 	}
3214 
3215 	rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3216 	/* Note: RSSI is absolute, i.e. a negative dBm value. */
3217 	if (rssi < IWM_MIN_DBM)
3218 		rssi = IWM_MIN_DBM;
3219 	else if (rssi > IWM_MAX_DBM)
3220 		rssi = IWM_MAX_DBM;
3221 
3222 	/* Map it to relative value */
3223 	rssi = rssi - sc->sc_noise;
3224 
3225 	/* replenish ring for the buffer we're going to feed to the sharks */
3226 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3227 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3228 		    __func__);
3229 		return FALSE;
3230 	}
3231 
3232 	m->m_data = pkt->data + sizeof(*rx_res);
3233 	m->m_pkthdr.len = m->m_len = len;
3234 
3235 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3236 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3237 
3238 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3239 
3240 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3241 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3242 	    __func__,
3243 	    le16toh(phy_info->channel),
3244 	    le16toh(phy_info->phy_flags));
3245 
3246 	/*
3247 	 * Populate an RX state struct with the provided information.
3248 	 */
3249 	bzero(&rxs, sizeof(rxs));
3250 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3251 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3252 	rxs.c_ieee = le16toh(phy_info->channel);
3253 	if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3254 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3255 	} else {
3256 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3257 	}
3258 	/* rssi is in 1/2db units */
3259 	rxs.rssi = rssi * 2;
3260 	rxs.nf = sc->sc_noise;
3261 
3262 	if (ieee80211_radiotap_active_vap(vap)) {
3263 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3264 
3265 		tap->wr_flags = 0;
3266 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3267 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3268 		tap->wr_chan_freq = htole16(rxs.c_freq);
3269 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3270 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3271 		tap->wr_dbm_antsignal = (int8_t)rssi;
3272 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3273 		tap->wr_tsft = phy_info->system_timestamp;
3274 		switch (phy_info->rate) {
3275 		/* CCK rates. */
3276 		case  10: tap->wr_rate =   2; break;
3277 		case  20: tap->wr_rate =   4; break;
3278 		case  55: tap->wr_rate =  11; break;
3279 		case 110: tap->wr_rate =  22; break;
3280 		/* OFDM rates. */
3281 		case 0xd: tap->wr_rate =  12; break;
3282 		case 0xf: tap->wr_rate =  18; break;
3283 		case 0x5: tap->wr_rate =  24; break;
3284 		case 0x7: tap->wr_rate =  36; break;
3285 		case 0x9: tap->wr_rate =  48; break;
3286 		case 0xb: tap->wr_rate =  72; break;
3287 		case 0x1: tap->wr_rate =  96; break;
3288 		case 0x3: tap->wr_rate = 108; break;
3289 		/* Unknown rate: should not happen. */
3290 		default:  tap->wr_rate =   0;
3291 		}
3292 	}
3293 
3294 	IWM_UNLOCK(sc);
3295 	if (ni != NULL) {
3296 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3297 		ieee80211_input_mimo(ni, m, &rxs);
3298 		ieee80211_free_node(ni);
3299 	} else {
3300 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3301 		ieee80211_input_mimo_all(ic, m, &rxs);
3302 	}
3303 	IWM_LOCK(sc);
3304 
3305 	return TRUE;
3306 }
3307 
3308 static int
3309 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3310 	struct iwm_node *in)
3311 {
3312 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3313 	struct ieee80211_node *ni = &in->in_ni;
3314 	struct ieee80211vap *vap = ni->ni_vap;
3315 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3316 	int failack = tx_resp->failure_frame;
3317 	int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3318 	boolean_t rate_matched;
3319 	uint8_t tx_resp_rate;
3320 	int ret;
3321 
3322 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3323 
3324 	/* Update rate control statistics. */
3325 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3326 	    __func__,
3327 	    (int) le16toh(tx_resp->status.status),
3328 	    (int) le16toh(tx_resp->status.sequence),
3329 	    tx_resp->frame_count,
3330 	    tx_resp->bt_kill_count,
3331 	    tx_resp->failure_rts,
3332 	    tx_resp->failure_frame,
3333 	    le32toh(tx_resp->initial_rate),
3334 	    (int) le16toh(tx_resp->wireless_media_time));
3335 
3336 	tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3337 
3338 	/* For rate control, ignore frames sent at a different initial rate. */
3339 	rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3340 
3341 	if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3342 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3343 		    "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3344 		    "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3345 	}
3346 
3347 	if (status != IWM_TX_STATUS_SUCCESS &&
3348 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3349 		if (rate_matched) {
3350 			ieee80211_ratectl_tx_complete(vap, ni,
3351 			    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3352 		}
3353 		ret = 1;
3354 	} else {
3355 		if (rate_matched) {
3356 			ieee80211_ratectl_tx_complete(vap, ni,
3357 			    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3358 		}
3359 		ret = 0;
3360 	}
3361 
3362 	if (rate_matched) {
3363 		int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3364 		new_rate = vap->iv_bss->ni_txrate;
3365 		if (new_rate != 0 && new_rate != cur_rate) {
3366 			struct iwm_node *in = IWM_NODE(vap->iv_bss);
3367 			iwm_setrates(sc, in, rix);
3368 			iwm_mvm_send_lq_cmd(sc, &in->in_lq, FALSE);
3369 		}
3370 	}
3371 
3372 	return ret;
3373 }
3374 
3375 static void
3376 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3377 {
3378 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3379 	int idx = cmd_hdr->idx;
3380 	int qid = cmd_hdr->qid;
3381 	struct iwm_tx_ring *ring = &sc->txq[qid];
3382 	struct iwm_tx_data *txd = &ring->data[idx];
3383 	struct iwm_node *in = txd->in;
3384 	struct mbuf *m = txd->m;
3385 	int status;
3386 
3387 	KASSERT(txd->done == 0, ("txd not done"));
3388 	KASSERT(txd->in != NULL, ("txd without node"));
3389 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3390 
3391 	sc->sc_tx_timer = 0;
3392 
3393 	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3394 
3395 	/* Unmap and free mbuf. */
3396 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3397 	bus_dmamap_unload(ring->data_dmat, txd->map);
3398 
3399 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3400 	    "free txd %p, in %p\n", txd, txd->in);
3401 	txd->done = 1;
3402 	txd->m = NULL;
3403 	txd->in = NULL;
3404 
3405 	ieee80211_tx_complete(&in->in_ni, m, status);
3406 
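	/*
	 * The ring has drained below the low watermark: clear this queue's
	 * bit in qfullmsk and, once no queue is full any more, push out
	 * whatever is still waiting in the send queue.
	 */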
3407 	if (--ring->queued < IWM_TX_RING_LOMARK) {
3408 		sc->qfullmsk &= ~(1 << ring->qid);
3409 		if (sc->qfullmsk == 0) {
3410 			iwm_start(sc);
3411 		}
3412 	}
3413 }
3414 
3415 /*
3416  * transmit side
3417  */
3418 
3419 /*
 * Process a "command done" firmware notification.  This is where we wake up
 * processes waiting for a synchronous command completion.
 * Taken from if_iwn.
3423  */
3424 static void
3425 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3426 {
3427 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3428 	struct iwm_tx_data *data;
3429 
3430 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3431 		return;	/* Not a command ack. */
3432 	}
3433 
3434 	data = &ring->data[pkt->hdr.idx];
3435 
3436 	/* If the command was mapped in an mbuf, free it. */
3437 	if (data->m != NULL) {
3438 		bus_dmamap_sync(ring->data_dmat, data->map,
3439 		    BUS_DMASYNC_POSTWRITE);
3440 		bus_dmamap_unload(ring->data_dmat, data->map);
3441 		m_freem(data->m);
3442 		data->m = NULL;
3443 	}
3444 	wakeup(&ring->desc[pkt->hdr.idx]);
3445 
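	/*
	 * Consistency check: in a well-formed ring, the index of the
	 * just-completed command plus the number of still-queued commands
	 * should equal the current write index, modulo the ring size.
	 */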
3446 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3447 		device_printf(sc->sc_dev,
3448 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3449 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3450 		/* XXX call iwm_force_nmi() */
3451 	}
3452 
3453 	KKASSERT(ring->queued > 0);
3454 	ring->queued--;
3455 	if (ring->queued == 0)
3456 		iwm_pcie_clear_cmd_in_flight(sc);
3457 }
3458 
3459 #if 0
3460 /*
3461  * necessary only for block ack mode
3462  */
3463 void
3464 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3465 	uint16_t len)
3466 {
3467 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3468 	uint16_t w_val;
3469 
3470 	scd_bc_tbl = sc->sched_dma.vaddr;
3471 
3472 	len += 8; /* magic numbers came naturally from paris */
3473 	len = roundup(len, 4) / 4;
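	/*
	 * Illustrative arithmetic: a 100-byte frame becomes 108 bytes,
	 * which rounds up to 27 dwords; with sta_id 2 the resulting
	 * table entry is (2 << 12) | 27 = 0x201b.
	 */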
3474 
3475 	w_val = htole16(sta_id << 12 | len);
3476 
3477 	/* Update TX scheduler. */
3478 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3479 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3480 	    BUS_DMASYNC_PREWRITE);
3481 
	/*
	 * Apparently the first IWM_TFD_QUEUE_SIZE_BC_DUP entries are
	 * mirrored past the end of the table, presumably so the
	 * scheduler can read a byte count without wrapping around.
	 */
3483 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3484 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3485 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3486 		    BUS_DMASYNC_PREWRITE);
3487 	}
3488 }
3489 #endif
3490 
3491 /*
3492  * Fill in the rate related information for a transmit command.
3493  */
3494 static uint8_t
3495 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3496 	struct mbuf *m, struct iwm_tx_cmd *tx)
3497 {
3498 	struct ieee80211com *ic = &sc->sc_ic;
3499 	struct ieee80211_node *ni = &in->in_ni;
3500 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
3501 	const struct ieee80211_txparam *tp = ni->ni_txparms;
3502 	const struct iwm_rate *rinfo;
3503 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3504 	int ridx, rate_flags;
3505 
3506 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3507 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3508 
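	/*
	 * Rate selection precedence, as implemented below: management
	 * frames use the management rate, multicast frames the multicast
	 * rate, a fixed unicast rate overrides rate control, EAPOL frames
	 * also use the management rate, and ordinary data frames defer to
	 * the firmware's programmed LQ rate table.
	 */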
3509 	if (type == IEEE80211_FC0_TYPE_MGT) {
3510 		ridx = iwm_rate2ridx(sc, tp->mgmtrate);
3511 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3512 		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		ridx = iwm_rate2ridx(sc, tp->mcastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
		ridx = iwm_rate2ridx(sc, tp->ucastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
	} else if (m->m_flags & M_EAPOL) {
		ridx = iwm_rate2ridx(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: EAPOL (%d)\n", __func__, tp->mgmtrate);
3525 	} else if (type == IEEE80211_FC0_TYPE_DATA) {
3526 		/* This is the index into the programmed table */
3527 		tx->initial_rate_index = 0;
3528 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3529 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA (%d)\n",
3530 		    __func__, ni->ni_txrate);
3531 		return ni->ni_txrate;
3532 	} else {
3533 		ridx = iwm_rate2ridx(sc, tp->mgmtrate);
3534 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3535 		    "%s: DEFAULT (%d)\n", __func__, tp->mgmtrate);
3536 	}
3537 
3538 	/*
	 * Sanity check ridx, and provide a fallback. If the rate lookup
3540 	 * ever fails, iwm_rate2ridx() will already print an error message.
3541 	 */
3542 	if (ridx < 0 || ridx > IWM_RIDX_MAX) {
3543 		if (ic->ic_curmode == IEEE80211_MODE_11A) {
3544 			/*
3545 			 * XXX this assumes the mode is either 11a or not 11a;
3546 			 * definitely won't work for 11n.
3547 			 */
3548 			ridx = IWM_RIDX_OFDM;
3549 		} else {
3550 			ridx = IWM_RIDX_CCK;
3551 		}
3552 	}
3553 
3554 	rinfo = &iwm_rates[ridx];
3555 
3556 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3557 	    "%s: frame type=%d, ridx=%d, rate=%d, CCK=%d\n",
3558 	    __func__, type, ridx, rinfo->rate, !! (IWM_RIDX_IS_CCK(ridx)));
3559 
3560 	/* XXX TODO: hard-coded TX antenna? */
3561 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3562 	if (IWM_RIDX_IS_CCK(ridx))
3563 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3564 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3565 
3566 	return rinfo->rate;
3567 }
3568 
3569 #define TB0_SIZE 16
3570 static int
3571 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3572 {
3573 	struct ieee80211com *ic = &sc->sc_ic;
3574 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3575 	struct iwm_node *in = IWM_NODE(ni);
3576 	struct iwm_tx_ring *ring;
3577 	struct iwm_tx_data *data;
3578 	struct iwm_tfd *desc;
3579 	struct iwm_device_cmd *cmd;
3580 	struct iwm_tx_cmd *tx;
3581 	struct ieee80211_frame *wh;
3582 	struct ieee80211_key *k = NULL;
3583 #if !defined(__DragonFly__)
3584 	struct mbuf *m1;
3585 #endif
3586 	uint32_t flags;
3587 	u_int hdrlen;
3588 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3589 	int nsegs;
3590 	uint8_t rate, tid, type;
3591 	int i, totlen, error, pad;
3592 
3593 	wh = mtod(m, struct ieee80211_frame *);
3594 	hdrlen = ieee80211_anyhdrsize(wh);
3595 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3596 	tid = 0;
3597 	ring = &sc->txq[ac];
3598 	desc = &ring->desc[ring->cur];
3599 	memset(desc, 0, sizeof(*desc));
3600 	data = &ring->data[ring->cur];
3601 
3602 	/* Fill out iwm_tx_cmd to send to the firmware */
3603 	cmd = &ring->cmd[ring->cur];
3604 	cmd->hdr.code = IWM_TX_CMD;
3605 	cmd->hdr.flags = 0;
3606 	cmd->hdr.qid = ring->qid;
3607 	cmd->hdr.idx = ring->cur;
3608 
3609 	tx = (void *)cmd->data;
3610 	memset(tx, 0, sizeof(*tx));
3611 
3612 	rate = iwm_tx_fill_cmd(sc, in, m, tx);
3613 
3614 	/* Encrypt the frame if need be. */
3615 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3616 		/* Retrieve key for TX && do software encryption. */
3617 		k = ieee80211_crypto_encap(ni, m);
3618 		if (k == NULL) {
3619 			m_freem(m);
3620 			return (ENOBUFS);
3621 		}
3622 		/* 802.11 header may have moved. */
3623 		wh = mtod(m, struct ieee80211_frame *);
3624 	}
3625 
3626 	if (ieee80211_radiotap_active_vap(vap)) {
3627 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3628 
3629 		tap->wt_flags = 0;
3630 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3631 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3632 		tap->wt_rate = rate;
3633 		if (k != NULL)
3634 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3635 		ieee80211_radiotap_tx(vap, m);
3636 	}
3637 
3638 
3639 	totlen = m->m_pkthdr.len;
3640 
3641 	flags = 0;
3642 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3643 		flags |= IWM_TX_CMD_FLG_ACK;
3644 	}
3645 
3646 	if (type == IEEE80211_FC0_TYPE_DATA
3647 	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3648 	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3649 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3650 	}
3651 
3652 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3653 	    type != IEEE80211_FC0_TYPE_DATA)
3654 		tx->sta_id = sc->sc_aux_sta.sta_id;
3655 	else
3656 		tx->sta_id = IWM_STATION_ID;
3657 
3658 	if (type == IEEE80211_FC0_TYPE_MGT) {
3659 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3660 
3661 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3662 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3663 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3664 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3665 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3666 		} else {
3667 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3668 		}
3669 	} else {
3670 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3671 	}
3672 
3673 	if (hdrlen & 3) {
3674 		/* First segment length must be a multiple of 4. */
3675 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3676 		pad = 4 - (hdrlen & 3);
3677 	} else
3678 		pad = 0;
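	/*
	 * Illustrative example: a 26-byte QoS data header has
	 * (hdrlen & 3) == 2, so two pad bytes keep the first DMA
	 * segment dword-aligned.
	 */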
3679 
3680 	tx->driver_txop = 0;
3681 	tx->next_frame_len = 0;
3682 
3683 	tx->len = htole16(totlen);
3684 	tx->tid_tspec = tid;
3685 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3686 
3687 	/* Set physical address of "scratch area". */
3688 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3689 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3690 
3691 	/* Copy 802.11 header in TX command. */
3692 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3693 
3694 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3695 
3696 	tx->sec_ctl = 0;
3697 	tx->tx_flags |= htole32(flags);
3698 
3699 	/* Trim 802.11 header. */
3700 	m_adj(m, hdrlen);
3701 #if defined(__DragonFly__)
3702 	error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
3703 					    segs, IWM_MAX_SCATTER - 2,
3704 					    &nsegs, BUS_DMA_NOWAIT);
3705 #else
3706 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3707 	    segs, &nsegs, BUS_DMA_NOWAIT);
3708 #endif
3709 	if (error != 0) {
3710 #if defined(__DragonFly__)
3711 		device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3712 		    error);
3713 		m_freem(m);
3714 		return error;
3715 #else
3716 		if (error != EFBIG) {
3717 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3718 			    error);
3719 			m_freem(m);
3720 			return error;
3721 		}
3722 		/* Too many DMA segments, linearize mbuf. */
3723 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3724 		if (m1 == NULL) {
3725 			device_printf(sc->sc_dev,
3726 			    "%s: could not defrag mbuf\n", __func__);
3727 			m_freem(m);
3728 			return (ENOBUFS);
3729 		}
3730 		m = m1;
3731 
3732 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3733 		    segs, &nsegs, BUS_DMA_NOWAIT);
3734 		if (error != 0) {
3735 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3736 			    error);
3737 			m_freem(m);
3738 			return error;
3739 		}
3740 #endif
3741 	}
3742 	data->m = m;
3743 	data->in = in;
3744 	data->done = 0;
3745 
3746 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3747 	    "sending txd %p, in %p\n", data, data->in);
3748 	KASSERT(data->in != NULL, ("node is NULL"));
3749 
3750 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3751 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3752 	    ring->qid, ring->cur, totlen, nsegs,
3753 	    le32toh(tx->tx_flags),
3754 	    le32toh(tx->rate_n_flags),
3755 	    tx->initial_rate_index
3756 	    );
3757 
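	/*
	 * Buffer layout: TB0 carries the first TB0_SIZE (16) bytes of the
	 * command, TB1 the rest of the TX command plus the (padded) 802.11
	 * header, and TB2 onwards the data payload segments.
	 */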
3758 	/* Fill TX descriptor. */
3759 	desc->num_tbs = 2 + nsegs;
3760 
3761 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3762 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3763 	    (TB0_SIZE << 4);
3764 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3765 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3766 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3767 	      + hdrlen + pad - TB0_SIZE) << 4);
3768 
3769 	/* Other DMA segments are for data payload. */
3770 	for (i = 0; i < nsegs; i++) {
3771 		seg = &segs[i];
3772 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
3773 		desc->tbs[i+2].hi_n_len = \
3774 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3775 		    | ((seg->ds_len) << 4);
3776 	}
3777 
3778 	bus_dmamap_sync(ring->data_dmat, data->map,
3779 	    BUS_DMASYNC_PREWRITE);
3780 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3781 	    BUS_DMASYNC_PREWRITE);
3782 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3783 	    BUS_DMASYNC_PREWRITE);
3784 
3785 #if 0
3786 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3787 #endif
3788 
3789 	/* Kick TX ring. */
3790 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3791 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3792 
3793 	/* Mark TX ring as full if we reach a certain threshold. */
3794 	if (++ring->queued > IWM_TX_RING_HIMARK) {
3795 		sc->qfullmsk |= 1 << ring->qid;
3796 	}
3797 
3798 	return 0;
3799 }
3800 
3801 static int
3802 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3803     const struct ieee80211_bpf_params *params)
3804 {
3805 	struct ieee80211com *ic = ni->ni_ic;
3806 	struct iwm_softc *sc = ic->ic_softc;
3807 	int error = 0;
3808 
3809 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3810 	    "->%s begin\n", __func__);
3811 
3812 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3813 		m_freem(m);
3814 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3815 		    "<-%s not RUNNING\n", __func__);
3816 		return (ENETDOWN);
	}
3818 
3819 	IWM_LOCK(sc);
3820 	/* XXX fix this */
	if (params == NULL) {
3822 		error = iwm_tx(sc, m, ni, 0);
3823 	} else {
3824 		error = iwm_tx(sc, m, ni, 0);
3825 	}
3826 	if (sc->sc_tx_timer == 0)
3827 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3828 	sc->sc_tx_timer = 5;
3829 	IWM_UNLOCK(sc);
3830 
	return (error);
3832 }
3833 
3834 /*
3835  * mvm/tx.c
3836  */
3837 
3838 /*
3839  * Note that there are transports that buffer frames before they reach
3840  * the firmware. This means that after flush_tx_path is called, the
3841  * queue might not be empty. The race-free way to handle this is to:
3842  * 1) set the station as draining
3843  * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 * (a disabled sketch of this sequence follows the function below)
 */
3846 int
3847 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3848 {
3849 	int ret;
3850 	struct iwm_tx_path_flush_cmd flush_cmd = {
3851 		.queues_ctl = htole32(tfd_msk),
3852 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3853 	};
3854 
3855 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3856 	    sizeof(flush_cmd), &flush_cmd);
3857 	if (ret)
		device_printf(sc->sc_dev,
3859 		    "Flushing tx queue failed: %d\n", ret);
3860 	return ret;
3861 }
3862 
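#if 0
/*
 * A minimal sketch of the drain sequence described above, assuming two
 * hypothetical helpers, iwm_mvm_drain_sta() and
 * iwm_trans_wait_tx_queues_empty(), which do not exist in this driver
 * and are named here for illustration only.
 */
static int
iwm_mvm_drain_tx_path_sketch(struct iwm_softc *sc, struct iwm_node *in)
{
	int error;

	/* 1) mark the station as draining */
	if ((error = iwm_mvm_drain_sta(sc, in, TRUE)) != 0)
		return error;
	/* 2) flush the Tx path */
	if ((error = iwm_mvm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC)) != 0)
		return error;
	/* 3) wait for the transport queues to be empty */
	return iwm_trans_wait_tx_queues_empty(sc, 0xf);
}
#endif
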
3863 static int
3864 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3865 {
3866 	struct iwm_time_quota_cmd cmd;
3867 	int i, idx, ret, num_active_macs, quota, quota_rem;
3868 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3869 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3870 	uint16_t id;
3871 
3872 	memset(&cmd, 0, sizeof(cmd));
3873 
3874 	/* currently, PHY ID == binding ID */
3875 	if (ivp) {
3876 		id = ivp->phy_ctxt->id;
3877 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3878 		colors[id] = ivp->phy_ctxt->color;
3879 
3880 		if (1)
3881 			n_ifs[id] = 1;
3882 	}
3883 
3884 	/*
3885 	 * The FW's scheduling session consists of
3886 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
	 * equally between all the bindings that require quota.
3888 	 */
3889 	num_active_macs = 0;
3890 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3891 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3892 		num_active_macs += n_ifs[i];
3893 	}
3894 
3895 	quota = 0;
3896 	quota_rem = 0;
3897 	if (num_active_macs) {
3898 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3899 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3900 	}
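	/*
	 * E.g. with one active MAC the binding gets the whole
	 * IWM_MVM_MAX_QUOTA; if three were active, each would get a third
	 * and the remainder would be added to the first binding below.
	 */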
3901 
3902 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3903 		if (colors[i] < 0)
3904 			continue;
3905 
3906 		cmd.quotas[idx].id_and_color =
3907 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3908 
3909 		if (n_ifs[i] <= 0) {
3910 			cmd.quotas[idx].quota = htole32(0);
3911 			cmd.quotas[idx].max_duration = htole32(0);
3912 		} else {
3913 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3914 			cmd.quotas[idx].max_duration = htole32(0);
3915 		}
3916 		idx++;
3917 	}
3918 
3919 	/* Give the remainder of the session to the first binding */
3920 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3921 
3922 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3923 	    sizeof(cmd), &cmd);
3924 	if (ret)
3925 		device_printf(sc->sc_dev,
3926 		    "%s: Failed to send quota: %d\n", __func__, ret);
3927 	return ret;
3928 }
3929 
3930 /*
3931  * ieee80211 routines
3932  */
3933 
3934 /*
 * Change to AUTH state in the 802.11 state machine.  Roughly matches what
3936  * Linux does in bss_info_changed().
3937  */
3938 static int
3939 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3940 {
3941 	struct ieee80211_node *ni;
3942 	struct iwm_node *in;
3943 	struct iwm_vap *iv = IWM_VAP(vap);
3944 	uint32_t duration;
3945 	int error;
3946 
3947 	/*
	 * XXX I have a feeling that the vap node is being
3949 	 * freed from underneath us. Grr.
3950 	 */
3951 	ni = ieee80211_ref_node(vap->iv_bss);
3952 	in = IWM_NODE(ni);
3953 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3954 	    "%s: called; vap=%p, bss ni=%p\n",
3955 	    __func__,
3956 	    vap,
3957 	    ni);
3958 	IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
3959 	    __func__, ether_sprintf(ni->ni_bssid));
3960 
3961 	in->in_assoc = 0;
3962 	iv->iv_auth = 1;
3963 
3964 	/*
	 * Firmware bug: it'll crash if the beacon interval is less
	 * than 16. Since we can't avoid connecting altogether, refuse
	 * the station state change; this will cause net80211 to abandon
	 * its attempts to connect to this AP, and eventually wpa_s will
	 * blacklist the AP...
3970 	 */
3971 	if (ni->ni_intval < 16) {
3972 		device_printf(sc->sc_dev,
3973 		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
3974 		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
3975 		error = EINVAL;
3976 		goto out;
3977 	}
3978 
3979 	error = iwm_allow_mcast(vap, sc);
3980 	if (error) {
3981 		device_printf(sc->sc_dev,
3982 		    "%s: failed to set multicast\n", __func__);
3983 		goto out;
3984 	}
3985 
3986 	/*
3987 	 * This is where it deviates from what Linux does.
3988 	 *
3989 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
3990 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
3991 	 * and always does a mac_ctx_changed().
3992 	 *
	 * The openbsd port doesn't attempt to do that - it resets things
	 * at odd states and does the add here.
	 *
	 * So, until the state handling is fixed (i.e., we never reset
3997 	 * the NIC except for a firmware failure, which should drag
3998 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
3999 	 * contexts that are required), let's do a dirty hack here.
4000 	 */
4001 	if (iv->is_uploaded) {
4002 		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4003 			device_printf(sc->sc_dev,
4004 			    "%s: failed to update MAC\n", __func__);
4005 			goto out;
4006 		}
4007 	} else {
4008 		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4009 			device_printf(sc->sc_dev,
4010 			    "%s: failed to add MAC\n", __func__);
4011 			goto out;
4012 		}
4013 	}
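	/*
	 * sc_firmware_state tracks how far the firmware setup got:
	 * 1 = MAC context added, 2 = binding added, 3 = station added.
	 * iwm_bring_down_firmware() tears these down in reverse order.
	 */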
4014 	sc->sc_firmware_state = 1;
4015 
4016 	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4017 	    in->in_ni.ni_chan, 1, 1)) != 0) {
4018 		device_printf(sc->sc_dev,
4019 		    "%s: failed update phy ctxt\n", __func__);
4020 		goto out;
4021 	}
4022 	iv->phy_ctxt = &sc->sc_phyctxt[0];
4023 
4024 	if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4025 		device_printf(sc->sc_dev,
4026 		    "%s: binding update cmd\n", __func__);
4027 		goto out;
4028 	}
4029 	sc->sc_firmware_state = 2;
4030 	/*
4031 	 * Authentication becomes unreliable when powersaving is left enabled
4032 	 * here. Powersaving will be activated again when association has
4033 	 * finished or is aborted.
4034 	 */
4035 	iv->ps_disabled = TRUE;
4036 	error = iwm_mvm_power_update_mac(sc);
4037 	iv->ps_disabled = FALSE;
4038 	if (error != 0) {
4039 		device_printf(sc->sc_dev,
4040 		    "%s: failed to update power management\n",
4041 		    __func__);
4042 		goto out;
4043 	}
4044 	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4045 		device_printf(sc->sc_dev,
4046 		    "%s: failed to add sta\n", __func__);
4047 		goto out;
4048 	}
4049 	sc->sc_firmware_state = 3;
4050 
4051 	/*
4052 	 * Prevent the FW from wandering off channel during association
4053 	 * by "protecting" the session with a time event.
4054 	 */
4055 	/* XXX duration is in units of TU, not MS */
4056 	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4057 	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);
4058 
4059 	error = 0;
4060 out:
4061 	if (error != 0)
4062 		iv->iv_auth = 0;
4063 	ieee80211_free_node(ni);
4064 	return (error);
4065 }
4066 
4067 static struct ieee80211_node *
4068 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4069 {
4070 	return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4071 	    M_INTWAIT | M_ZERO);
4072 }
4073 
4074 static uint8_t
4075 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4076 {
4077 	uint8_t plcp = rate_n_flags & 0xff;
4078 	int i;
4079 
4080 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4081 		if (iwm_rates[i].plcp == plcp)
4082 			return iwm_rates[i].rate;
4083 	}
4084 	return 0;
4085 }
4086 
4087 uint8_t
4088 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4089 {
4090 	int i;
4091 	uint8_t rval;
4092 
4093 	for (i = 0; i < rs->rs_nrates; i++) {
4094 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4095 		if (rval == iwm_rates[ridx].rate)
4096 			return rs->rs_rates[i];
4097 	}
4098 
4099 	return 0;
4100 }
4101 
4102 static int
4103 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4104 {
4105 	int i;
4106 
4107 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4108 		if (iwm_rates[i].rate == rate)
4109 			return i;
4110 	}
4111 
4112 	device_printf(sc->sc_dev,
4113 	    "%s: WARNING: device rate for %u not found!\n",
4114 	    __func__, rate);
4115 
4116 	return -1;
4117 }
4118 
4119 static void
4120 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4121 {
4122 	struct ieee80211_node *ni = &in->in_ni;
4123 	struct iwm_lq_cmd *lq = &in->in_lq;
4124 	struct ieee80211_rateset *rs = &ni->ni_rates;
4125 	int nrates = rs->rs_nrates;
4126 	int i, ridx, tab = 0;
4127 	int txant = 0;
4128 
4129 	KKASSERT(rix >= 0 && rix < nrates);
4130 
4131 	if (nrates > nitems(lq->rs_table)) {
4132 		device_printf(sc->sc_dev,
4133 		    "%s: node supports %d rates, driver handles "
4134 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4135 		return;
4136 	}
4137 	if (nrates == 0) {
4138 		device_printf(sc->sc_dev,
4139 		    "%s: node supports 0 rates, odd!\n", __func__);
4140 		return;
4141 	}
4142 	nrates = imin(rix + 1, nrates);
4143 
4144 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4145 	    "%s: nrates=%d\n", __func__, nrates);
4146 
4147 	/* then construct a lq_cmd based on those */
4148 	memset(lq, 0, sizeof(*lq));
4149 	lq->sta_id = IWM_STATION_ID;
4150 
4151 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4152 	if (ni->ni_flags & IEEE80211_NODE_HT)
4153 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4154 
4155 	/*
	 * Are these used? (We don't do SISO or MIMO.)
	 * They need to be set to non-zero, though, or we get an error.
4158 	 */
4159 	lq->single_stream_ant_msk = 1;
4160 	lq->dual_stream_ant_msk = 1;
4161 
4162 	/*
4163 	 * Build the actual rate selection table.
4164 	 * The lowest bits are the rates.  Additionally,
4165 	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna.
	 * Note that we add the rates highest rate first
	 * (the opposite of ni_rates).
4169 	 */
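	/*
	 * Illustrative example: with the CCK rates {2, 4, 11, 22}
	 * (in 0.5 Mb/s units) and rix = 3, rs_table[0..3] end up
	 * holding the 11, 5.5, 2 and 1 Mb/s entries, each with an
	 * antenna selected and IWM_RATE_MCS_CCK_MSK set.
	 */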
4170 	for (i = 0; i < nrates; i++) {
4171 		int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4172 		int nextant;
4173 
4174 		/* Map 802.11 rate to HW rate index. */
4175 		ridx = iwm_rate2ridx(sc, rate);
4176 		if (ridx == -1)
4177 			continue;
4178 
4179 		if (txant == 0)
4180 			txant = iwm_mvm_get_valid_tx_ant(sc);
4181 		nextant = 1<<(ffs(txant)-1);
4182 		txant &= ~nextant;
4183 
4184 		tab = iwm_rates[ridx].plcp;
4185 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4186 		if (IWM_RIDX_IS_CCK(ridx))
4187 			tab |= IWM_RATE_MCS_CCK_MSK;
4188 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4189 		    "station rate i=%d, rate=%d, hw=%x\n",
4190 		    i, iwm_rates[ridx].rate, tab);
4191 		lq->rs_table[i] = htole32(tab);
4192 	}
4193 	/* then fill the rest with the lowest possible rate */
4194 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4195 		KASSERT(tab != 0, ("invalid tab"));
4196 		lq->rs_table[i] = htole32(tab);
4197 	}
4198 }
4199 
4200 static int
4201 iwm_media_change(struct ifnet *ifp)
4202 {
4203 	struct ieee80211vap *vap = ifp->if_softc;
4204 	struct ieee80211com *ic = vap->iv_ic;
4205 	struct iwm_softc *sc = ic->ic_softc;
4206 	int error;
4207 
4208 	error = ieee80211_media_change(ifp);
4209 	if (error != ENETRESET)
4210 		return error;
4211 
4212 	IWM_LOCK(sc);
4213 	if (ic->ic_nrunning > 0) {
4214 		iwm_stop(sc);
4215 		iwm_init(sc);
4216 	}
4217 	IWM_UNLOCK(sc);
4218 	return error;
4219 }
4220 
4221 static void
4222 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
4223 {
4224 	struct iwm_vap *ivp = IWM_VAP(vap);
4225 	int error;
4226 
4227 	/* Avoid Tx watchdog triggering, when transfers get dropped here. */
4228 	sc->sc_tx_timer = 0;
4229 
4230 	ivp->iv_auth = 0;
4231 	if (sc->sc_firmware_state == 3) {
4232 		iwm_xmit_queue_drain(sc);
4233 //		iwm_mvm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
4234 		error = iwm_mvm_rm_sta(sc, vap, TRUE);
4235 		if (error) {
4236 			device_printf(sc->sc_dev,
4237 			    "%s: Failed to remove station: %d\n",
4238 			    __func__, error);
4239 		}
4240 	}
4241 	if (sc->sc_firmware_state == 3) {
4242 		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4243 		if (error) {
4244 			device_printf(sc->sc_dev,
4245 			    "%s: Failed to change mac context: %d\n",
4246 			    __func__, error);
4247 		}
4248 	}
4249 	if (sc->sc_firmware_state == 3) {
4250 		error = iwm_mvm_sf_update(sc, vap, FALSE);
4251 		if (error) {
4252 			device_printf(sc->sc_dev,
4253 			    "%s: Failed to update smart FIFO: %d\n",
4254 			    __func__, error);
4255 		}
4256 	}
4257 	if (sc->sc_firmware_state == 3) {
4258 		error = iwm_mvm_rm_sta_id(sc, vap);
4259 		if (error) {
4260 			device_printf(sc->sc_dev,
4261 			    "%s: Failed to remove station id: %d\n",
4262 			    __func__, error);
4263 		}
4264 	}
4265 	if (sc->sc_firmware_state == 3) {
4266 		error = iwm_mvm_update_quotas(sc, NULL);
4267 		if (error) {
4268 			device_printf(sc->sc_dev,
4269 			    "%s: Failed to update PHY quota: %d\n",
4270 			    __func__, error);
4271 		}
4272 	}
4273 	if (sc->sc_firmware_state == 3) {
4274 		/* XXX Might need to specify bssid correctly. */
4275 		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4276 		if (error) {
4277 			device_printf(sc->sc_dev,
4278 			    "%s: Failed to change mac context: %d\n",
4279 			    __func__, error);
4280 		}
4281 	}
4282 	if (sc->sc_firmware_state == 3) {
4283 		sc->sc_firmware_state = 2;
4284 	}
4285 	if (sc->sc_firmware_state > 1) {
4286 		error = iwm_mvm_binding_remove_vif(sc, ivp);
4287 		if (error) {
4288 			device_printf(sc->sc_dev,
4289 			    "%s: Failed to remove channel ctx: %d\n",
4290 			    __func__, error);
4291 		}
4292 	}
4293 	if (sc->sc_firmware_state > 1) {
4294 		sc->sc_firmware_state = 1;
4295 	}
4296 	ivp->phy_ctxt = NULL;
4297 	if (sc->sc_firmware_state > 0) {
4298 		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4299 		if (error) {
4300 			device_printf(sc->sc_dev,
4301 			    "%s: Failed to change mac context: %d\n",
4302 			    __func__, error);
4303 		}
4304 	}
4305 	if (sc->sc_firmware_state > 0) {
4306 		error = iwm_mvm_power_update_mac(sc);
4307 		if (error != 0) {
4308 			device_printf(sc->sc_dev,
4309 			    "%s: failed to update power management\n",
4310 			    __func__);
4311 		}
4312 	}
4313 	sc->sc_firmware_state = 0;
4314 }
4315 
4316 static int
4317 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4318 {
4319 	struct iwm_vap *ivp = IWM_VAP(vap);
4320 	struct ieee80211com *ic = vap->iv_ic;
4321 	struct iwm_softc *sc = ic->ic_softc;
4322 	struct iwm_node *in;
4323 	int error;
4324 
4325 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4326 	    "switching state %s -> %s arg=0x%x\n",
4327 	    ieee80211_state_name[vap->iv_state],
4328 	    ieee80211_state_name[nstate],
4329 	    arg);
4330 
4331 	IEEE80211_UNLOCK(ic);
4332 	IWM_LOCK(sc);
4333 
4334 	if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
4335 	    (nstate == IEEE80211_S_AUTH ||
4336 	     nstate == IEEE80211_S_ASSOC ||
4337 	     nstate == IEEE80211_S_RUN)) {
4338 		/* Stop blinking for a scan, when authenticating. */
4339 		iwm_led_blink_stop(sc);
4340 	}
4341 
4342 	if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
4343 		iwm_mvm_led_disable(sc);
4344 		/* disable beacon filtering if we're hopping out of RUN */
4345 		iwm_mvm_disable_beacon_filter(sc);
4346 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4347 			in->in_assoc = 0;
4348 	}
4349 
4350 	if ((vap->iv_state == IEEE80211_S_AUTH ||
4351 	     vap->iv_state == IEEE80211_S_ASSOC ||
4352 	     vap->iv_state == IEEE80211_S_RUN) &&
4353 	    (nstate == IEEE80211_S_INIT ||
4354 	     nstate == IEEE80211_S_SCAN ||
4355 	     nstate == IEEE80211_S_AUTH)) {
4356 		iwm_mvm_stop_session_protection(sc, ivp);
4357 	}
4358 
4359 	if ((vap->iv_state == IEEE80211_S_RUN ||
4360 	     vap->iv_state == IEEE80211_S_ASSOC) &&
4361 	    nstate == IEEE80211_S_INIT) {
4362 		/*
4363 		 * In this case, iv_newstate() wants to send an 80211 frame on
4364 		 * the network that we are leaving. So we need to call it,
4365 		 * before tearing down all the firmware state.
4366 		 */
4367 		IWM_UNLOCK(sc);
4368 		IEEE80211_LOCK(ic);
4369 		ivp->iv_newstate(vap, nstate, arg);
4370 		IEEE80211_UNLOCK(ic);
4371 		IWM_LOCK(sc);
4372 		iwm_bring_down_firmware(sc, vap);
4373 		IWM_UNLOCK(sc);
4374 		IEEE80211_LOCK(ic);
4375 		return 0;
4376 	}
4377 
4378 	switch (nstate) {
4379 	case IEEE80211_S_INIT:
4380 	case IEEE80211_S_SCAN:
4381 		break;
4382 
4383 	case IEEE80211_S_AUTH:
4384 		iwm_bring_down_firmware(sc, vap);
4385 		if ((error = iwm_auth(vap, sc)) != 0) {
4386 			device_printf(sc->sc_dev,
4387 			    "%s: could not move to auth state: %d\n",
4388 			    __func__, error);
4389 			iwm_bring_down_firmware(sc, vap);
4390 			IWM_UNLOCK(sc);
4391 			IEEE80211_LOCK(ic);
4392 			return 1;
4393 		}
4394 		break;
4395 
4396 	case IEEE80211_S_ASSOC:
4397 		/*
4398 		 * EBS may be disabled due to previous failures reported by FW.
		 * Reset the EBS status here, assuming the environment has changed.
4400 		 */
4401 		sc->last_ebs_successful = TRUE;
4402 		break;
4403 
4404 	case IEEE80211_S_RUN:
4405 		in = IWM_NODE(vap->iv_bss);
		/*
		 * Update the association state now that we have it all
		 * (e.g., the associd comes in at this point).
		 */
4408 		error = iwm_mvm_update_sta(sc, in);
4409 		if (error != 0) {
4410 			device_printf(sc->sc_dev,
4411 			    "%s: failed to update STA\n", __func__);
4412 			IWM_UNLOCK(sc);
4413 			IEEE80211_LOCK(ic);
4414 			return error;
4415 		}
4416 		in->in_assoc = 1;
4417 		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4418 		if (error != 0) {
4419 			device_printf(sc->sc_dev,
4420 			    "%s: failed to update MAC: %d\n", __func__, error);
4421 		}
4422 
4423 		iwm_mvm_sf_update(sc, vap, FALSE);
4424 		iwm_mvm_enable_beacon_filter(sc, ivp);
4425 		iwm_mvm_power_update_mac(sc);
4426 		iwm_mvm_update_quotas(sc, ivp);
4427 		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4428 		iwm_setrates(sc, in, rix);
4429 
4430 		if ((error = iwm_mvm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4431 			device_printf(sc->sc_dev,
4432 			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4433 		}
4434 
4435 		iwm_mvm_led_enable(sc);
4436 		break;
4437 
4438 	default:
4439 		break;
4440 	}
4441 	IWM_UNLOCK(sc);
4442 	IEEE80211_LOCK(ic);
4443 
4444 	return (ivp->iv_newstate(vap, nstate, arg));
4445 }
4446 
4447 void
4448 iwm_endscan_cb(void *arg, int pending)
4449 {
4450 	struct iwm_softc *sc = arg;
4451 	struct ieee80211com *ic = &sc->sc_ic;
4452 
4453 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4454 	    "%s: scan ended\n",
4455 	    __func__);
4456 
4457 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4458 }
4459 
4460 static int
4461 iwm_send_bt_init_conf(struct iwm_softc *sc)
4462 {
4463 	struct iwm_bt_coex_cmd bt_cmd;
4464 
4465 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4466 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4467 
4468 	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4469 	    &bt_cmd);
4470 }
4471 
4472 static boolean_t
4473 iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4474 {
4475 	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4476 	boolean_t tlv_lar = fw_has_capa(&sc->sc_fw.ucode_capa,
4477 					IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4478 
4479 	if (iwm_lar_disable)
4480 		return FALSE;
4481 
4482 	/*
4483 	 * Enable LAR only if it is supported by the FW (TLV) &&
4484 	 * enabled in the NVM
4485 	 */
4486 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4487 		return nvm_lar && tlv_lar;
4488 	else
4489 		return tlv_lar;
4490 }
4491 
4492 static boolean_t
4493 iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4494 {
4495 	return fw_has_api(&sc->sc_fw.ucode_capa,
4496 			  IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4497 	       fw_has_capa(&sc->sc_fw.ucode_capa,
4498 			   IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4499 }
4500 
4501 static int
4502 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4503 {
4504 	struct iwm_mcc_update_cmd mcc_cmd;
4505 	struct iwm_host_cmd hcmd = {
4506 		.id = IWM_MCC_UPDATE_CMD,
4507 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4508 		.data = { &mcc_cmd },
4509 	};
4510 	int ret;
4511 #ifdef IWM_DEBUG
4512 	struct iwm_rx_packet *pkt;
4513 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4514 	struct iwm_mcc_update_resp *mcc_resp;
4515 	int n_channels;
4516 	uint16_t mcc;
4517 #endif
4518 	int resp_v2 = fw_has_capa(&sc->sc_fw.ucode_capa,
4519 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4520 
4521 	if (!iwm_mvm_is_lar_supported(sc)) {
4522 		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4523 		    __func__);
4524 		return 0;
4525 	}
4526 
4527 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4528 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4529 	if (iwm_mvm_is_wifi_mcc_supported(sc))
4530 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4531 	else
4532 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4533 
4534 	if (resp_v2)
4535 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4536 	else
4537 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4538 
4539 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4540 	    "send MCC update to FW with '%c%c' src = %d\n",
4541 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4542 
4543 	ret = iwm_send_cmd(sc, &hcmd);
4544 	if (ret)
4545 		return ret;
4546 
4547 #ifdef IWM_DEBUG
4548 	pkt = hcmd.resp_pkt;
4549 
4550 	/* Extract MCC response */
4551 	if (resp_v2) {
4552 		mcc_resp = (void *)pkt->data;
4553 		mcc = mcc_resp->mcc;
4554 		n_channels =  le32toh(mcc_resp->n_channels);
4555 	} else {
4556 		mcc_resp_v1 = (void *)pkt->data;
4557 		mcc = mcc_resp_v1->mcc;
4558 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4559 	}
4560 
	/* Workaround for a FW/NVM issue: it returns 0x00 for the world domain. */
4562 	if (mcc == 0)
4563 		mcc = 0x3030;  /* "00" - world */
4564 
4565 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4566 	    "regulatory domain '%c%c' (%d channels available)\n",
4567 	    mcc >> 8, mcc & 0xff, n_channels);
4568 #endif
4569 	iwm_free_resp(sc, &hcmd);
4570 
4571 	return 0;
4572 }
4573 
4574 static void
4575 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4576 {
4577 	struct iwm_host_cmd cmd = {
4578 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4579 		.len = { sizeof(uint32_t), },
4580 		.data = { &backoff, },
4581 	};
4582 
4583 	if (iwm_send_cmd(sc, &cmd) != 0) {
4584 		device_printf(sc->sc_dev,
4585 		    "failed to change thermal tx backoff\n");
4586 	}
4587 }
4588 
4589 static int
4590 iwm_init_hw(struct iwm_softc *sc)
4591 {
4592 	struct ieee80211com *ic = &sc->sc_ic;
4593 	int error, i, ac;
4594 
4595 	sc->sf_state = IWM_SF_UNINIT;
4596 
4597 	if ((error = iwm_start_hw(sc)) != 0) {
4598 		kprintf("iwm_start_hw: failed %d\n", error);
4599 		return error;
4600 	}
4601 
4602 	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4603 		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
4604 		return error;
4605 	}
4606 
4607 	/*
	 * We should stop and restart the HW, since the INIT
	 * image has just been loaded.
4610 	 */
4611 	iwm_stop_device(sc);
4612 	sc->sc_ps_disabled = FALSE;
4613 	if ((error = iwm_start_hw(sc)) != 0) {
4614 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4615 		return error;
4616 	}
4617 
	/* Restart, this time with the regular firmware. */
4619 	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4620 	if (error) {
4621 		device_printf(sc->sc_dev, "could not load firmware\n");
4622 		goto error;
4623 	}
4624 
4625 	error = iwm_mvm_sf_update(sc, NULL, FALSE);
4626 	if (error)
4627 		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4628 
4629 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4630 		device_printf(sc->sc_dev, "bt init conf failed\n");
4631 		goto error;
4632 	}
4633 
4634 	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4635 	if (error != 0) {
4636 		device_printf(sc->sc_dev, "antenna config failed\n");
4637 		goto error;
4638 	}
4639 
4640 	/* Send phy db control command and then phy db calibration */
4641 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4642 		goto error;
4643 
4644 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4645 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4646 		goto error;
4647 	}
4648 
4649 	/* Add auxiliary station for scanning */
4650 	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4651 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4652 		goto error;
4653 	}
4654 
4655 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4656 		/*
4657 		 * The channel used here isn't relevant as it's
4658 		 * going to be overwritten in the other flows.
4659 		 * For now use the first channel we have.
4660 		 */
4661 		if ((error = iwm_mvm_phy_ctxt_add(sc,
4662 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4663 			goto error;
4664 	}
4665 
4666 	/* Initialize tx backoffs to the minimum. */
4667 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4668 		iwm_mvm_tt_tx_backoff(sc, 0);
4669 
4670 	if (iwm_mvm_config_ltr(sc) != 0)
4671 		device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
4672 
4673 	error = iwm_mvm_power_update_device(sc);
4674 	if (error)
4675 		goto error;
4676 
4677 	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4678 		goto error;
4679 
4680 	if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4681 		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4682 			goto error;
4683 	}
4684 
4685 	/* Enable Tx queues. */
4686 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4687 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4688 		    iwm_mvm_ac_to_tx_fifo[ac]);
4689 		if (error)
4690 			goto error;
4691 	}
4692 
4693 	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4694 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4695 		goto error;
4696 	}
4697 
4698 	return 0;
4699 
4700  error:
4701 	iwm_stop_device(sc);
4702 	return error;
4703 }
4704 
4705 /* Allow multicast from our BSSID. */
4706 static int
4707 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4708 {
4709 	struct ieee80211_node *ni = vap->iv_bss;
4710 	struct iwm_mcast_filter_cmd *cmd;
4711 	size_t size;
4712 	int error;
4713 
4714 	size = roundup(sizeof(*cmd), 4);
4715 	cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4716 	if (cmd == NULL)
4717 		return ENOMEM;
4718 	cmd->filter_own = 1;
4719 	cmd->port_id = 0;
4720 	cmd->count = 0;
4721 	cmd->pass_all = 1;
4722 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4723 
4724 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4725 	    IWM_CMD_SYNC, size, cmd);
4726 	kfree(cmd, M_DEVBUF);
4727 
4728 	return (error);
4729 }
4730 
4731 /*
4732  * ifnet interfaces
4733  */
4734 
4735 static void
4736 iwm_init(struct iwm_softc *sc)
4737 {
4738 	int error;
4739 
4740 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4741 		return;
4742 	}
4743 	sc->sc_generation++;
4744 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4745 
4746 	if ((error = iwm_init_hw(sc)) != 0) {
4747 		kprintf("iwm_init_hw failed %d\n", error);
4748 		iwm_stop(sc);
4749 		return;
4750 	}
4751 
4752 	/*
4753 	 * Ok, firmware loaded and we are jogging
4754 	 */
4755 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4756 }
4757 
4758 static int
4759 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4760 {
4761 	struct iwm_softc *sc;
4762 	int error;
4763 
4764 	sc = ic->ic_softc;
4765 
4766 	IWM_LOCK(sc);
4767 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4768 		IWM_UNLOCK(sc);
4769 		return (ENXIO);
4770 	}
4771 	error = mbufq_enqueue(&sc->sc_snd, m);
4772 	if (error) {
4773 		IWM_UNLOCK(sc);
4774 		return (error);
4775 	}
4776 	iwm_start(sc);
4777 	IWM_UNLOCK(sc);
4778 	return (0);
4779 }
4780 
4781 /*
4782  * Dequeue packets from sendq and call send.
4783  */
4784 static void
4785 iwm_start(struct iwm_softc *sc)
4786 {
4787 	struct ieee80211_node *ni;
4788 	struct mbuf *m;
4789 	int ac = 0;
4790 
4791 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4792 	while (sc->qfullmsk == 0 &&
4793 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4794 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4795 		if (iwm_tx(sc, m, ni, ac) != 0) {
4796 			if_inc_counter(ni->ni_vap->iv_ifp,
4797 			    IFCOUNTER_OERRORS, 1);
4798 			ieee80211_free_node(ni);
4799 			continue;
4800 		}
4801 		if (sc->sc_tx_timer == 0) {
4802 			callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
4803 			    sc);
4804 		}
4805 		sc->sc_tx_timer = 15;
4806 	}
4807 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4808 }
4809 
4810 static void
4811 iwm_stop(struct iwm_softc *sc)
4812 {
4813 
4814 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4815 	sc->sc_flags |= IWM_FLAG_STOPPED;
4816 	sc->sc_generation++;
4817 	iwm_led_blink_stop(sc);
4818 	sc->sc_tx_timer = 0;
4819 	iwm_stop_device(sc);
4820 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4821 }
4822 
4823 static void
4824 iwm_watchdog(void *arg)
4825 {
4826 	struct iwm_softc *sc = arg;
4827 
4828 	if (sc->sc_attached == 0)
4829 		return;
4830 
4831 	if (sc->sc_tx_timer > 0) {
4832 		if (--sc->sc_tx_timer == 0) {
4833 			device_printf(sc->sc_dev, "device timeout\n");
4834 #ifdef IWM_DEBUG
4835 			iwm_nic_error(sc);
4836 #endif
4837 			iwm_stop(sc);
4838 #if defined(__DragonFly__)
4839 			++sc->sc_ic.ic_oerrors;
4840 #else
4841 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4842 #endif
4843 			return;
4844 		}
4845 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4846 	}
4847 }
4848 
4849 static void
4850 iwm_parent(struct ieee80211com *ic)
4851 {
4852 	struct iwm_softc *sc = ic->ic_softc;
4853 	int startall = 0;
4854 
4855 	IWM_LOCK(sc);
4856 	if (ic->ic_nrunning > 0) {
4857 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4858 			iwm_init(sc);
4859 			startall = 1;
4860 		}
4861 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4862 		iwm_stop(sc);
4863 	IWM_UNLOCK(sc);
4864 	if (startall)
4865 		ieee80211_start_all(ic);
4866 }
4867 
4868 /*
4869  * The interrupt side of things
4870  */
4871 
4872 /*
4873  * error dumping routines are from iwlwifi/mvm/utils.c
4874  */
4875 
4876 /*
4877  * Note: This structure is read from the device with IO accesses,
4878  * and the reading already does the endian conversion. As it is
4879  * read with uint32_t-sized accesses, any members with a different size
4880  * need to be ordered correctly though!
4881  */
4882 struct iwm_error_event_table {
4883 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4884 	uint32_t error_id;		/* type of error */
4885 	uint32_t trm_hw_status0;	/* TRM HW status */
4886 	uint32_t trm_hw_status1;	/* TRM HW status */
4887 	uint32_t blink2;		/* branch link */
4888 	uint32_t ilink1;		/* interrupt link */
4889 	uint32_t ilink2;		/* interrupt link */
4890 	uint32_t data1;		/* error-specific data */
4891 	uint32_t data2;		/* error-specific data */
4892 	uint32_t data3;		/* error-specific data */
4893 	uint32_t bcon_time;		/* beacon timer */
4894 	uint32_t tsf_low;		/* network timestamp function timer */
4895 	uint32_t tsf_hi;		/* network timestamp function timer */
4896 	uint32_t gp1;		/* GP1 timer register */
4897 	uint32_t gp2;		/* GP2 timer register */
4898 	uint32_t fw_rev_type;	/* firmware revision type */
4899 	uint32_t major;		/* uCode version major */
4900 	uint32_t minor;		/* uCode version minor */
4901 	uint32_t hw_ver;		/* HW Silicon version */
4902 	uint32_t brd_ver;		/* HW board version */
4903 	uint32_t log_pc;		/* log program counter */
4904 	uint32_t frame_ptr;		/* frame pointer */
4905 	uint32_t stack_ptr;		/* stack pointer */
4906 	uint32_t hcmd;		/* last host command header */
4907 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
4908 				 * rxtx_flag */
4909 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
4910 				 * host_flag */
4911 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
4912 				 * enc_flag */
4913 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
4914 				 * time_flag */
4915 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
4916 				 * wico interrupt */
4917 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
4918 	uint32_t wait_event;		/* wait event() caller address */
4919 	uint32_t l2p_control;	/* L2pControlField */
4920 	uint32_t l2p_duration;	/* L2pDurationField */
4921 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
4922 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
4923 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
4924 				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* date and time of the firmware
				 * compilation */
4927 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
4928 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4929 
4930 /*
4931  * UMAC error struct - relevant starting from family 8000 chip.
4932  * Note: This structure is read from the device with IO accesses,
4933  * and the reading already does the endian conversion. As it is
4934  * read with u32-sized accesses, any members with a different size
4935  * need to be ordered correctly though!
4936  */
4937 struct iwm_umac_error_event_table {
4938 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4939 	uint32_t error_id;	/* type of error */
4940 	uint32_t blink1;	/* branch link */
4941 	uint32_t blink2;	/* branch link */
4942 	uint32_t ilink1;	/* interrupt link */
4943 	uint32_t ilink2;	/* interrupt link */
4944 	uint32_t data1;		/* error-specific data */
4945 	uint32_t data2;		/* error-specific data */
4946 	uint32_t data3;		/* error-specific data */
4947 	uint32_t umac_major;
4948 	uint32_t umac_minor;
4949 	uint32_t frame_pointer;	/* core register 27*/
4950 	uint32_t stack_pointer;	/* core register 28 */
4951 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
4952 	uint32_t nic_isr_pref;	/* ISR status register */
4953 } __packed;
4954 
4955 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4956 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
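
/*
 * table.valid counts the entries in the firmware error log; the dump
 * routines below print their header whenever at least one entry is
 * present (i.e. valid * ERROR_ELEM_SIZE >= ERROR_START_OFFSET).
 */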
4957 
4958 #ifdef IWM_DEBUG
4959 struct {
4960 	const char *name;
4961 	uint8_t num;
4962 } advanced_lookup[] = {
4963 	{ "NMI_INTERRUPT_WDG", 0x34 },
4964 	{ "SYSASSERT", 0x35 },
4965 	{ "UCODE_VERSION_MISMATCH", 0x37 },
4966 	{ "BAD_COMMAND", 0x38 },
4967 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4968 	{ "FATAL_ERROR", 0x3D },
4969 	{ "NMI_TRM_HW_ERR", 0x46 },
4970 	{ "NMI_INTERRUPT_TRM", 0x4C },
4971 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4972 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4973 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4974 	{ "NMI_INTERRUPT_HOST", 0x66 },
4975 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
4976 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
4977 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4978 	{ "ADVANCED_SYSASSERT", 0 },
4979 };
4980 
4981 static const char *
4982 iwm_desc_lookup(uint32_t num)
4983 {
4984 	int i;
4985 
4986 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4987 		if (advanced_lookup[i].num == num)
4988 			return advanced_lookup[i].name;
4989 
4990 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4991 	return advanced_lookup[i].name;
4992 }
4993 
4994 static void
4995 iwm_nic_umac_error(struct iwm_softc *sc)
4996 {
4997 	struct iwm_umac_error_event_table table;
4998 	uint32_t base;
4999 
5000 	base = sc->umac_error_event_table;
5001 
5002 	if (base < 0x800000) {
5003 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5004 		    base);
5005 		return;
5006 	}
5007 
5008 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5009 		device_printf(sc->sc_dev, "reading errlog failed\n");
5010 		return;
5011 	}
5012 
5013 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5014 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5015 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5016 		    sc->sc_flags, table.valid);
5017 	}
5018 
5019 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5020 		iwm_desc_lookup(table.error_id));
5021 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5022 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5023 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5024 	    table.ilink1);
5025 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5026 	    table.ilink2);
5027 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5028 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5029 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5030 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5031 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5032 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5033 	    table.frame_pointer);
5034 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5035 	    table.stack_pointer);
5036 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5037 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5038 	    table.nic_isr_pref);
5039 }
5040 
5041 /*
5042  * Support for dumping the error log seemed like a good idea ...
5043  * but it's mostly hex junk and the only sensible thing is the
5044  * hw/ucode revision (which we know anyway).  Since it's here,
5045  * I'll just leave it in, just in case e.g. the Intel guys want to
5046  * help us decipher some "ADVANCED_SYSASSERT" later.
5047  */
5048 static void
5049 iwm_nic_error(struct iwm_softc *sc)
5050 {
5051 	struct iwm_error_event_table table;
5052 	uint32_t base;
5053 
5054 	device_printf(sc->sc_dev, "dumping device error log\n");
5055 	base = sc->error_event_table[0];
5056 	if (base < 0x800000) {
5057 		device_printf(sc->sc_dev,
5058 		    "Invalid error log pointer 0x%08x\n", base);
5059 		return;
5060 	}
5061 
5062 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5063 		device_printf(sc->sc_dev, "reading errlog failed\n");
5064 		return;
5065 	}
5066 
5067 	if (!table.valid) {
5068 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5069 		return;
5070 	}
5071 
5072 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5073 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5074 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5075 		    sc->sc_flags, table.valid);
5076 	}
5077 
5078 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5079 	    iwm_desc_lookup(table.error_id));
5080 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5081 	    table.trm_hw_status0);
5082 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5083 	    table.trm_hw_status1);
5084 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5085 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5086 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5087 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5088 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5089 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5090 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5091 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5092 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5093 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5094 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5095 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5096 	    table.fw_rev_type);
5097 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5098 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5099 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5100 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5101 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5102 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5103 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5104 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5105 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5106 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5107 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5108 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5109 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5110 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5111 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5112 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5113 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5114 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5115 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5116 
5117 	if (sc->umac_error_event_table)
5118 		iwm_nic_umac_error(sc);
5119 }
5120 #endif
5121 
5122 static void
5123 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5124 {
5125 	struct ieee80211com *ic = &sc->sc_ic;
5126 	struct iwm_cmd_response *cresp;
5127 	struct mbuf *m1;
5128 	uint32_t offset = 0;
5129 	uint32_t maxoff = IWM_RBUF_SIZE;
5130 	uint32_t nextoff;
5131 	boolean_t stolen = FALSE;
5132 
5133 #define HAVEROOM(a)	\
5134     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5135 
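	/*
	 * A single RX buffer may hold several firmware packets laid out
	 * back to back, each padded to IWM_FH_RSCSR_FRAME_ALIGN; walk
	 * them until the buffer is exhausted or a terminator is seen.
	 */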
5136 	while (HAVEROOM(offset)) {
5137 		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5138 		    offset);
5139 		int qid, idx, code, len;
5140 
5141 		qid = pkt->hdr.qid;
5142 		idx = pkt->hdr.idx;
5143 
5144 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5145 
5146 		/*
		 * We randomly get these from the firmware, no idea why.
		 * They at least seem harmless, so just ignore them for now.
5149 		 */
5150 		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5151 		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5152 			break;
5153 		}
5154 
5155 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5156 		    "rx packet qid=%d idx=%d type=%x\n",
5157 		    qid & ~0x80, pkt->hdr.idx, code);
5158 
5159 		len = iwm_rx_packet_len(pkt);
5160 		len += sizeof(uint32_t); /* account for status word */
5161 		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
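		/*
		 * Packets are padded out to IWM_FH_RSCSR_FRAME_ALIGN, so
		 * the next packet header always starts on an aligned
		 * boundary.
		 */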
5162 
5163 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5164 
5165 		switch (code) {
5166 		case IWM_REPLY_RX_PHY_CMD:
5167 			iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5168 			break;
5169 
5170 		case IWM_REPLY_RX_MPDU_CMD: {
5171 			/*
5172 			 * If this is the last frame in the RX buffer, we
5173 			 * can hand the mbuf straight up the stack here.
5174 			 */
5175 			struct iwm_rx_packet *nextpkt = mtodoff(m,
5176 			    struct iwm_rx_packet *, nextoff);
5177 			if (!HAVEROOM(nextoff) ||
5178 			    (nextpkt->hdr.code == 0 &&
5179 			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5180 			     nextpkt->hdr.idx == 0) ||
5181 			    (nextpkt->len_n_flags ==
5182 			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5183 				if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5184 					stolen = FALSE;
5185 					/* Make sure we abort the loop */
5186 					nextoff = maxoff;
5187 				}
5188 				break;
5189 			}
5190 
5191 			/*
5192 			 * Use m_copym instead of m_split, because that
5193 			 * makes it easier to keep a valid rx buffer in
5194 			 * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5195 			 *
5196 			 * We need to start m_copym() at offset 0, to get the
5197 			 * M_PKTHDR flag preserved.
5198 			 */
5199 			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5200 			if (m1) {
5201 				if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5202 					stolen = TRUE;
5203 				else
5204 					m_freem(m1);
5205 			}
5206 			break;
5207 		}
5208 
5209 		case IWM_TX_CMD:
5210 			iwm_mvm_rx_tx_cmd(sc, pkt);
5211 			break;
5212 
5213 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5214 			struct iwm_missed_beacons_notif *resp;
5215 			int missed;
5216 
5217 			/* XXX look at mac_id to determine interface ID */
5218 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5219 
5220 			resp = (void *)pkt->data;
5221 			missed = le32toh(resp->consec_missed_beacons);
5222 
5223 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5224 			    "%s: MISSED_BEACON: mac_id=%d, "
5225 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5226 			    "num_rx=%d\n",
5227 			    __func__,
5228 			    le32toh(resp->mac_id),
5229 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5230 			    le32toh(resp->consec_missed_beacons),
5231 			    le32toh(resp->num_expected_beacons),
5232 			    le32toh(resp->num_recvd_beacons));
5233 
5234 			/* Be paranoid */
5235 			if (vap == NULL)
5236 				break;
5237 
5238 			/* XXX no net80211 locking? */
5239 			if (vap->iv_state == IEEE80211_S_RUN &&
5240 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5241 				if (missed > vap->iv_bmissthreshold) {
5242 					/* XXX bad locking; turn into task */
5243 					IWM_UNLOCK(sc);
5244 					ieee80211_beacon_miss(ic);
5245 					IWM_LOCK(sc);
5246 				}
5247 			}
5248 
5249 			break; }
5250 
5251 		case IWM_MFUART_LOAD_NOTIFICATION:
5252 			break;
5253 
5254 		case IWM_MVM_ALIVE:
5255 			break;
5256 
5257 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5258 			break;
5259 
5260 		case IWM_STATISTICS_NOTIFICATION:
5261 			iwm_mvm_handle_rx_statistics(sc, pkt);
5262 			break;
5263 
5264 		case IWM_NVM_ACCESS_CMD:
5265 		case IWM_MCC_UPDATE_CMD:
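			/*
			 * sc_wantresp packs the queue id of an outstanding
			 * synchronous command into the upper 16 bits and
			 * its descriptor index into the lower 16.
			 */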
5266 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5267 				memcpy(sc->sc_cmd_resp,
5268 				    pkt, sizeof(sc->sc_cmd_resp));
5269 			}
5270 			break;
5271 
5272 		case IWM_MCC_CHUB_UPDATE_CMD: {
5273 			struct iwm_mcc_chub_notif *notif;
5274 			notif = (void *)pkt->data;
5275 
5276 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5277 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5278 			sc->sc_fw_mcc[2] = '\0';
5279 			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5280 			    "fw source %d sent CC '%s'\n",
5281 			    notif->source_id, sc->sc_fw_mcc);
5282 			break;
5283 		}
5284 
5285 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5286 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5287 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5288 			struct iwm_dts_measurement_notif_v1 *notif;
5289 
5290 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5291 				device_printf(sc->sc_dev,
5292 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5293 				break;
5294 			}
5295 			notif = (void *)pkt->data;
5296 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5297 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5298 			    notif->temp);
5299 			break;
5300 		}
5301 
5302 		case IWM_PHY_CONFIGURATION_CMD:
5303 		case IWM_TX_ANT_CONFIGURATION_CMD:
5304 		case IWM_ADD_STA:
5305 		case IWM_MAC_CONTEXT_CMD:
5306 		case IWM_REPLY_SF_CFG_CMD:
5307 		case IWM_POWER_TABLE_CMD:
5308 		case IWM_LTR_CONFIG:
5309 		case IWM_PHY_CONTEXT_CMD:
5310 		case IWM_BINDING_CONTEXT_CMD:
5311 		case IWM_TIME_EVENT_CMD:
5312 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5313 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5314 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5315 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5316 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5317 		case IWM_REPLY_BEACON_FILTERING_CMD:
5318 		case IWM_MAC_PM_POWER_TABLE:
5319 		case IWM_TIME_QUOTA_CMD:
5320 		case IWM_REMOVE_STA:
5321 		case IWM_TXPATH_FLUSH:
5322 		case IWM_LQ_CMD:
5323 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5324 				 IWM_FW_PAGING_BLOCK_CMD):
5325 		case IWM_BT_CONFIG:
5326 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5327 			cresp = (void *)pkt->data;
5328 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5329 				memcpy(sc->sc_cmd_resp,
5330 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5331 			}
5332 			break;
5333 
5334 		/* ignore */
5335 		case IWM_PHY_DB_CMD:
5336 			break;
5337 
5338 		case IWM_INIT_COMPLETE_NOTIF:
5339 			break;
5340 
5341 		case IWM_SCAN_OFFLOAD_COMPLETE:
5342 			iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5343 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5344 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5345 				ieee80211_runtask(ic, &sc->sc_es_task);
5346 			}
5347 			break;
5348 
5349 		case IWM_SCAN_ITERATION_COMPLETE: {
5350 			struct iwm_lmac_scan_complete_notif *notif;
5351 			notif = (void *)pkt->data;
5352 			break;
5353 		}
5354 
5355 		case IWM_SCAN_COMPLETE_UMAC:
5356 			iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5357 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5358 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5359 				ieee80211_runtask(ic, &sc->sc_es_task);
5360 			}
5361 			break;
5362 
5363 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5364 			struct iwm_umac_scan_iter_complete_notif *notif;
5365 			notif = (void *)pkt->data;
5366 
5367 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5368 			    "complete, status=0x%x, %d channels scanned\n",
5369 			    notif->status, notif->scanned_channels);
5370 			break;
5371 		}
5372 
5373 		case IWM_REPLY_ERROR: {
5374 			struct iwm_error_resp *resp;
5375 			resp = (void *)pkt->data;
5376 
5377 			device_printf(sc->sc_dev,
5378 			    "firmware error 0x%x, cmd 0x%x\n",
5379 			    le32toh(resp->error_type),
5380 			    resp->cmd_id);
5381 			break;
5382 		}
5383 
5384 		case IWM_TIME_EVENT_NOTIFICATION:
5385 			iwm_mvm_rx_time_event_notif(sc, pkt);
5386 			break;
5387 
5388 		/*
5389 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5390 		 * messages. Just ignore them for now.
5391 		 */
5392 		case IWM_DEBUG_LOG_MSG:
5393 			break;
5394 
5395 		case IWM_MCAST_FILTER_CMD:
5396 			break;
5397 
5398 		case IWM_SCD_QUEUE_CFG: {
5399 			struct iwm_scd_txq_cfg_rsp *rsp;
5400 			rsp = (void *)pkt->data;
5401 
5402 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5403 			    "queue cfg token=0x%x sta_id=%d "
5404 			    "tid=%d scd_queue=%d\n",
5405 			    rsp->token, rsp->sta_id, rsp->tid,
5406 			    rsp->scd_queue);
5407 			break;
5408 		}
5409 
5410 		default:
5411 			device_printf(sc->sc_dev,
5412 			    "frame %d/%d %x UNHANDLED (this should "
5413 			    "not happen)\n", qid & ~0x80, idx,
5414 			    pkt->len_n_flags);
5415 			break;
5416 		}
5417 
5418 		/*
5419 		 * Why test bit 0x80?  The Linux driver:
5420 		 *
5421 		 * There is one exception:  uCode sets bit 15 when it
5422 		 * originates the response/notification, i.e. when the
5423 		 * response/notification is not a direct response to a
5424 		 * command sent by the driver.  For example, uCode issues
5425 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5426 		 * it is not a direct response to any driver command.
5427 		 *
5428 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5429 		 * uses a slightly different format for pkt->hdr, and "qid"
5430 		 * is actually the upper byte of a two-byte field.
5431 		 */
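		/*
		 * Illustration: a direct response to a driver command on
		 * queue 2, index 5 arrives with hdr.qid == 0x02 (bit 7
		 * clear) and is fed to iwm_cmd_done(); a firmware-originated
		 * notification on the same queue would carry qid == 0x82.
		 */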
5432 		if (!(qid & (1 << 7)))
5433 			iwm_cmd_done(sc, pkt);
5434 
5435 		offset = nextoff;
5436 	}
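	/*
	 * If frames were handed up as copies ("stolen"),
	 * iwm_mvm_rx_rx_mpdu() is expected to have re-stocked the ring
	 * slot with a fresh buffer, so the original mbuf is ours to free.
	 * In the direct-feed case above, stolen is cleared again because
	 * the mbuf itself now belongs to the stack.
	 */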
5437 	if (stolen)
5438 		m_freem(m);
5439 #undef HAVEROOM
5440 }
5441 
5442 /*
5443  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5444  * Basic structure from if_iwn
5445  */
5446 static void
5447 iwm_notif_intr(struct iwm_softc *sc)
5448 {
5449 	uint16_t hw;
5450 
5451 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5452 	    BUS_DMASYNC_POSTREAD);
5453 
5454 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
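	/*
	 * closed_rb_num is the 12-bit index of the most recent receive
	 * buffer the firmware has finished with; every slot between
	 * rxq.cur and it is ready to be processed.
	 */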
5455 
5456 	/*
5457 	 * Process responses
5458 	 */
5459 	while (sc->rxq.cur != hw) {
5460 		struct iwm_rx_ring *ring = &sc->rxq;
5461 		struct iwm_rx_data *data = &ring->data[ring->cur];
5462 
5463 		bus_dmamap_sync(ring->data_dmat, data->map,
5464 		    BUS_DMASYNC_POSTREAD);
5465 
5466 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5467 		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5468 		iwm_handle_rxb(sc, data->m);
5469 
5470 		ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5471 	}
5472 
5473 	/*
5474 	 * Tell the firmware that it can reuse the ring entries that
5475 	 * we have just processed.
5476 	 * The hardware appears to require the write pointer to be
5477 	 * rounded down to a multiple of 8.
5478 	 */
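	/*
	 * Example: having processed up to slot 37, we write back
	 * rounddown2(36, 8) == 32; the remaining slots are handed back
	 * once the pointer crosses the next 8-slot boundary.
	 */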
5479 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5480 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5481 }
5482 
5483 static void
5484 iwm_intr(void *arg)
5485 {
5486 	struct iwm_softc *sc = arg;
5487 	int handled = 0;
5488 	int r1, r2, rv = 0;
5489 	int isperiodic = 0;
5490 
5491 #if defined(__DragonFly__)
5492 	if (sc->sc_mem == NULL) {
5493 		kprintf("iwm_intr: detached\n");
5494 		return;
5495 	}
5496 #endif
5497 	IWM_LOCK(sc);
5498 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5499 
5500 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5501 		uint32_t *ict = sc->ict_dma.vaddr;
5502 		int tmp;
5503 
5504 		tmp = htole32(ict[sc->ict_cur]);
5505 		if (!tmp)
5506 			goto out_ena;
5507 
5508 		/*
5509 		 * ok, there was something.  keep plowing until we have all.
5510 		 */
5511 		r1 = r2 = 0;
5512 		while (tmp) {
5513 			r1 |= tmp;
5514 			ict[sc->ict_cur] = 0;
5515 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5516 			tmp = htole32(ict[sc->ict_cur]);
5517 		}
5518 
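		/*
		 * r1 now holds the ICT-compacted causes: the low byte maps
		 * to INT bits 0-7 and the high byte to INT bits 24-31,
		 * e.g. 0x8001 stands for IWM_CSR_INT_BIT_FH_RX |
		 * IWM_CSR_INT_BIT_ALIVE.  It is widened back out below.
		 */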
5519 		/* An all-ones mask is bogus (e.g. hardware gone); drop it. */
5520 		if (r1 == 0xffffffff)
5521 			r1 = 0;
5522 
5523 		/* H/w bug w/a (per Linux): a lost RX bit (15) is mirrored in bits 18-19. */
5524 		if (r1 & 0xc0000)
5525 			r1 |= 0x8000;
5526 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5527 	} else {
5528 		r1 = IWM_READ(sc, IWM_CSR_INT);
5529 		/* Hardware gone, e.g. the device was removed. */
5530 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5531 			goto out;
5532 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5533 	}
5534 	if (r1 == 0 && r2 == 0) {
5535 		goto out_ena;
5536 	}
5537 
5538 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5539 
5540 	/* Safely ignore these bits for debug checks below */
5541 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5542 
5543 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5544 		int i;
5545 		struct ieee80211com *ic = &sc->sc_ic;
5546 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5547 
5548 #ifdef IWM_DEBUG
5549 		iwm_nic_error(sc);
5550 #endif
5551 		/* Dump driver status (TX and RX rings) while we're here. */
5552 		device_printf(sc->sc_dev, "driver status:\n");
5553 		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5554 			struct iwm_tx_ring *ring = &sc->txq[i];
5555 			device_printf(sc->sc_dev,
5556 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5557 			    "queued=%-3d\n",
5558 			    i, ring->qid, ring->cur, ring->queued);
5559 		}
5560 		device_printf(sc->sc_dev,
5561 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5562 		device_printf(sc->sc_dev,
5563 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5564 
5565 		/* Reset our firmware state tracking. */
5566 		sc->sc_firmware_state = 0;
5567 		/* Don't stop the device; just do a VAP restart */
5568 		IWM_UNLOCK(sc);
5569 
5570 		if (vap == NULL) {
5571 			kprintf("%s: null vap\n", __func__);
5572 			return;
5573 		}
5574 
5575 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5576 		    "restarting\n", __func__, vap->iv_state);
5577 
5578 		ieee80211_restart_all(ic);
5579 		return;
5580 	}
5581 
5582 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5583 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5584 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5585 		iwm_stop(sc);
5586 		rv = 1;
5587 		goto out;
5588 	}
5589 
5590 	/* firmware chunk loaded */
5591 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5592 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5593 		handled |= IWM_CSR_INT_BIT_FH_TX;
5594 		sc->sc_fw_chunk_done = 1;
5595 		wakeup(&sc->sc_fw);
5596 	}
5597 
5598 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5599 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5600 		if (iwm_check_rfkill(sc)) {
5601 			device_printf(sc->sc_dev,
5602 			    "%s: rfkill switch, disabling interface\n",
5603 			    __func__);
5604 			iwm_stop(sc);
5605 		}
5606 	}
5607 
5608 	/*
5609 	 * The Linux driver uses periodic interrupts to avoid races.
5610 	 * We adopt the same scheme here.
5611 	 */
5612 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5613 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5614 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5615 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5616 			IWM_WRITE_1(sc,
5617 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5618 		isperiodic = 1;
5619 	}
5620 
5621 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5622 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5623 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5624 
5625 		iwm_notif_intr(sc);
5626 
5627 		/* enable periodic interrupt, see above */
5628 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5629 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5630 			    IWM_CSR_INT_PERIODIC_ENA);
5631 	}
5632 
5633 	if (__predict_false(r1 & ~handled))
5634 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5635 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5636 	rv = 1;
5637 
5638  out_ena:
5639 	iwm_restore_interrupts(sc);
5640  out:
5641 	IWM_UNLOCK(sc);
5642 	return;
5643 }
5644 
5645 /*
5646  * Autoconf glue-sniffing
5647  */
5648 #define	PCI_VENDOR_INTEL		0x8086
5649 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5650 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5651 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5652 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5653 #define	PCI_PRODUCT_INTEL_WL_3168	0x24fb
5654 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5655 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5656 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5657 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5658 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5659 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5660 #define	PCI_PRODUCT_INTEL_WL_8265	0x24fd
5661 
5662 static const struct iwm_devices {
5663 	uint16_t		device;
5664 	const struct iwm_cfg	*cfg;
5665 } iwm_devices[] = {
5666 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5667 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5668 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5669 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5670 	{ PCI_PRODUCT_INTEL_WL_3168,   &iwm3168_cfg },
5671 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5672 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5673 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5674 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5675 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5676 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5677 	{ PCI_PRODUCT_INTEL_WL_8265,   &iwm8265_cfg },
5678 };
5679 
5680 static int
5681 iwm_probe(device_t dev)
5682 {
5683 	int i;
5684 
5685 	for (i = 0; i < nitems(iwm_devices); i++) {
5686 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5687 		    pci_get_device(dev) == iwm_devices[i].device) {
5688 			device_set_desc(dev, iwm_devices[i].cfg->name);
5689 			return (BUS_PROBE_DEFAULT);
5690 		}
5691 	}
5692 
5693 	return (ENXIO);
5694 }
5695 
5696 static int
5697 iwm_dev_check(device_t dev)
5698 {
5699 	struct iwm_softc *sc;
5700 	uint16_t devid;
5701 	int i;
5702 
5703 	sc = device_get_softc(dev);
5704 
5705 	devid = pci_get_device(dev);
5706 	for (i = 0; i < NELEM(iwm_devices); i++) {
5707 		if (iwm_devices[i].device == devid) {
5708 			sc->cfg = iwm_devices[i].cfg;
5709 			return (0);
5710 		}
5711 	}
5712 	device_printf(dev, "unknown adapter type\n");
5713 	return (ENXIO);
5714 }
5715 
5716 /* PCI registers */
5717 #define PCI_CFG_RETRY_TIMEOUT	0x041
5718 
5719 static int
5720 iwm_pci_attach(device_t dev)
5721 {
5722 	struct iwm_softc *sc;
5723 	int count, error, rid;
5724 	uint16_t reg;
5725 #if defined(__DragonFly__)
5726 	int irq_flags;
5727 #endif
5728 
5729 	sc = device_get_softc(dev);
5730 
5731 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5732 	 * PCI Tx retries from interfering with C3 CPU state */
5733 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5734 
5735 	/* Enable bus-mastering and hardware bug workaround. */
5736 	pci_enable_busmaster(dev);
5737 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5738 	/* Legacy-interrupt (!MSI) workaround: clear the INTx state bit. */
5739 	if (reg & PCIM_STATUS_INTxSTATE) {
5740 		reg &= ~PCIM_STATUS_INTxSTATE;
5741 	}
5742 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5743 
5744 	rid = PCIR_BAR(0);
5745 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5746 	    RF_ACTIVE);
5747 	if (sc->sc_mem == NULL) {
5748 		device_printf(sc->sc_dev, "can't map mem space\n");
5749 		return (ENXIO);
5750 	}
5751 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5752 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5753 
5754 	/* Install interrupt handler. */
5755 	count = 1;
5756 	rid = 0;
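	/* Prefer MSI when available; otherwise fall back to a shared
	 * legacy INTx line (rid 0). */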
5757 #if defined(__DragonFly__)
5758 	pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
5759 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
5760 #else
5761 	if (pci_alloc_msi(dev, &count) == 0)
5762 		rid = 1;
5763 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5764 	    (rid != 0 ? 0 : RF_SHAREABLE));
5765 #endif
5766 	if (sc->sc_irq == NULL) {
5767 		device_printf(dev, "can't map interrupt\n");
5768 		return (ENXIO);
5769 	}
5770 #if defined(__DragonFly__)
5771 	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
5772 			       iwm_intr, sc, &sc->sc_ih,
5773 			       &wlan_global_serializer);
5774 #else
5775 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5776 	    NULL, iwm_intr, sc, &sc->sc_ih);
5777 #endif
5778 	if (sc->sc_ih == NULL) {
5779 		device_printf(dev, "can't establish interrupt\n");
5780 #if defined(__DragonFly__)
5781 		pci_release_msi(dev);
5782 #endif
5783 		return (ENXIO);
5784 	}
5785 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5786 
5787 	return (0);
5788 }
5789 
5790 static void
5791 iwm_pci_detach(device_t dev)
5792 {
5793 	struct iwm_softc *sc = device_get_softc(dev);
5794 
5795 	if (sc->sc_irq != NULL) {
5796 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5797 		bus_release_resource(dev, SYS_RES_IRQ,
5798 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5799 		pci_release_msi(dev);
5800 #if defined(__DragonFly__)
5801 		sc->sc_irq = NULL;
5802 #endif
5803 	}
5804 	if (sc->sc_mem != NULL) {
5805 		bus_release_resource(dev, SYS_RES_MEMORY,
5806 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5807 #if defined(__DragonFly__)
5808 		sc->sc_mem = NULL;
5809 #endif
5810 	}
5811 }
5812 
5813 
5814 
5815 static int
5816 iwm_attach(device_t dev)
5817 {
5818 	struct iwm_softc *sc = device_get_softc(dev);
5819 	struct ieee80211com *ic = &sc->sc_ic;
5820 	int error;
5821 	int txq_i, i;
5822 
5823 	sc->sc_dev = dev;
5824 	sc->sc_attached = 1;
5825 	IWM_LOCK_INIT(sc);
5826 	mbufq_init(&sc->sc_snd, ifqmaxlen);
5827 #if defined(__DragonFly__)
5828 	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
5829 #else
5830 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5831 #endif
5832 	callout_init(&sc->sc_led_blink_to);
5833 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5834 
5835 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
5836 	if (sc->sc_notif_wait == NULL) {
5837 		device_printf(dev, "failed to init notification wait struct\n");
5838 		goto fail;
5839 	}
5840 
5841 	sc->sf_state = IWM_SF_UNINIT;
5842 
5843 	/* Init phy db */
5844 	sc->sc_phy_db = iwm_phy_db_init(sc);
5845 	if (!sc->sc_phy_db) {
5846 		device_printf(dev, "Cannot init phy_db\n");
5847 		goto fail;
5848 	}
5849 
5850 	/* Assume EBS is successful until the firmware states otherwise. */
5851 	sc->last_ebs_successful = TRUE;
5852 
5853 	/* PCI attach */
5854 	error = iwm_pci_attach(dev);
5855 	if (error != 0)
5856 		goto fail;
5857 
5858 	sc->sc_wantresp = -1;
5859 
5860 	/* Match device id */
5861 	error = iwm_dev_check(dev);
5862 	if (error != 0)
5863 		goto fail;
5864 
5865 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5866 	/*
5867 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
5868 	 * changed: the revision step now also includes bits 0-1 (there is no
5869 	 * more "dash" value). To keep hw_rev backwards compatible, we store
5870 	 * it in the old format.
5871 	 */
5872 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
5873 		int ret;
5874 		uint32_t hw_step;
5875 
5876 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5877 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
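		/*
		 * The step, which this family keeps in bits 0-1, is shifted
		 * into the legacy "dash" position (bits 2-3); see the
		 * IWM_CSR_HW_REV_STEP() definition for the exact layout.
		 */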
5878 
5879 		if (iwm_prepare_card_hw(sc) != 0) {
5880 			device_printf(dev, "could not initialize hardware\n");
5881 			goto fail;
5882 		}
5883 
5884 		/*
5885 		 * In order to recognize C step the driver should read the
5886 		 * chip version id located at the AUX bus MISC address.
5887 		 */
5888 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5889 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5890 		DELAY(2);
5891 
5892 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5893 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5894 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5895 				   25000);
5896 		if (!ret) {
5897 			device_printf(sc->sc_dev,
5898 			    "Failed to wake up the nic\n");
5899 			goto fail;
5900 		}
5901 
5902 		if (iwm_nic_lock(sc)) {
5903 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5904 			hw_step |= IWM_ENABLE_WFPM;
5905 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5906 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5907 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5908 			if (hw_step == 0x3)
5909 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5910 						(IWM_SILICON_C_STEP << 2);
5911 			iwm_nic_unlock(sc);
5912 		} else {
5913 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
5914 			goto fail;
5915 		}
5916 	}
5917 
5918 	/* Special-case the 7265D, which shares PCI IDs with the 7265. */
5919 	if (sc->cfg == &iwm7265_cfg &&
5920 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
5921 		sc->cfg = &iwm7265d_cfg;
5922 	}
5923 
5924 	/* Allocate DMA memory for firmware transfers. */
5925 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
5926 		device_printf(dev, "could not allocate memory for firmware\n");
5927 		goto fail;
5928 	}
5929 
5930 	/* Allocate "Keep Warm" page. */
5931 	if ((error = iwm_alloc_kw(sc)) != 0) {
5932 		device_printf(dev, "could not allocate keep warm page\n");
5933 		goto fail;
5934 	}
5935 
5936 	/* We use ICT interrupts */
5937 	if ((error = iwm_alloc_ict(sc)) != 0) {
5938 		device_printf(dev, "could not allocate ICT table\n");
5939 		goto fail;
5940 	}
5941 
5942 	/* Allocate TX scheduler "rings". */
5943 	if ((error = iwm_alloc_sched(sc)) != 0) {
5944 		device_printf(dev, "could not allocate TX scheduler rings\n");
5945 		goto fail;
5946 	}
5947 
5948 	/* Allocate TX rings */
5949 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
5950 		if ((error = iwm_alloc_tx_ring(sc,
5951 		    &sc->txq[txq_i], txq_i)) != 0) {
5952 			device_printf(dev,
5953 			    "could not allocate TX ring %d\n",
5954 			    txq_i);
5955 			goto fail;
5956 		}
5957 	}
5958 
5959 	/* Allocate RX ring. */
5960 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
5961 		device_printf(dev, "could not allocate RX ring\n");
5962 		goto fail;
5963 	}
5964 
5965 	/* Clear pending interrupts. */
5966 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
5967 
5968 	ic->ic_softc = sc;
5969 	ic->ic_name = device_get_nameunit(sc->sc_dev);
5970 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
5971 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
5972 
5973 	/* Set device capabilities. */
5974 	ic->ic_caps =
5975 	    IEEE80211_C_STA |
5976 	    IEEE80211_C_WPA |		/* WPA/RSN */
5977 	    IEEE80211_C_WME |
5978 	    IEEE80211_C_PMGT |
5979 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
5980 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
5981 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
5982 	    ;
5983 	/* Advertise full-offload scanning */
5984 	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
5985 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
5986 		sc->sc_phyctxt[i].id = i;
5987 		sc->sc_phyctxt[i].color = 0;
5988 		sc->sc_phyctxt[i].ref = 0;
5989 		sc->sc_phyctxt[i].channel = NULL;
5990 	}
5991 
5992 	/* Default noise floor */
5993 	sc->sc_noise = -96;
5994 
5995 	/* Max RSSI */
5996 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
5997 
5998 #ifdef IWM_DEBUG
5999 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6000 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6001 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6002 #endif
6003 
6004 	error = iwm_read_firmware(sc);
6005 	if (error) {
6006 		goto fail;
6007 	} else if (sc->sc_fw.fw_fp == NULL) {
6008 		/*
6009 		 * XXX Add a solution for properly deferring firmware load
6010 		 *     during bootup.
6011 		 */
6012 		goto fail;
6013 	} else {
6014 		sc->sc_preinit_hook.ich_func = iwm_preinit;
6015 		sc->sc_preinit_hook.ich_arg = sc;
6016 		sc->sc_preinit_hook.ich_desc = "iwm";
6017 		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6018 			device_printf(dev,
6019 			    "config_intrhook_establish failed\n");
6020 			goto fail;
6021 		}
6022 	}
6023 
6024 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6025 	    "<-%s\n", __func__);
6026 
6027 	return 0;
6028 
6029 	/* Free allocated memory if something failed during attachment. */
6030 fail:
6031 	iwm_detach_local(sc, 0);
6032 
6033 	return ENXIO;
6034 }
6035 
6036 static int
6037 iwm_is_valid_ether_addr(uint8_t *addr)
6038 {
6039 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6040 
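	/* Reject group addresses (bit 0 of the first octet set) and the
	 * all-zeroes address. */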
6041 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6042 		return (FALSE);
6043 
6044 	return (TRUE);
6045 }
6046 
6047 static int
6048 iwm_wme_update(struct ieee80211com *ic)
6049 {
6050 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
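	/* For instance, a logcwmin of 4 yields IWM_EXP2(4) == 15 slots. */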
6051 	struct iwm_softc *sc = ic->ic_softc;
6052 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6053 	struct iwm_vap *ivp = IWM_VAP(vap);
6054 	struct iwm_node *in;
6055 	struct wmeParams tmp[WME_NUM_AC];
6056 	int aci, error;
6057 
6058 	if (vap == NULL)
6059 		return (0);
6060 
6061 	IEEE80211_LOCK(ic);
6062 	for (aci = 0; aci < WME_NUM_AC; aci++)
6063 		tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6064 	IEEE80211_UNLOCK(ic);
6065 
6066 	IWM_LOCK(sc);
6067 	for (aci = 0; aci < WME_NUM_AC; aci++) {
6068 		const struct wmeParams *ac = &tmp[aci];
6069 		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6070 		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6071 		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6072 		ivp->queue_params[aci].edca_txop =
6073 		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6074 	}
6075 	ivp->have_wme = TRUE;
6076 	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6077 		in = IWM_NODE(vap->iv_bss);
6078 		if (in->in_assoc) {
6079 			if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6080 				device_printf(sc->sc_dev,
6081 				    "%s: failed to update MAC\n", __func__);
6082 			}
6083 		}
6084 	}
6085 	IWM_UNLOCK(sc);
6086 
6087 	return (0);
6088 #undef IWM_EXP2
6089 }
6090 
6091 static void
6092 iwm_preinit(void *arg)
6093 {
6094 	struct iwm_softc *sc = arg;
6095 	device_t dev = sc->sc_dev;
6096 	struct ieee80211com *ic = &sc->sc_ic;
6097 	int error;
6098 
6099 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6100 	    "->%s\n", __func__);
6101 
6102 	IWM_LOCK(sc);
6103 	if ((error = iwm_start_hw(sc)) != 0) {
6104 		device_printf(dev, "could not initialize hardware\n");
6105 		IWM_UNLOCK(sc);
6106 		goto fail;
6107 	}
6108 
6109 	error = iwm_run_init_mvm_ucode(sc, 1);
6110 	iwm_stop_device(sc);
6111 	if (error) {
6112 		IWM_UNLOCK(sc);
6113 		goto fail;
6114 	}
6115 	device_printf(dev,
6116 	    "hw rev 0x%x, fw ver %s, address %s\n",
6117 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6118 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6119 
6120 	/* not all hardware can do 5GHz band */
6121 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6122 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6123 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6124 	IWM_UNLOCK(sc);
6125 
6126 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6127 	    ic->ic_channels);
6128 
6129 	/*
6130 	 * At this point we've committed - if we fail to do setup,
6131 	 * we now also have to tear down the net80211 state.
6132 	 */
6133 	ieee80211_ifattach(ic);
6134 	ic->ic_vap_create = iwm_vap_create;
6135 	ic->ic_vap_delete = iwm_vap_delete;
6136 	ic->ic_raw_xmit = iwm_raw_xmit;
6137 	ic->ic_node_alloc = iwm_node_alloc;
6138 	ic->ic_scan_start = iwm_scan_start;
6139 	ic->ic_scan_end = iwm_scan_end;
6140 	ic->ic_update_mcast = iwm_update_mcast;
6141 	ic->ic_getradiocaps = iwm_init_channel_map;
6142 	ic->ic_set_channel = iwm_set_channel;
6143 	ic->ic_scan_curchan = iwm_scan_curchan;
6144 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6145 	ic->ic_wme.wme_update = iwm_wme_update;
6146 	ic->ic_parent = iwm_parent;
6147 	ic->ic_transmit = iwm_transmit;
6148 	iwm_radiotap_attach(sc);
6149 	if (bootverbose)
6150 		ieee80211_announce(ic);
6151 
6152 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6153 	    "<-%s\n", __func__);
6154 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6155 
6156 	return;
6157 fail:
6158 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6159 	iwm_detach_local(sc, 0);
6160 }
6161 
6162 /*
6163  * Attach the interface to 802.11 radiotap.
6164  */
6165 static void
6166 iwm_radiotap_attach(struct iwm_softc *sc)
6167 {
6168 	struct ieee80211com *ic = &sc->sc_ic;
6169 
6170 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6171 	    "->%s begin\n", __func__);
6172 	ieee80211_radiotap_attach(ic,
6173 	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6174 	    IWM_TX_RADIOTAP_PRESENT,
6175 	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6176 	    IWM_RX_RADIOTAP_PRESENT);
6177 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6178 	    "<-%s end\n", __func__);
6179 }
6180 
6181 static struct ieee80211vap *
6182 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6183     enum ieee80211_opmode opmode, int flags,
6184     const uint8_t bssid[IEEE80211_ADDR_LEN],
6185     const uint8_t mac[IEEE80211_ADDR_LEN])
6186 {
6187 	struct iwm_vap *ivp;
6188 	struct ieee80211vap *vap;
6189 
6190 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6191 		return NULL;
6192 	ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
6193 	vap = &ivp->iv_vap;
6194 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6195 	vap->iv_bmissthreshold = 10;            /* override default */
6196 	/* Override with driver methods. */
6197 	ivp->iv_newstate = vap->iv_newstate;
6198 	vap->iv_newstate = iwm_newstate;
6199 
6200 	ivp->id = IWM_DEFAULT_MACID;
6201 	ivp->color = IWM_DEFAULT_COLOR;
6202 
6203 	ivp->have_wme = FALSE;
6204 	ivp->ps_disabled = FALSE;
6205 
6206 	ieee80211_ratectl_init(vap);
6207 	/* Complete setup. */
6208 	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6209 	    mac);
6210 	ic->ic_opmode = opmode;
6211 
6212 	return vap;
6213 }
6214 
6215 static void
6216 iwm_vap_delete(struct ieee80211vap *vap)
6217 {
6218 	struct iwm_vap *ivp = IWM_VAP(vap);
6219 
6220 	ieee80211_ratectl_deinit(vap);
6221 	ieee80211_vap_detach(vap);
6222 	kfree(ivp, M_80211_VAP);
6223 }
6224 
6225 static void
6226 iwm_xmit_queue_drain(struct iwm_softc *sc)
6227 {
6228 	struct mbuf *m;
6229 	struct ieee80211_node *ni;
6230 
6231 	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6232 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6233 		ieee80211_free_node(ni);
6234 		m_freem(m);
6235 	}
6236 }
6237 
6238 static void
6239 iwm_scan_start(struct ieee80211com *ic)
6240 {
6241 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6242 	struct iwm_softc *sc = ic->ic_softc;
6243 	int error;
6244 
6245 	IWM_LOCK(sc);
6246 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6247 		/* This should not be possible */
6248 		device_printf(sc->sc_dev,
6249 		    "%s: Previous scan not completed yet\n", __func__);
6250 	}
6251 	if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6252 		error = iwm_mvm_umac_scan(sc);
6253 	else
6254 		error = iwm_mvm_lmac_scan(sc);
6255 	if (error != 0) {
6256 		device_printf(sc->sc_dev, "could not initiate scan\n");
6257 		IWM_UNLOCK(sc);
6258 		ieee80211_cancel_scan(vap);
6259 	} else {
6260 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6261 		iwm_led_blink_start(sc);
6262 		IWM_UNLOCK(sc);
6263 	}
6264 }
6265 
6266 static void
6267 iwm_scan_end(struct ieee80211com *ic)
6268 {
6269 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6270 	struct iwm_softc *sc = ic->ic_softc;
6271 
6272 	IWM_LOCK(sc);
6273 	iwm_led_blink_stop(sc);
6274 	if (vap->iv_state == IEEE80211_S_RUN)
6275 		iwm_mvm_led_enable(sc);
6276 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6277 		/*
6278 		 * Removing IWM_FLAG_SCAN_RUNNING now is fine because
6279 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6280 		 * taskqueue.
6281 		 */
6282 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6283 		iwm_mvm_scan_stop_wait(sc);
6284 	}
6285 	IWM_UNLOCK(sc);
6286 
6287 	/*
6288 	 * Avoid a race if sc_es_task is still enqueued here: cancel it
6289 	 * so that it cannot call ieee80211_scan_done after we have
6290 	 * already started the next scan.
6291 	 */
6292 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6293 }
6294 
6295 static void
6296 iwm_update_mcast(struct ieee80211com *ic)
6297 {
6298 }
6299 
6300 static void
6301 iwm_set_channel(struct ieee80211com *ic)
6302 {
6303 }
6304 
6305 static void
6306 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6307 {
6308 }
6309 
6310 static void
6311 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6312 {
6313 	return;
6314 }
6315 
6316 void
6317 iwm_init_task(void *arg1)
6318 {
6319 	struct iwm_softc *sc = arg1;
6320 
6321 	IWM_LOCK(sc);
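	/*
	 * Wait until no other thread holds IWM_FLAG_BUSY, then claim it
	 * ourselves for the duration of the stop/init cycle.
	 */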
6322 	while (sc->sc_flags & IWM_FLAG_BUSY) {
6323 #if defined(__DragonFly__)
6324 		lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6325 #else
6326 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6327 #endif
6328 	}
6329 	sc->sc_flags |= IWM_FLAG_BUSY;
6330 	iwm_stop(sc);
6331 	if (sc->sc_ic.ic_nrunning > 0)
6332 		iwm_init(sc);
6333 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6334 	wakeup(&sc->sc_flags);
6335 	IWM_UNLOCK(sc);
6336 }
6337 
6338 static int
6339 iwm_resume(device_t dev)
6340 {
6341 	struct iwm_softc *sc = device_get_softc(dev);
6342 	int do_reinit = 0;
6343 
6344 	/*
6345 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6346 	 * PCI Tx retries from interfering with C3 CPU state.
6347 	 */
6348 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6349 
6350 	if (!sc->sc_attached)
6351 		return 0;
6352 
6353 	iwm_init_task(device_get_softc(dev));
6354 
6355 	IWM_LOCK(sc);
6356 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6357 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6358 		do_reinit = 1;
6359 	}
6360 	IWM_UNLOCK(sc);
6361 
6362 	if (do_reinit)
6363 		ieee80211_resume_all(&sc->sc_ic);
6364 
6365 	return 0;
6366 }
6367 
6368 static int
6369 iwm_suspend(device_t dev)
6370 {
6371 	int do_stop = 0;
6372 	struct iwm_softc *sc = device_get_softc(dev);
6373 
6374 	do_stop = (sc->sc_ic.ic_nrunning > 0);
6375 
6376 	if (!sc->sc_attached)
6377 		return (0);
6378 
6379 	ieee80211_suspend_all(&sc->sc_ic);
6380 
6381 	if (do_stop) {
6382 		IWM_LOCK(sc);
6383 		iwm_stop(sc);
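		/*
		 * Setting IWM_FLAG_SCANNING here makes iwm_resume()
		 * perform a full reinit via ieee80211_resume_all().
		 */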
6384 		sc->sc_flags |= IWM_FLAG_SCANNING;
6385 		IWM_UNLOCK(sc);
6386 	}
6387 
6388 	return (0);
6389 }
6390 
6391 static int
6392 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6393 {
6394 	struct iwm_fw_info *fw = &sc->sc_fw;
6395 	device_t dev = sc->sc_dev;
6396 	int i;
6397 
6398 	if (!sc->sc_attached)
6399 		return 0;
6400 	sc->sc_attached = 0;
6401 	if (do_net80211) {
6402 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6403 	}
6404 	iwm_stop_device(sc);
6405 	if (do_net80211) {
6406 		IWM_LOCK(sc);
6407 		iwm_xmit_queue_drain(sc);
6408 		IWM_UNLOCK(sc);
6409 		ieee80211_ifdetach(&sc->sc_ic);
6410 	}
6411 	callout_drain(&sc->sc_led_blink_to);
6412 	callout_drain(&sc->sc_watchdog_to);
6413 
6414 	iwm_phy_db_free(sc->sc_phy_db);
6415 	sc->sc_phy_db = NULL;
6416 
6417 	iwm_free_nvm_data(sc->nvm_data);
6418 
6419 	/* Free descriptor rings */
6420 	iwm_free_rx_ring(sc, &sc->rxq);
6421 	for (i = 0; i < nitems(sc->txq); i++)
6422 		iwm_free_tx_ring(sc, &sc->txq[i]);
6423 
6424 	/* Free firmware */
6425 	if (fw->fw_fp != NULL)
6426 		iwm_fw_info_free(fw);
6427 
6428 	/* Free scheduler */
6429 	iwm_dma_contig_free(&sc->sched_dma);
6430 	iwm_dma_contig_free(&sc->ict_dma);
6431 	iwm_dma_contig_free(&sc->kw_dma);
6432 	iwm_dma_contig_free(&sc->fw_dma);
6433 
6434 	iwm_free_fw_paging(sc);
6435 
6436 	/* Finished with the hardware - detach things */
6437 	iwm_pci_detach(dev);
6438 
6439 	if (sc->sc_notif_wait != NULL) {
6440 		iwm_notification_wait_free(sc->sc_notif_wait);
6441 		sc->sc_notif_wait = NULL;
6442 	}
6443 
6444 	IWM_LOCK_DESTROY(sc);
6445 
6446 	return (0);
6447 }
6448 
6449 static int
6450 iwm_detach(device_t dev)
6451 {
6452 	struct iwm_softc *sc = device_get_softc(dev);
6453 
6454 	return (iwm_detach_local(sc, 1));
6455 }
6456 
6457 static device_method_t iwm_pci_methods[] = {
6458 	/* Device interface */
6459 	DEVMETHOD(device_probe,		iwm_probe),
6460 	DEVMETHOD(device_attach,	iwm_attach),
6461 	DEVMETHOD(device_detach,	iwm_detach),
6462 	DEVMETHOD(device_suspend,	iwm_suspend),
6463 	DEVMETHOD(device_resume,	iwm_resume),
6464 
6465 	DEVMETHOD_END
6466 };
6467 
6468 static driver_t iwm_pci_driver = {
6469 	"iwm",
6470 	iwm_pci_methods,
6471 	sizeof(struct iwm_softc)
6472 };
6473 
6474 static devclass_t iwm_devclass;
6475 
6476 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6477 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6478 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6479 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6480