1 /*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89 
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 /*
106  *				DragonFly work
107  *
108  * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109  *	 changes to remove per-device network interface (DragonFly has not
110  *	 caught up to that yet on the WLAN side).
111  *
112  * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113  *	malloc -> kmalloc	(in particular, changing improper M_NOWAIT
114  *				specifications to M_INTWAIT.  We still don't
115  *				understand why FreeBSD uses M_NOWAIT for
116  *				critical must-not-fail kmalloc()s).
117  *	free -> kfree
118  *	printf -> kprintf
119  *	(bug fix) memset in iwm_reset_rx_ring.
120  *	(debug)   added several kprintf()s on error
121  *
122  *	header file paths (DFly allows localized path specifications).
123  *	minor header file differences.
124  *
125  * Comprehensive list of adjustments for DragonFly #ifdef'd:
126  *	(safety)  added register read-back serialization in iwm_reset_rx_ring().
127  *	packet counters
128  *	msleep -> lksleep
129  *	mtx -> lk  (mtx functions -> lockmgr functions)
130  *	callout differences
131  *	taskqueue differences
132  *	MSI differences
133  *	bus_setup_intr() differences
134  *	minor PCI config register naming differences
135  */
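/*
 * Illustrative sketch of the msleep -> lksleep mapping listed above
 * (editor's example, not from the original sources; the lock member
 * names are assumptions): where the FreeBSD driver would write
 *
 *	msleep(ident, &sc->sc_mtx, 0, "iwmcmd", timo);
 *
 * the DragonFly build uses
 *
 *	lksleep(ident, &sc->sc_lk, 0, "iwmcmd", timo);
 *
 * i.e. struct mtx becomes struct lock and the mtx_*() calls become
 * their lockmgr() equivalents.
 */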
136 #include <sys/cdefs.h>
137 __FBSDID("$FreeBSD$");
138 
139 #include <sys/param.h>
140 #include <sys/bus.h>
141 #include <sys/endian.h>
142 #include <sys/firmware.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/mbuf.h>
146 #include <sys/module.h>
147 #include <sys/rman.h>
148 #include <sys/sysctl.h>
149 #include <sys/linker.h>
150 
151 #include <machine/endian.h>
152 
153 #include <bus/pci/pcivar.h>
154 #include <bus/pci/pcireg.h>
155 
156 #include <net/bpf.h>
157 
158 #include <net/if.h>
159 #include <net/if_var.h>
160 #include <net/if_arp.h>
161 #include <net/if_dl.h>
162 #include <net/if_media.h>
163 #include <net/if_types.h>
164 
165 #include <netinet/in.h>
166 #include <netinet/in_systm.h>
167 #include <netinet/if_ether.h>
168 #include <netinet/ip.h>
169 
170 #include <netproto/802_11/ieee80211_var.h>
171 #include <netproto/802_11/ieee80211_regdomain.h>
172 #include <netproto/802_11/ieee80211_ratectl.h>
173 #include <netproto/802_11/ieee80211_radiotap.h>
174 
175 #include "if_iwmreg.h"
176 #include "if_iwmvar.h"
177 #include "if_iwm_config.h"
178 #include "if_iwm_debug.h"
179 #include "if_iwm_notif_wait.h"
180 #include "if_iwm_util.h"
181 #include "if_iwm_binding.h"
182 #include "if_iwm_phy_db.h"
183 #include "if_iwm_mac_ctxt.h"
184 #include "if_iwm_phy_ctxt.h"
185 #include "if_iwm_time_event.h"
186 #include "if_iwm_power.h"
187 #include "if_iwm_scan.h"
188 #include "if_iwm_sf.h"
189 #include "if_iwm_sta.h"
190 #include "if_iwm_pcie_trans.h"
191 #include "if_iwm_led.h"
192 #include "if_iwm_fw.h"
193 
194 const uint8_t iwm_nvm_channels[] = {
195 	/* 2.4 GHz */
196 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
197 	/* 5 GHz */
198 	36, 40, 44, 48, 52, 56, 60, 64,
199 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
200 	149, 153, 157, 161, 165
201 };
202 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
203     "IWM_NUM_CHANNELS is too small");
204 
205 const uint8_t iwm_nvm_channels_8000[] = {
206 	/* 2.4 GHz */
207 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
208 	/* 5 GHz */
209 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
210 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
211 	149, 153, 157, 161, 165, 169, 173, 177, 181
212 };
213 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
214     "IWM_NUM_CHANNELS_8000 is too small");
215 
216 #define IWM_NUM_2GHZ_CHANNELS	14
217 #define IWM_N_HW_ADDR_MASK	0xF
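/*
 * The NVM stores its per-channel flags as a flat array in exactly the
 * order given above: the first IWM_NUM_2GHZ_CHANNELS (14) entries are
 * the 2.4 GHz channels and the remainder are 5 GHz.  That split is what
 * iwm_add_channel_band() relies on when populating the net80211
 * channel list.
 */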
218 
219 /*
220  * XXX For now, there's simply a fixed set of rate table entries
221  * that are populated.
222  */
223 const struct iwm_rate {
224 	uint8_t rate;
225 	uint8_t plcp;
226 } iwm_rates[] = {
227 	{   2,	IWM_RATE_1M_PLCP  },
228 	{   4,	IWM_RATE_2M_PLCP  },
229 	{  11,	IWM_RATE_5M_PLCP  },
230 	{  22,	IWM_RATE_11M_PLCP },
231 	{  12,	IWM_RATE_6M_PLCP  },
232 	{  18,	IWM_RATE_9M_PLCP  },
233 	{  24,	IWM_RATE_12M_PLCP },
234 	{  36,	IWM_RATE_18M_PLCP },
235 	{  48,	IWM_RATE_24M_PLCP },
236 	{  72,	IWM_RATE_36M_PLCP },
237 	{  96,	IWM_RATE_48M_PLCP },
238 	{ 108,	IWM_RATE_54M_PLCP },
239 };
240 #define IWM_RIDX_CCK	0
241 #define IWM_RIDX_OFDM	4
242 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
243 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
244 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
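/*
 * Editor's note: the "rate" field above is in net80211's 500 kb/s
 * units, so { 2, IWM_RATE_1M_PLCP } is 1 Mb/s CCK and
 * { 108, IWM_RATE_54M_PLCP } is 54 Mb/s OFDM, while "plcp" is the
 * value the firmware expects in its rate field.  The table is ordered
 * so that indices below IWM_RIDX_OFDM (0-3) are the four CCK rates and
 * the rest are OFDM, which is all IWM_RIDX_IS_CCK()/IWM_RIDX_IS_OFDM()
 * check.
 */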
245 
246 struct iwm_nvm_section {
247 	uint16_t length;
248 	uint8_t *data;
249 };
250 
251 #define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
252 #define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)
253 
254 struct iwm_mvm_alive_data {
255 	int valid;
256 	uint32_t scd_base_addr;
257 };
258 
259 static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
260 static int	iwm_firmware_store_section(struct iwm_softc *,
261                                            enum iwm_ucode_type,
262                                            const uint8_t *, size_t);
263 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
264 static void	iwm_fw_info_free(struct iwm_fw_info *);
265 static int	iwm_read_firmware(struct iwm_softc *);
266 #if !defined(__DragonFly__)
267 static void	iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
268 #endif
269 static int	iwm_alloc_fwmem(struct iwm_softc *);
270 static int	iwm_alloc_sched(struct iwm_softc *);
271 static int	iwm_alloc_kw(struct iwm_softc *);
272 static int	iwm_alloc_ict(struct iwm_softc *);
273 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
274 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
275 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
276 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
277                                   int);
278 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
279 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
280 static void	iwm_enable_interrupts(struct iwm_softc *);
281 static void	iwm_restore_interrupts(struct iwm_softc *);
282 static void	iwm_disable_interrupts(struct iwm_softc *);
283 static void	iwm_ict_reset(struct iwm_softc *);
284 static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
285 static void	iwm_stop_device(struct iwm_softc *);
286 static void	iwm_mvm_nic_config(struct iwm_softc *);
287 static int	iwm_nic_rx_init(struct iwm_softc *);
288 static int	iwm_nic_tx_init(struct iwm_softc *);
289 static int	iwm_nic_init(struct iwm_softc *);
290 static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
291 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
292                                    uint16_t, uint8_t *, uint16_t *);
293 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
294 				     uint16_t *, uint32_t);
295 static uint32_t	iwm_eeprom_channel_flags(uint16_t);
296 static void	iwm_add_channel_band(struct iwm_softc *,
297 		    struct ieee80211_channel[], int, int *, int, size_t,
298 		    const uint8_t[]);
299 static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
300 		    struct ieee80211_channel[]);
301 static struct iwm_nvm_data *
302 	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
303 			   const uint16_t *, const uint16_t *,
304 			   const uint16_t *, const uint16_t *,
305 			   const uint16_t *);
306 static void	iwm_free_nvm_data(struct iwm_nvm_data *);
307 static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
308 					       struct iwm_nvm_data *,
309 					       const uint16_t *,
310 					       const uint16_t *);
311 static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
312 			    const uint16_t *);
313 static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
314 static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
315 				  const uint16_t *);
316 static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
317 				   const uint16_t *);
318 static void	iwm_set_radio_cfg(const struct iwm_softc *,
319 				  struct iwm_nvm_data *, uint32_t);
320 static struct iwm_nvm_data *
321 	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
322 static int	iwm_nvm_init(struct iwm_softc *);
323 static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
324 				      const struct iwm_fw_desc *);
325 static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
326 					     bus_addr_t, uint32_t);
327 static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
328 						const struct iwm_fw_img *,
329 						int, int *);
330 static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
331 					   const struct iwm_fw_img *,
332 					   int, int *);
333 static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
334 					       const struct iwm_fw_img *);
335 static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
336 					  const struct iwm_fw_img *);
337 static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
338 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
339 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
340 static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
341                                               enum iwm_ucode_type);
342 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
343 static int	iwm_mvm_config_ltr(struct iwm_softc *sc);
344 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
345 static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
346 					    struct iwm_rx_phy_info *);
347 static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
348                                       struct iwm_rx_packet *);
349 static int	iwm_get_noise(struct iwm_softc *,
350 		    const struct iwm_mvm_statistics_rx_non_phy *);
351 static void	iwm_mvm_handle_rx_statistics(struct iwm_softc *,
352 		    struct iwm_rx_packet *);
353 static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
354 				    uint32_t, boolean_t);
355 static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
356                                          struct iwm_rx_packet *,
357 				         struct iwm_node *);
358 static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
359 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
360 #if 0
361 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
362                                  uint16_t);
363 #endif
364 static uint8_t	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
365 			struct mbuf *, struct iwm_tx_cmd *);
366 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
367                        struct ieee80211_node *, int);
368 static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
369 			     const struct ieee80211_bpf_params *);
370 static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
371 static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
372 static struct ieee80211_node *
373 		iwm_node_alloc(struct ieee80211vap *,
374 		               const uint8_t[IEEE80211_ADDR_LEN]);
375 static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
376 static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
377 static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
378 static int	iwm_media_change(struct ifnet *);
379 static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
380 static void	iwm_endscan_cb(void *, int);
381 static int	iwm_send_bt_init_conf(struct iwm_softc *);
382 static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
383 static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
384 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
385 static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
386 static int	iwm_init_hw(struct iwm_softc *);
387 static void	iwm_init(struct iwm_softc *);
388 static void	iwm_start(struct iwm_softc *);
389 static void	iwm_stop(struct iwm_softc *);
390 static void	iwm_watchdog(void *);
391 static void	iwm_parent(struct ieee80211com *);
392 #ifdef IWM_DEBUG
393 static const char *
394 		iwm_desc_lookup(uint32_t);
395 static void	iwm_nic_error(struct iwm_softc *);
396 static void	iwm_nic_umac_error(struct iwm_softc *);
397 #endif
398 static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
399 static void	iwm_notif_intr(struct iwm_softc *);
400 static void	iwm_intr(void *);
401 static int	iwm_attach(device_t);
402 static int	iwm_is_valid_ether_addr(uint8_t *);
403 static void	iwm_preinit(void *);
404 static int	iwm_detach_local(struct iwm_softc *sc, int);
405 static void	iwm_init_task(void *);
406 static void	iwm_radiotap_attach(struct iwm_softc *);
407 static struct ieee80211vap *
408 		iwm_vap_create(struct ieee80211com *,
409 		               const char [IFNAMSIZ], int,
410 		               enum ieee80211_opmode, int,
411 		               const uint8_t [IEEE80211_ADDR_LEN],
412 		               const uint8_t [IEEE80211_ADDR_LEN]);
413 static void	iwm_vap_delete(struct ieee80211vap *);
414 static void	iwm_xmit_queue_drain(struct iwm_softc *);
415 static void	iwm_scan_start(struct ieee80211com *);
416 static void	iwm_scan_end(struct ieee80211com *);
417 static void	iwm_update_mcast(struct ieee80211com *);
418 static void	iwm_set_channel(struct ieee80211com *);
419 static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
420 static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
421 static int	iwm_detach(device_t);
422 
423 #if defined(__DragonFly__)
424 static int	iwm_msi_enable = 1;
425 
426 TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
427 #endif
428 
429 static int	iwm_lar_disable = 0;
430 TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
431 
432 /*
433  * Firmware parser.
434  */
435 
436 static int
437 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
438 {
439 	const struct iwm_fw_cscheme_list *l = (const void *)data;
440 
441 	if (dlen < sizeof(*l) ||
442 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
443 		return EINVAL;
444 
445 	/* we don't actually store anything for now, always use s/w crypto */
446 
447 	return 0;
448 }
449 
450 static int
451 iwm_firmware_store_section(struct iwm_softc *sc,
452     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
453 {
454 	struct iwm_fw_img *fws;
455 	struct iwm_fw_desc *fwone;
456 
457 	if (type >= IWM_UCODE_TYPE_MAX)
458 		return EINVAL;
459 	if (dlen < sizeof(uint32_t))
460 		return EINVAL;
461 
462 	fws = &sc->sc_fw.img[type];
463 	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
464 		return EINVAL;
465 
466 	fwone = &fws->sec[fws->fw_count];
467 
468 	/* first 32bit are device load offset */
469 	memcpy(&fwone->offset, data, sizeof(uint32_t));
470 
471 	/* rest is data */
472 	fwone->data = data + sizeof(uint32_t);
473 	fwone->len = dlen - sizeof(uint32_t);
474 
475 	fws->fw_count++;
476 
477 	return 0;
478 }
479 
480 #define IWM_DEFAULT_SCAN_CHANNELS 40
481 
482 struct iwm_tlv_calib_data {
483 	uint32_t ucode_type;
484 	struct iwm_tlv_calib_ctrl calib;
485 } __packed;
486 
487 static int
488 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
489 {
490 	const struct iwm_tlv_calib_data *def_calib = data;
491 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
492 
493 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
494 		device_printf(sc->sc_dev,
495 		    "Wrong ucode_type %u for default "
496 		    "calibration.\n", ucode_type);
497 		return EINVAL;
498 	}
499 
500 	sc->sc_default_calib[ucode_type].flow_trigger =
501 	    def_calib->calib.flow_trigger;
502 	sc->sc_default_calib[ucode_type].event_trigger =
503 	    def_calib->calib.event_trigger;
504 
505 	return 0;
506 }
507 
508 static int
509 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
510 			struct iwm_ucode_capabilities *capa)
511 {
512 	const struct iwm_ucode_api *ucode_api = (const void *)data;
513 	uint32_t api_index = le32toh(ucode_api->api_index);
514 	uint32_t api_flags = le32toh(ucode_api->api_flags);
515 	int i;
516 
517 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
518 		device_printf(sc->sc_dev,
519 		    "api flags index %d larger than supported by driver\n",
520 		    api_index);
521 		/* don't return an error so we can load FW that has more bits */
522 		return 0;
523 	}
524 
525 	for (i = 0; i < 32; i++) {
526 		if (api_flags & (1U << i))
527 			setbit(capa->enabled_api, i + 32 * api_index);
528 	}
529 
530 	return 0;
531 }
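/*
 * Example: a TLV with api_index == 1 and bit 3 set in api_flags marks
 * API feature 35 (3 + 32 * 1) in capa->enabled_api; each api_index
 * selects the next 32-bit word of the feature bitmap.  The capability
 * parser below works the same way on capa->enabled_capa.
 */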
532 
533 static int
534 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
535 			   struct iwm_ucode_capabilities *capa)
536 {
537 	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
538 	uint32_t api_index = le32toh(ucode_capa->api_index);
539 	uint32_t api_flags = le32toh(ucode_capa->api_capa);
540 	int i;
541 
542 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
543 		device_printf(sc->sc_dev,
544 		    "capa flags index %d larger than supported by driver\n",
545 		    api_index);
546 		/* don't return an error so we can load FW that has more bits */
547 		return 0;
548 	}
549 
550 	for (i = 0; i < 32; i++) {
551 		if (api_flags & (1U << i))
552 			setbit(capa->enabled_capa, i + 32 * api_index);
553 	}
554 
555 	return 0;
556 }
557 
558 static void
559 iwm_fw_info_free(struct iwm_fw_info *fw)
560 {
561 	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
562 	fw->fw_fp = NULL;
563 	memset(fw->img, 0, sizeof(fw->img));
564 }
565 
566 static int
567 iwm_read_firmware(struct iwm_softc *sc)
568 {
569 	struct iwm_fw_info *fw = &sc->sc_fw;
570 	const struct iwm_tlv_ucode_header *uhdr;
571 	const struct iwm_ucode_tlv *tlv;
572 	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
573 	enum iwm_ucode_tlv_type tlv_type;
574 	const struct firmware *fwp;
575 	const uint8_t *data;
576 	uint32_t tlv_len;
577 	uint32_t usniffer_img;
578 	const uint8_t *tlv_data;
579 	uint32_t paging_mem_size;
580 	int num_of_cpus;
581 	int error = 0;
582 	size_t len;
583 
584 	/*
585 	 * Load firmware into driver memory.
586 	 * fw_fp will be set.
587 	 */
588 	fwp = firmware_get(sc->cfg->fw_name);
589 	if (fwp == NULL) {
590 		device_printf(sc->sc_dev,
591 		    "could not read firmware %s (error %d)\n",
592 		    sc->cfg->fw_name, error);
593 		goto out;
594 	}
595 	fw->fw_fp = fwp;
596 
597 	/* (Re-)Initialize default values. */
598 	capa->flags = 0;
599 	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
600 	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
601 	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
602 	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
603 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
604 
605 	/*
606 	 * Parse firmware contents
607 	 */
608 
609 	uhdr = (const void *)fw->fw_fp->data;
610 	if (*(const uint32_t *)fw->fw_fp->data != 0
611 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
612 		device_printf(sc->sc_dev, "invalid firmware %s\n",
613 		    sc->cfg->fw_name);
614 		error = EINVAL;
615 		goto out;
616 	}
617 
618 	ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
619 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
620 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
621 	    IWM_UCODE_API(le32toh(uhdr->ver)));
622 	data = uhdr->data;
623 	len = fw->fw_fp->datasize - sizeof(*uhdr);
624 
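	/*
	 * The rest of the image is a sequence of TLV records, each
	 * (per the iwm_ucode_tlv layout) arranged as:
	 *
	 *	offset 0: type   (le32)
	 *	offset 4: length (le32)
	 *	offset 8: data   (length bytes, padded to a 4-byte boundary)
	 *
	 * hence the roundup2(tlv_len, 4) stride below.
	 */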
625 	while (len >= sizeof(*tlv)) {
626 		len -= sizeof(*tlv);
627 		tlv = (const void *)data;
628 
629 		tlv_len = le32toh(tlv->length);
630 		tlv_type = le32toh(tlv->type);
631 		tlv_data = tlv->data;
632 
633 		if (len < tlv_len) {
634 			device_printf(sc->sc_dev,
635 			    "firmware too short: %zu bytes\n",
636 			    len);
637 			error = EINVAL;
638 			goto parse_out;
639 		}
640 		len -= roundup2(tlv_len, 4);
641 		data += sizeof(*tlv) + roundup2(tlv_len, 4);
642 
643 		switch ((int)tlv_type) {
644 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
645 			if (tlv_len != sizeof(uint32_t)) {
646 				device_printf(sc->sc_dev,
647 				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
648 				    __func__, tlv_len);
649 				error = EINVAL;
650 				goto parse_out;
651 			}
652 			capa->max_probe_length =
653 			    le32_to_cpup((const uint32_t *)tlv_data);
654 			/* limit it to something sensible */
655 			if (capa->max_probe_length >
656 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
657 				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
658 				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
659 				    "ridiculous\n", __func__);
660 				error = EINVAL;
661 				goto parse_out;
662 			}
663 			break;
664 		case IWM_UCODE_TLV_PAN:
665 			if (tlv_len) {
666 				device_printf(sc->sc_dev,
667 				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
668 				    __func__, tlv_len);
669 				error = EINVAL;
670 				goto parse_out;
671 			}
672 			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
673 			break;
674 		case IWM_UCODE_TLV_FLAGS:
675 			if (tlv_len < sizeof(uint32_t)) {
676 				device_printf(sc->sc_dev,
677 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
678 				    __func__, tlv_len);
679 				error = EINVAL;
680 				goto parse_out;
681 			}
682 			if (tlv_len % sizeof(uint32_t)) {
683 				device_printf(sc->sc_dev,
684 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
685 				    __func__, tlv_len);
686 				error = EINVAL;
687 				goto parse_out;
688 			}
689 			/*
690 			 * Apparently there can be many flags, but Linux driver
691 			 * parses only the first one, and so do we.
692 			 *
693 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
694 			 * Intentional or a bug?  Observations from
695 			 * current firmware file:
696 			 *  1) TLV_PAN is parsed first
697 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
698 			 * ==> this resets TLV_PAN to itself... hnnnk
699 			 */
700 			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
701 			break;
702 		case IWM_UCODE_TLV_CSCHEME:
703 			if ((error = iwm_store_cscheme(sc,
704 			    tlv_data, tlv_len)) != 0) {
705 				device_printf(sc->sc_dev,
706 				    "%s: iwm_store_cscheme(): returned %d\n",
707 				    __func__, error);
708 				goto parse_out;
709 			}
710 			break;
711 		case IWM_UCODE_TLV_NUM_OF_CPU:
712 			if (tlv_len != sizeof(uint32_t)) {
713 				device_printf(sc->sc_dev,
714 				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
715 				    __func__, tlv_len);
716 				error = EINVAL;
717 				goto parse_out;
718 			}
719 			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
720 			if (num_of_cpus == 2) {
721 				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
722 					TRUE;
723 				fw->img[IWM_UCODE_INIT].is_dual_cpus =
724 					TRUE;
725 				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
726 					TRUE;
727 			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
728 				device_printf(sc->sc_dev,
729 				    "%s: Driver supports only 1 or 2 CPUs\n",
730 				    __func__);
731 				error = EINVAL;
732 				goto parse_out;
733 			}
734 			break;
735 		case IWM_UCODE_TLV_SEC_RT:
736 			if ((error = iwm_firmware_store_section(sc,
737 			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
738 				device_printf(sc->sc_dev,
739 				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
740 				    __func__, error);
741 				goto parse_out;
742 			}
743 			break;
744 		case IWM_UCODE_TLV_SEC_INIT:
745 			if ((error = iwm_firmware_store_section(sc,
746 			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
747 				device_printf(sc->sc_dev,
748 				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
749 				    __func__, error);
750 				goto parse_out;
751 			}
752 			break;
753 		case IWM_UCODE_TLV_SEC_WOWLAN:
754 			if ((error = iwm_firmware_store_section(sc,
755 			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
756 				device_printf(sc->sc_dev,
757 				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
758 				    __func__, error);
759 				goto parse_out;
760 			}
761 			break;
762 		case IWM_UCODE_TLV_DEF_CALIB:
763 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
764 				device_printf(sc->sc_dev,
765 				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%u) < sizeof(iwm_tlv_calib_data) (%zu)\n",
766 				    __func__, tlv_len,
767 				    sizeof(struct iwm_tlv_calib_data));
768 				error = EINVAL;
769 				goto parse_out;
770 			}
771 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
772 				device_printf(sc->sc_dev,
773 				    "%s: iwm_set_default_calib() failed: %d\n",
774 				    __func__, error);
775 				goto parse_out;
776 			}
777 			break;
778 		case IWM_UCODE_TLV_PHY_SKU:
779 			if (tlv_len != sizeof(uint32_t)) {
780 				error = EINVAL;
781 				device_printf(sc->sc_dev,
782 				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) < sizeof(uint32_t)\n",
783 				    __func__, tlv_len);
784 				goto parse_out;
785 			}
786 			sc->sc_fw.phy_config =
787 			    le32_to_cpup((const uint32_t *)tlv_data);
788 			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
789 						  IWM_FW_PHY_CFG_TX_CHAIN) >>
790 						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
791 			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
792 						  IWM_FW_PHY_CFG_RX_CHAIN) >>
793 						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
794 			break;
795 
796 		case IWM_UCODE_TLV_API_CHANGES_SET: {
797 			if (tlv_len != sizeof(struct iwm_ucode_api)) {
798 				error = EINVAL;
799 				goto parse_out;
800 			}
801 			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
802 				error = EINVAL;
803 				goto parse_out;
804 			}
805 			break;
806 		}
807 
808 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
809 			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
810 				error = EINVAL;
811 				goto parse_out;
812 			}
813 			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
814 				error = EINVAL;
815 				goto parse_out;
816 			}
817 			break;
818 		}
819 
820 		case 48: /* undocumented TLV */
821 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
822 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
823 			/* ignore, not used by current driver */
824 			break;
825 
826 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
827 			if ((error = iwm_firmware_store_section(sc,
828 			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
829 			    tlv_len)) != 0)
830 				goto parse_out;
831 			break;
832 
833 		case IWM_UCODE_TLV_PAGING:
834 			if (tlv_len != sizeof(uint32_t)) {
835 				error = EINVAL;
836 				goto parse_out;
837 			}
838 			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
839 
840 			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
841 			    "%s: Paging: paging enabled (size = %u bytes)\n",
842 			    __func__, paging_mem_size);
843 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
844 				device_printf(sc->sc_dev,
845 					"%s: Paging: driver supports up to %u bytes for paging image\n",
846 					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
847 				error = EINVAL;
848 				goto out;
849 			}
850 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
851 				device_printf(sc->sc_dev,
852 				    "%s: Paging: image isn't multiple %u\n",
853 				    __func__, IWM_FW_PAGING_SIZE);
854 				error = EINVAL;
855 				goto out;
856 			}
857 
858 			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
859 			    paging_mem_size;
860 			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
861 			sc->sc_fw.img[usniffer_img].paging_mem_size =
862 			    paging_mem_size;
863 			break;
864 
865 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
866 			if (tlv_len != sizeof(uint32_t)) {
867 				error = EINVAL;
868 				goto parse_out;
869 			}
870 			capa->n_scan_channels =
871 			    le32_to_cpup((const uint32_t *)tlv_data);
872 			break;
873 
874 		case IWM_UCODE_TLV_FW_VERSION:
875 			if (tlv_len != sizeof(uint32_t) * 3) {
876 				error = EINVAL;
877 				goto parse_out;
878 			}
879 			ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
880 			    "%d.%d.%d",
881 			    le32toh(((const uint32_t *)tlv_data)[0]),
882 			    le32toh(((const uint32_t *)tlv_data)[1]),
883 			    le32toh(((const uint32_t *)tlv_data)[2]));
884 			break;
885 
886 		case IWM_UCODE_TLV_FW_MEM_SEG:
887 			break;
888 
889 		default:
890 			device_printf(sc->sc_dev,
891 			    "%s: unknown firmware section %d, abort\n",
892 			    __func__, tlv_type);
893 			error = EINVAL;
894 			goto parse_out;
895 		}
896 	}
897 
898 	KASSERT(error == 0, ("unhandled error"));
899 
900  parse_out:
901 	if (error) {
902 		device_printf(sc->sc_dev, "firmware parse error %d, "
903 		    "section type %d\n", error, tlv_type);
904 	}
905 
906  out:
907 	if (error) {
908 		if (fw->fw_fp != NULL)
909 			iwm_fw_info_free(fw);
910 	}
911 
912 	return error;
913 }
914 
915 /*
916  * DMA resource routines
917  */
918 
919 /* fwmem is used to load firmware onto the card */
920 static int
921 iwm_alloc_fwmem(struct iwm_softc *sc)
922 {
923 	/* Must be aligned on a 16-byte boundary. */
924 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
925 	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
926 }
927 
928 /* tx scheduler rings.  not used? */
929 static int
930 iwm_alloc_sched(struct iwm_softc *sc)
931 {
932 	/* TX scheduler rings must be aligned on a 1KB boundary. */
933 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
934 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
935 }
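/*
 * On the "not used?" above: the hardware does use this memory -- its
 * base address is handed to the scheduler in iwm_trans_pcie_fw_alive()
 * via IWM_SCD_DRAM_BASE_ADDR (hence the 1KB alignment; the register
 * takes paddr >> 10) -- but the driver side that would update the
 * byte-count tables, iwm_update_sched(), is currently compiled out
 * (#if 0) above.
 */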
936 
937 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
938 static int
939 iwm_alloc_kw(struct iwm_softc *sc)
940 {
941 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
942 }
943 
944 /* interrupt cause table */
945 static int
946 iwm_alloc_ict(struct iwm_softc *sc)
947 {
948 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
949 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
950 }
951 
952 static int
953 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
954 {
955 	bus_size_t size;
956 	int i, error;
957 
958 	ring->cur = 0;
959 
960 	/* Allocate RX descriptors (256-byte aligned). */
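	/*
	 * Each descriptor is a single 32-bit word holding the receive
	 * buffer's DMA address shifted right by 8 (the hardware deals
	 * in 256-byte granules), which is why one slot is just
	 * sizeof(uint32_t); iwm_rx_addbuf() fills the slots in.
	 */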
961 	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
962 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
963 	if (error != 0) {
964 		device_printf(sc->sc_dev,
965 		    "could not allocate RX ring DMA memory\n");
966 		goto fail;
967 	}
968 	ring->desc = ring->desc_dma.vaddr;
969 
970 	/* Allocate RX status area (16-byte aligned). */
971 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
972 	    sizeof(*ring->stat), 16);
973 	if (error != 0) {
974 		device_printf(sc->sc_dev,
975 		    "could not allocate RX status DMA memory\n");
976 		goto fail;
977 	}
978 	ring->stat = ring->stat_dma.vaddr;
979 
980 	/* Create RX buffer DMA tag. */
981 #if defined(__DragonFly__)
982 	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
983 				   0,
984 				   BUS_SPACE_MAXADDR_32BIT,
985 				   BUS_SPACE_MAXADDR,
986 				   NULL, NULL,
987 				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
988 				   BUS_DMA_NOWAIT, &ring->data_dmat);
989 #else
990 	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
991 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
992 	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
993 #endif
994 	if (error != 0) {
995 		device_printf(sc->sc_dev,
996 		    "%s: could not create RX buf DMA tag, error %d\n",
997 		    __func__, error);
998 		goto fail;
999 	}
1000 
1001 	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
1002 	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
1003 	if (error != 0) {
1004 		device_printf(sc->sc_dev,
1005 		    "%s: could not create RX buf DMA map, error %d\n",
1006 		    __func__, error);
1007 		goto fail;
1008 	}
1009 	/*
1010 	 * Allocate and map RX buffers.
1011 	 */
1012 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1013 		struct iwm_rx_data *data = &ring->data[i];
1014 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1015 		if (error != 0) {
1016 			device_printf(sc->sc_dev,
1017 			    "%s: could not create RX buf DMA map, error %d\n",
1018 			    __func__, error);
1019 			goto fail;
1020 		}
1021 		data->m = NULL;
1022 
1023 		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1024 			goto fail;
1025 		}
1026 	}
1027 	return 0;
1028 
1029 fail:	iwm_free_rx_ring(sc, ring);
1030 	return error;
1031 }
1032 
1033 static void
1034 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1035 {
1036 	/* Reset the ring state */
1037 	ring->cur = 0;
1038 
1039 	/*
1040 	 * The hw rx ring index in shared memory must also be cleared,
1041 	 * otherwise the discrepancy can cause reprocessing chaos.
1042 	 */
1043 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1044 }
1045 
1046 static void
1047 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1048 {
1049 	int i;
1050 
1051 	iwm_dma_contig_free(&ring->desc_dma);
1052 	iwm_dma_contig_free(&ring->stat_dma);
1053 
1054 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1055 		struct iwm_rx_data *data = &ring->data[i];
1056 
1057 		if (data->m != NULL) {
1058 			bus_dmamap_sync(ring->data_dmat, data->map,
1059 			    BUS_DMASYNC_POSTREAD);
1060 			bus_dmamap_unload(ring->data_dmat, data->map);
1061 			m_freem(data->m);
1062 			data->m = NULL;
1063 		}
1064 		if (data->map != NULL) {
1065 			bus_dmamap_destroy(ring->data_dmat, data->map);
1066 			data->map = NULL;
1067 		}
1068 	}
1069 	if (ring->spare_map != NULL) {
1070 		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1071 		ring->spare_map = NULL;
1072 	}
1073 	if (ring->data_dmat != NULL) {
1074 		bus_dma_tag_destroy(ring->data_dmat);
1075 		ring->data_dmat = NULL;
1076 	}
1077 }
1078 
1079 static int
1080 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1081 {
1082 	bus_addr_t paddr;
1083 	bus_size_t size;
1084 	size_t maxsize;
1085 	int nsegments;
1086 	int i, error;
1087 
1088 	ring->qid = qid;
1089 	ring->queued = 0;
1090 	ring->cur = 0;
1091 
1092 	/* Allocate TX descriptors (256-byte aligned). */
1093 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1094 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1095 	if (error != 0) {
1096 		device_printf(sc->sc_dev,
1097 		    "could not allocate TX ring DMA memory\n");
1098 		goto fail;
1099 	}
1100 	ring->desc = ring->desc_dma.vaddr;
1101 
1102 	/*
1103 	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1104 	 * to allocate command space for other rings.
1105 	 */
1106 	if (qid > IWM_MVM_CMD_QUEUE)
1107 		return 0;
1108 
1109 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1110 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1111 	if (error != 0) {
1112 		device_printf(sc->sc_dev,
1113 		    "could not allocate TX cmd DMA memory\n");
1114 		goto fail;
1115 	}
1116 	ring->cmd = ring->cmd_dma.vaddr;
1117 
1118 	/* FW commands may require more mapped space than packets. */
1119 	if (qid == IWM_MVM_CMD_QUEUE) {
1120 		maxsize = IWM_RBUF_SIZE;
1121 		nsegments = 1;
1122 	} else {
1123 		maxsize = MCLBYTES;
1124 		nsegments = IWM_MAX_SCATTER - 2;
1125 	}
1126 
1127 #if defined(__DragonFly__)
1128 	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
1129 				   0,
1130 				   BUS_SPACE_MAXADDR_32BIT,
1131 				   BUS_SPACE_MAXADDR,
1132 				   NULL, NULL,
1133 				   maxsize, nsegments, maxsize,
1134 				   BUS_DMA_NOWAIT, &ring->data_dmat);
1135 #else
1136 	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1137 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1138 	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1139 #endif
1140 	if (error != 0) {
1141 		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1142 		goto fail;
1143 	}
1144 
1145 	paddr = ring->cmd_dma.paddr;
1146 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1147 		struct iwm_tx_data *data = &ring->data[i];
1148 
1149 		data->cmd_paddr = paddr;
1150 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1151 		    + offsetof(struct iwm_tx_cmd, scratch);
1152 		paddr += sizeof(struct iwm_device_cmd);
1153 
1154 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1155 		if (error != 0) {
1156 			device_printf(sc->sc_dev,
1157 			    "could not create TX buf DMA map\n");
1158 			goto fail;
1159 		}
1160 	}
1161 	KASSERT(paddr == ring->cmd_dma.paddr + size,
1162 	    ("invalid physical address"));
1163 	return 0;
1164 
1165 fail:	iwm_free_tx_ring(sc, ring);
1166 	return error;
1167 }
1168 
1169 static void
1170 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1171 {
1172 	int i;
1173 
1174 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1175 		struct iwm_tx_data *data = &ring->data[i];
1176 
1177 		if (data->m != NULL) {
1178 			bus_dmamap_sync(ring->data_dmat, data->map,
1179 			    BUS_DMASYNC_POSTWRITE);
1180 			bus_dmamap_unload(ring->data_dmat, data->map);
1181 			m_freem(data->m);
1182 			data->m = NULL;
1183 		}
1184 	}
1185 	/* Clear TX descriptors. */
1186 	memset(ring->desc, 0, ring->desc_dma.size);
1187 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1188 	    BUS_DMASYNC_PREWRITE);
1189 	sc->qfullmsk &= ~(1 << ring->qid);
1190 	ring->queued = 0;
1191 	ring->cur = 0;
1192 
1193 	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1194 		iwm_pcie_clear_cmd_in_flight(sc);
1195 }
1196 
1197 static void
1198 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1199 {
1200 	int i;
1201 
1202 	iwm_dma_contig_free(&ring->desc_dma);
1203 	iwm_dma_contig_free(&ring->cmd_dma);
1204 
1205 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1206 		struct iwm_tx_data *data = &ring->data[i];
1207 
1208 		if (data->m != NULL) {
1209 			bus_dmamap_sync(ring->data_dmat, data->map,
1210 			    BUS_DMASYNC_POSTWRITE);
1211 			bus_dmamap_unload(ring->data_dmat, data->map);
1212 			m_freem(data->m);
1213 			data->m = NULL;
1214 		}
1215 		if (data->map != NULL) {
1216 			bus_dmamap_destroy(ring->data_dmat, data->map);
1217 			data->map = NULL;
1218 		}
1219 	}
1220 	if (ring->data_dmat != NULL) {
1221 		bus_dma_tag_destroy(ring->data_dmat);
1222 		ring->data_dmat = NULL;
1223 	}
1224 }
1225 
1226 /*
1227  * High-level hardware frobbing routines
1228  */
1229 
1230 static void
1231 iwm_enable_interrupts(struct iwm_softc *sc)
1232 {
1233 	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1234 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1235 }
1236 
1237 static void
1238 iwm_restore_interrupts(struct iwm_softc *sc)
1239 {
1240 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1241 }
1242 
1243 static void
1244 iwm_disable_interrupts(struct iwm_softc *sc)
1245 {
1246 	/* disable interrupts */
1247 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1248 
1249 	/* acknowledge all interrupts */
1250 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1251 	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1252 }
1253 
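/*
 * ICT background (editor's summary): instead of requiring a register
 * read to learn the interrupt cause, the device DMAs 32-bit cause
 * words into the table allocated in iwm_alloc_ict(), and iwm_intr()
 * walks that table from ict_cur when IWM_FLAG_USE_ICT is set.  The
 * table's physical address is programmed below shifted by
 * IWM_ICT_PADDR_SHIFT, which is why the allocation is aligned to
 * (1 << IWM_ICT_PADDR_SHIFT).
 */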
1254 static void
1255 iwm_ict_reset(struct iwm_softc *sc)
1256 {
1257 	iwm_disable_interrupts(sc);
1258 
1259 	/* Reset ICT table. */
1260 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1261 	sc->ict_cur = 0;
1262 
1263 	/* Set physical address of ICT table (4KB aligned). */
1264 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1265 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1266 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1267 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1268 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1269 
1270 	/* Switch to ICT interrupt mode in driver. */
1271 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1272 
1273 	/* Re-enable interrupts. */
1274 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1275 	iwm_enable_interrupts(sc);
1276 }
1277 
1278 /*
1279  * Since this hard-resets things, it's time to actually
1280  * mark the first vap (if any) as having no mac context.
1281  * It's annoying, but since the driver is potentially being
1282  * stop/start'ed whilst active (thanks openbsd port!) we
1283  * have to correctly track this.
1284  */
1285 static void
1286 iwm_stop_device(struct iwm_softc *sc)
1287 {
1288 	struct ieee80211com *ic = &sc->sc_ic;
1289 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1290 	int chnl, qid;
1291 	uint32_t mask = 0;
1292 
1293 	/* tell the device to stop sending interrupts */
1294 	iwm_disable_interrupts(sc);
1295 
1296 	/*
1297 	 * FreeBSD-local: mark the first vap as not-uploaded,
1298 	 * so the next transition through auth/assoc
1299 	 * will correctly populate the MAC context.
1300 	 */
1301 	if (vap) {
1302 		struct iwm_vap *iv = IWM_VAP(vap);
1303 		iv->phy_ctxt = NULL;
1304 		iv->is_uploaded = 0;
1305 	}
1306 	sc->sc_firmware_state = 0;
1307 	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
1308 
1309 	/* device going down, Stop using ICT table */
1310 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1311 
1312 	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1313 
1314 	if (iwm_nic_lock(sc)) {
1315 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1316 
1317 		/* Stop each Tx DMA channel */
1318 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1319 			IWM_WRITE(sc,
1320 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1321 			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1322 		}
1323 
1324 		/* Wait for DMA channels to be idle */
1325 		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1326 		    5000)) {
1327 			device_printf(sc->sc_dev,
1328 			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1329 			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1330 		}
1331 		iwm_nic_unlock(sc);
1332 	}
1333 	iwm_pcie_rx_stop(sc);
1334 
1335 	/* Stop RX ring. */
1336 	iwm_reset_rx_ring(sc, &sc->rxq);
1337 
1338 	/* Reset all TX rings. */
1339 	for (qid = 0; qid < nitems(sc->txq); qid++)
1340 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1341 
1342 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1343 		/* Power-down device's busmaster DMA clocks */
1344 		if (iwm_nic_lock(sc)) {
1345 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1346 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1347 			iwm_nic_unlock(sc);
1348 		}
1349 		DELAY(5);
1350 	}
1351 
1352 	/* Make sure (redundant) we've released our request to stay awake */
1353 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1354 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1355 
1356 	/* Stop the device, and put it in low power state */
1357 	iwm_apm_stop(sc);
1358 
1359 	/* stop and reset the on-board processor */
1360 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1361 	DELAY(1000);
1362 
1363 	/*
1364 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1365 	 * This is a bug in certain versions of the hardware.
1366 	 * Certain devices also keep sending HW RF kill interrupt all
1367 	 * the time, unless the interrupt is ACKed even if the interrupt
1368 	 * should be masked. Re-ACK all the interrupts here.
1369 	 */
1370 	iwm_disable_interrupts(sc);
1371 
1372 	/*
1373 	 * Even if we stop the HW, we still want the RF kill
1374 	 * interrupt
1375 	 */
1376 	iwm_enable_rfkill_int(sc);
1377 	iwm_check_rfkill(sc);
1378 }
1379 
1380 static void
1381 iwm_mvm_nic_config(struct iwm_softc *sc)
1382 {
1383 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1384 	uint32_t reg_val = 0;
1385 	uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1386 
1387 	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1388 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1389 	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1390 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1391 	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1392 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1393 
1394 	/* SKU control */
1395 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1396 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1397 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1398 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1399 
1400 	/* radio configuration */
1401 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1402 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1403 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1404 
1405 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1406 
1407 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1408 	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1409 	    radio_cfg_step, radio_cfg_dash);
1410 
1411 	/*
1412 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
1413 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
1414 	 * to lose ownership and become unable to obtain it back.
1415 	 */
1416 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1417 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1418 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1419 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1420 	}
1421 }
1422 
1423 static int
1424 iwm_nic_rx_init(struct iwm_softc *sc)
1425 {
1426 	/*
1427 	 * Initialize RX ring.  This is from the iwn driver.
1428 	 */
1429 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1430 
1431 	/* Stop Rx DMA */
1432 	iwm_pcie_rx_stop(sc);
1433 
1434 	if (!iwm_nic_lock(sc))
1435 		return EBUSY;
1436 
1437 	/* reset and flush pointers */
1438 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1439 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1440 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1441 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1442 
1443 	/* Set physical address of RX ring (256-byte aligned). */
1444 	IWM_WRITE(sc,
1445 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1446 
1447 	/* Set physical address of RX status (16-byte aligned). */
1448 	IWM_WRITE(sc,
1449 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1450 
1451 #if defined(__DragonFly__)
1452 	/* Force serialization (probably not needed but don't trust the HW) */
1453 	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
1454 #endif
1455 
1456 	/* Enable Rx DMA
1457 	 * XXX 5000 HW isn't supported by the iwm(4) driver.
1458 	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
1459 	 *      the credit mechanism in 5000 HW RX FIFO
1460 	 * Direct rx interrupts to hosts
1461 	 * Rx buffer size 4 or 8k or 12k
1462 	 * RB timeout 0x10
1463 	 * 256 RBDs
1464 	 */
1465 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1466 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
1467 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
1468 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
1469 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
1470 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1471 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1472 
1473 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1474 
1475 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
1476 	if (sc->cfg->host_interrupt_operation_mode)
1477 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1478 
1479 	/*
1480 	 * Thus sayeth el jefe (iwlwifi) via a comment:
1481 	 *
1482 	 * This value should initially be 0 (before preparing any
1483 	 * RBs), should be 8 after preparing the first 8 RBs (for example)
1484 	 */
1485 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1486 
1487 	iwm_nic_unlock(sc);
1488 
1489 	return 0;
1490 }
1491 
1492 static int
1493 iwm_nic_tx_init(struct iwm_softc *sc)
1494 {
1495 	int qid;
1496 
1497 	if (!iwm_nic_lock(sc))
1498 		return EBUSY;
1499 
1500 	/* Deactivate TX scheduler. */
1501 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1502 
1503 	/* Set physical address of "keep warm" page (16-byte aligned). */
1504 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1505 
1506 	/* Initialize TX rings. */
1507 	for (qid = 0; qid < nitems(sc->txq); qid++) {
1508 		struct iwm_tx_ring *txq = &sc->txq[qid];
1509 
1510 		/* Set physical address of TX ring (256-byte aligned). */
1511 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1512 		    txq->desc_dma.paddr >> 8);
1513 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1514 		    "%s: loading ring %d descriptors (%p) at %lx\n",
1515 		    __func__,
1516 		    qid, txq->desc,
1517 		    (unsigned long) (txq->desc_dma.paddr >> 8));
1518 	}
1519 
1520 	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1521 
1522 	iwm_nic_unlock(sc);
1523 
1524 	return 0;
1525 }
1526 
1527 static int
1528 iwm_nic_init(struct iwm_softc *sc)
1529 {
1530 	int error;
1531 
1532 	iwm_apm_init(sc);
1533 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1534 		iwm_set_pwr(sc);
1535 
1536 	iwm_mvm_nic_config(sc);
1537 
1538 	if ((error = iwm_nic_rx_init(sc)) != 0)
1539 		return error;
1540 
1541 	/*
1542 	 * Ditto for TX, from iwn
1543 	 */
1544 	if ((error = iwm_nic_tx_init(sc)) != 0)
1545 		return error;
1546 
1547 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1548 	    "%s: shadow registers enabled\n", __func__);
1549 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1550 
1551 	return 0;
1552 }
1553 
1554 int
1555 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1556 {
1557 	if (!iwm_nic_lock(sc)) {
1558 		device_printf(sc->sc_dev,
1559 		    "%s: cannot enable txq %d\n",
1560 		    __func__,
1561 		    qid);
1562 		return EBUSY;
1563 	}
1564 
1565 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1566 
1567 	if (qid == IWM_MVM_CMD_QUEUE) {
1568 		/* deactivate before configuration */
1569 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1570 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1571 		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1572 
1573 		iwm_nic_unlock(sc);
1574 
1575 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1576 
1577 		if (!iwm_nic_lock(sc)) {
1578 			device_printf(sc->sc_dev,
1579 			    "%s: cannot enable txq %d\n", __func__, qid);
1580 			return EBUSY;
1581 		}
1582 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1583 		iwm_nic_unlock(sc);
1584 
1585 		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1586 		/* Set scheduler window size and frame limit. */
1587 		iwm_write_mem32(sc,
1588 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1589 		    sizeof(uint32_t),
1590 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1591 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1592 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1593 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1594 
1595 		if (!iwm_nic_lock(sc)) {
1596 			device_printf(sc->sc_dev,
1597 			    "%s: cannot enable txq %d\n", __func__, qid);
1598 			return EBUSY;
1599 		}
1600 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1601 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1602 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1603 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1604 		    IWM_SCD_QUEUE_STTS_REG_MSK);
1605 	} else {
1606 		struct iwm_scd_txq_cfg_cmd cmd;
1607 		int error;
1608 
1609 		iwm_nic_unlock(sc);
1610 
1611 		memset(&cmd, 0, sizeof(cmd));
1612 		cmd.scd_queue = qid;
1613 		cmd.enable = 1;
1614 		cmd.sta_id = sta_id;
1615 		cmd.tx_fifo = fifo;
1616 		cmd.aggregate = 0;
1617 		cmd.window = IWM_FRAME_LIMIT;
1618 
1619 		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1620 		    sizeof(cmd), &cmd);
1621 		if (error) {
1622 			device_printf(sc->sc_dev,
1623 			    "cannot enable txq %d\n", qid);
1624 			return error;
1625 		}
1626 
1627 		if (!iwm_nic_lock(sc))
1628 			return EBUSY;
1629 	}
1630 
1631 	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1632 	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1633 
1634 	iwm_nic_unlock(sc);
1635 
1636 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1637 	    __func__, qid, fifo);
1638 
1639 	return 0;
1640 }
1641 
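/*
 * Post-"alive" handshake (editor's summary of the steps below): once
 * the firmware reports alive, reset the ICT table, sanity-check the
 * scheduler SRAM base against the address carried in the alive
 * response, zero the scheduler context/translation SRAM, point the
 * scheduler at our DMA ring memory, bring up the command queue, and
 * finally enable the Tx DMA channels.
 */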
1642 static int
1643 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1644 {
1645 	int error, chnl;
1646 
1647 	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1648 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1649 
1650 	if (!iwm_nic_lock(sc))
1651 		return EBUSY;
1652 
1653 	iwm_ict_reset(sc);
1654 
1655 	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1656 	if (scd_base_addr != 0 &&
1657 	    scd_base_addr != sc->scd_base_addr) {
1658 		device_printf(sc->sc_dev,
1659 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1660 		    __func__, scd_base_addr, sc->scd_base_addr);
1661 	}
1662 
1663 	iwm_nic_unlock(sc);
1664 
1665 	/* reset context data, TX status and translation data */
1666 	error = iwm_write_mem(sc,
1667 	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1668 	    NULL, clear_dwords);
1669 	if (error)
1670 		return EBUSY;
1671 
1672 	if (!iwm_nic_lock(sc))
1673 		return EBUSY;
1674 
1675 	/* Set physical address of TX scheduler rings (1KB aligned). */
1676 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1677 
1678 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1679 
1680 	iwm_nic_unlock(sc);
1681 
1682 	/* Enable the command channel; FIFO 7 is the command FIFO. */
1683 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1684 	if (error)
1685 		return error;
1686 
1687 	if (!iwm_nic_lock(sc))
1688 		return EBUSY;
1689 
1690 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1691 
1692 	/* Enable DMA channels. */
1693 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1694 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1695 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1696 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1697 	}
1698 
1699 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1700 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1701 
1702 	iwm_nic_unlock(sc);
1703 
1704 	/* Enable L1-Active */
1705 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1706 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1707 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1708 	}
1709 
1710 	return error;
1711 }
1712 
1713 /*
1714  * NVM read access and content parsing.  We do not support
1715  * external NVM or writing NVM.
1716  * iwlwifi/mvm/nvm.c
1717  */
1718 
1719 /* Default NVM chunk size per read request */
1720 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1721 
1722 #define IWM_NVM_WRITE_OPCODE 1
1723 #define IWM_NVM_READ_OPCODE 0
1724 
1725 /* load nvm chunk response */
1726 enum {
1727 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1728 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1729 };
1730 
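/*
 * Read one chunk of an NVM section with an IWM_NVM_ACCESS_CMD firmware
 * command.  On success the chunk is copied into 'data' at 'offset' and
 * '*len' is set to the number of bytes actually returned; a short read
 * signals the end of the section.
 */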
1731 static int
1732 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1733 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1734 {
1735 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1736 		.offset = htole16(offset),
1737 		.length = htole16(length),
1738 		.type = htole16(section),
1739 		.op_code = IWM_NVM_READ_OPCODE,
1740 	};
1741 	struct iwm_nvm_access_resp *nvm_resp;
1742 	struct iwm_rx_packet *pkt;
1743 	struct iwm_host_cmd cmd = {
1744 		.id = IWM_NVM_ACCESS_CMD,
1745 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1746 		.data = { &nvm_access_cmd, },
1747 	};
1748 	int ret, bytes_read, offset_read;
1749 	uint8_t *resp_data;
1750 
1751 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1752 
1753 	ret = iwm_send_cmd(sc, &cmd);
1754 	if (ret) {
1755 		device_printf(sc->sc_dev,
1756 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1757 		return ret;
1758 	}
1759 
1760 	pkt = cmd.resp_pkt;
1761 
1762 	/* Extract NVM response */
1763 	nvm_resp = (void *)pkt->data;
1764 	ret = le16toh(nvm_resp->status);
1765 	bytes_read = le16toh(nvm_resp->length);
1766 	offset_read = le16toh(nvm_resp->offset);
1767 	resp_data = nvm_resp->data;
1768 	if (ret) {
1769 		if ((offset != 0) &&
1770 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1771 			/*
1772 			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
1773 			 * read a chunk from an address that is a multiple of
1774 			 * 2K and got an error because that address is empty.
1775 			 * Meaning of (offset != 0): the driver has already
1776 			 * read valid data from another chunk, so this case
1777 			 * is not an error.
1778 			 */
1779 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1780 				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1781 				    offset);
1782 			*len = 0;
1783 			ret = 0;
1784 		} else {
1785 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1786 				    "NVM access command failed with status %d\n", ret);
1787 			ret = EIO;
1788 		}
1789 		goto exit;
1790 	}
1791 
1792 	if (offset_read != offset) {
1793 		device_printf(sc->sc_dev,
1794 		    "NVM ACCESS response with invalid offset %d\n",
1795 		    offset_read);
1796 		ret = EINVAL;
1797 		goto exit;
1798 	}
1799 
1800 	if (bytes_read > length) {
1801 		device_printf(sc->sc_dev,
1802 		    "NVM ACCESS response with too much data "
1803 		    "(%d bytes requested, %d bytes received)\n",
1804 		    length, bytes_read);
1805 		ret = EINVAL;
1806 		goto exit;
1807 	}
1808 
1809 	/* Copy the chunk into the caller's buffer. */
1810 	memcpy(data + offset, resp_data, bytes_read);
1811 	*len = bytes_read;
1812 
1813  exit:
1814 	iwm_free_resp(sc, &cmd);
1815 	return ret;
1816 }
1817 
1818 /*
1819  * Reads an NVM section completely.
1820  * NICs prior to the 7000 family don't have a real NVM, but just read
1821  * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
1822  * by the uCode, we must manually check in this case that we don't
1823  * overflow by trying to read more than the EEPROM size.
1824  * For 7000 family NICs, we supply the maximal size we can read, and
1825  * the uCode fills the response with as much data as fits without
1826  * overflowing, so no check is needed.
1827  */
1828 static int
1829 iwm_nvm_read_section(struct iwm_softc *sc,
1830 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1831 {
1832 	uint16_t seglen, length, offset = 0;
1833 	int ret;
1834 
1835 	/* Set nvm section read length */
1836 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1837 
1838 	seglen = length;
1839 
1840 	/* Read the NVM until exhausted (reading less than requested) */
1841 	while (seglen == length) {
1842 		/* Make sure the next read cannot overflow the EEPROM-sized buffer. */
1843 		if ((size_read + offset + length) >
1844 		    sc->cfg->eeprom_size) {
1845 			device_printf(sc->sc_dev,
1846 			    "EEPROM size is too small for NVM\n");
1847 			return ENOBUFS;
1848 		}
1849 
1850 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1851 		if (ret) {
1852 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1853 				    "Cannot read NVM from section %d offset %d, length %d\n",
1854 				    section, offset, length);
1855 			return ret;
1856 		}
1857 		offset += seglen;
1858 	}
1859 
1860 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1861 		    "NVM section %d read completed\n", section);
1862 	*len = offset;
1863 	return 0;
1864 }
1865 
1866 /* NVM offsets (in words) definitions */
1867 enum iwm_nvm_offsets {
1868 	/* NVM HW-Section offset (in words) definitions */
1869 	IWM_HW_ADDR = 0x15,
1870 
1871 	/* NVM SW-Section offset (in words) definitions */
1872 	IWM_NVM_SW_SECTION = 0x1C0,
1873 	IWM_NVM_VERSION = 0,
1874 	IWM_RADIO_CFG = 1,
1875 	IWM_SKU = 2,
1876 	IWM_N_HW_ADDRS = 3,
1877 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1878 
1879 	/* NVM calibration section offset (in words) definitions */
1880 	IWM_NVM_CALIB_SECTION = 0x2B8,
1881 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1882 };
1883 
1884 enum iwm_8000_nvm_offsets {
1885 	/* NVM HW-Section offset (in words) definitions */
1886 	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1887 	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1888 	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1889 	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1890 	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1891 
1892 	/* NVM SW-Section offset (in words) definitions */
1893 	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1894 	IWM_NVM_VERSION_8000 = 0,
1895 	IWM_RADIO_CFG_8000 = 0,
1896 	IWM_SKU_8000 = 2,
1897 	IWM_N_HW_ADDRS_8000 = 3,
1898 
1899 	/* NVM REGULATORY-Section offset (in words) definitions */
1900 	IWM_NVM_CHANNELS_8000 = 0,
1901 	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1902 	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1903 	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1904 
1905 	/* NVM calibration section offset (in words) definitions */
1906 	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1907 	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1908 };
1909 
1910 /* SKU Capabilities (actual values from NVM definition) */
1911 enum nvm_sku_bits {
1912 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1913 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1914 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1915 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1916 };
1917 
1918 /* radio config bits (actual values from NVM definition) */
1919 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1920 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1921 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1922 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1923 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1924 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1925 
1926 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1927 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1928 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1929 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1930 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1931 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
1932 
1933 /**
1934  * enum iwm_nvm_channel_flags - channel flags in NVM
1935  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1936  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1937  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1938  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1939  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1940  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1941  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1942  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1943  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1944  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1945  */
1946 enum iwm_nvm_channel_flags {
1947 	IWM_NVM_CHANNEL_VALID = (1 << 0),
1948 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1949 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1950 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1951 	IWM_NVM_CHANNEL_DFS = (1 << 7),
1952 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1953 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1954 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1955 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1956 };
1957 
1958 /*
1959  * Translate EEPROM flags to net80211.
1960  */
1961 static uint32_t
1962 iwm_eeprom_channel_flags(uint16_t ch_flags)
1963 {
1964 	uint32_t nflags;
1965 
1966 	nflags = 0;
1967 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1968 		nflags |= IEEE80211_CHAN_PASSIVE;
1969 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1970 		nflags |= IEEE80211_CHAN_NOADHOC;
1971 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1972 		nflags |= IEEE80211_CHAN_DFS;
1973 		/* Just in case. */
1974 		nflags |= IEEE80211_CHAN_NOADHOC;
1975 	}
1976 
1977 	return (nflags);
1978 }
1979 
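/*
 * Add every channel in the NVM range [ch_idx, ch_num) that is marked
 * IWM_NVM_CHANNEL_VALID to the net80211 channel list, translating the
 * NVM flags as above.
 */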
1980 static void
1981 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1982     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1983     const uint8_t bands[])
1984 {
1985 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1986 	uint32_t nflags;
1987 	uint16_t ch_flags;
1988 	uint8_t ieee;
1989 	int error;
1990 
1991 	for (; ch_idx < ch_num; ch_idx++) {
1992 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1993 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1994 			ieee = iwm_nvm_channels[ch_idx];
1995 		else
1996 			ieee = iwm_nvm_channels_8000[ch_idx];
1997 
1998 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1999 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2000 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
2001 			    ieee, ch_flags,
2002 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2003 			    "5.2" : "2.4");
2004 			continue;
2005 		}
2006 
2007 		nflags = iwm_eeprom_channel_flags(ch_flags);
2008 		error = ieee80211_add_channel(chans, maxchans, nchans,
2009 		    ieee, 0, 0, nflags, bands);
2010 		if (error != 0)
2011 			break;
2012 
2013 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2014 		    "Ch. %d Flags %x [%sGHz] - Added\n",
2015 		    ieee, ch_flags,
2016 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2017 		    "5.2" : "2.4");
2018 	}
2019 }
2020 
2021 static void
2022 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2023     struct ieee80211_channel chans[])
2024 {
2025 	struct iwm_softc *sc = ic->ic_softc;
2026 	struct iwm_nvm_data *data = sc->nvm_data;
2027 	uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
2028 	size_t ch_num;
2029 
2030 	memset(bands, 0, sizeof(bands));
2031 	/* 1-13: 11b/g channels. */
2032 	setbit(bands, IEEE80211_MODE_11B);
2033 	setbit(bands, IEEE80211_MODE_11G);
2034 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2035 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2036 
2037 	/* 14: 11b channel only. */
2038 	clrbit(bands, IEEE80211_MODE_11G);
2039 	iwm_add_channel_band(sc, chans, maxchans, nchans,
2040 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2041 
2042 	if (data->sku_cap_band_52GHz_enable) {
2043 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2044 			ch_num = nitems(iwm_nvm_channels);
2045 		else
2046 			ch_num = nitems(iwm_nvm_channels_8000);
2047 		memset(bands, 0, sizeof(bands));
2048 		setbit(bands, IEEE80211_MODE_11A);
2049 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2050 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2051 	}
2052 }
2053 
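/*
 * Family 8000 MAC address selection: prefer the address from the
 * MAC_OVERRIDE (MAO) section; if it is the reserved address, the
 * broadcast address, or otherwise invalid, fall back to the OTP
 * address in the WFMP registers, whose bytes are stored reversed
 * within each 32-bit word.
 */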
2054 static void
2055 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2056 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2057 {
2058 	const uint8_t *hw_addr;
2059 
2060 	if (mac_override) {
2061 		static const uint8_t reserved_mac[] = {
2062 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2063 		};
2064 
2065 		hw_addr = (const uint8_t *)(mac_override +
2066 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2067 
2068 		/*
2069 		 * Store the MAC address from the MAO section.
2070 		 * No byte swapping is required in the MAO section.
2071 		 */
2072 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2073 
2074 		/*
2075 		 * Force the use of the OTP MAC address in case of reserved MAC
2076 		 * address in the NVM, or if address is given but invalid.
2077 		 */
2078 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2079 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2080 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2081 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2082 			return;
2083 
2084 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2085 		    "%s: mac address from nvm override section invalid\n",
2086 		    __func__);
2087 	}
2088 
2089 	if (nvm_hw) {
2090 		/* read the mac address from WFMP registers */
2091 		uint32_t mac_addr0 =
2092 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2093 		uint32_t mac_addr1 =
2094 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2095 
2096 		hw_addr = (const uint8_t *)&mac_addr0;
2097 		data->hw_addr[0] = hw_addr[3];
2098 		data->hw_addr[1] = hw_addr[2];
2099 		data->hw_addr[2] = hw_addr[1];
2100 		data->hw_addr[3] = hw_addr[0];
2101 
2102 		hw_addr = (const uint8_t *)&mac_addr1;
2103 		data->hw_addr[4] = hw_addr[1];
2104 		data->hw_addr[5] = hw_addr[0];
2105 
2106 		return;
2107 	}
2108 
2109 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2110 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2111 }
2112 
2113 static int
2114 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2115 	    const uint16_t *phy_sku)
2116 {
2117 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2118 		return le16_to_cpup(nvm_sw + IWM_SKU);
2119 
2120 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2121 }
2122 
2123 static int
2124 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2125 {
2126 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2127 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2128 	else
2129 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2130 						IWM_NVM_VERSION_8000));
2131 }
2132 
2133 static int
2134 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2135 		  const uint16_t *phy_sku)
2136 {
2137 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2138 		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2139 
2140 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2141 }
2142 
2143 static int
2144 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2145 {
2146 	int n_hw_addr;
2147 
2148 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2149 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2150 
2151 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2152 
2153 	return n_hw_addr & IWM_N_HW_ADDR_MASK;
2154 }
2155 
2156 static void
2157 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2158 		  uint32_t radio_cfg)
2159 {
2160 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2161 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2162 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2163 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2164 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2165 		return;
2166 	}
2167 
2168 	/* set the radio configuration for family 8000 */
2169 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2170 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2171 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2172 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2173 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2174 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2175 }
2176 
2177 static int
2178 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2179 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2180 {
2181 #ifdef notyet /* for FAMILY 9000 */
2182 	if (cfg->mac_addr_from_csr) {
2183 		iwm_set_hw_address_from_csr(sc, data);
2184 	} else
2185 #endif
2186 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2187 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2188 
2189 		/* Little-endian 16-bit words: the bytes arrive pair-swapped (21 43 65). */
2190 		data->hw_addr[0] = hw_addr[1];
2191 		data->hw_addr[1] = hw_addr[0];
2192 		data->hw_addr[2] = hw_addr[3];
2193 		data->hw_addr[3] = hw_addr[2];
2194 		data->hw_addr[4] = hw_addr[5];
2195 		data->hw_addr[5] = hw_addr[4];
2196 	} else {
2197 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2198 	}
2199 
2200 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2201 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2202 		return EINVAL;
2203 	}
2204 
2205 	return 0;
2206 }
2207 
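/*
 * Build the driver's iwm_nvm_data (radio config, SKU capabilities, MAC
 * address and per-channel flags) from the raw NVM section images.
 */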
2208 static struct iwm_nvm_data *
2209 iwm_parse_nvm_data(struct iwm_softc *sc,
2210 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2211 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2212 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2213 {
2214 	struct iwm_nvm_data *data;
2215 	uint32_t sku, radio_cfg;
2216 	uint16_t lar_config;
2217 
2218 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2219 		data = kmalloc(sizeof(*data) +
2220 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2221 		    M_DEVBUF, M_WAITOK | M_ZERO);
2222 	} else {
2223 		data = kmalloc(sizeof(*data) +
2224 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2225 		    M_DEVBUF, M_WAITOK | M_ZERO);
2226 	}
2227 	if (!data)
2228 		return NULL;
2229 
2230 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2231 
2232 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2233 	iwm_set_radio_cfg(sc, data, radio_cfg);
2234 
2235 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2236 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2237 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2238 	data->sku_cap_11n_enable = 0;
2239 
2240 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2241 
2242 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2243 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
2244 				       IWM_NVM_LAR_OFFSET_8000_OLD :
2245 				       IWM_NVM_LAR_OFFSET_8000;
2246 
2247 		lar_config = le16_to_cpup(regulatory + lar_offset);
2248 		data->lar_enabled = !!(lar_config &
2249 				       IWM_NVM_LAR_ENABLED_8000);
2250 	}
2251 
2252 	/* If no valid mac address was found - bail out */
2253 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2254 		kfree(data, M_DEVBUF);
2255 		return NULL;
2256 	}
2257 
2258 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2259 		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2260 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2261 	} else {
2262 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2263 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2264 	}
2265 
2266 	return data;
2267 }
2268 
2269 static void
2270 iwm_free_nvm_data(struct iwm_nvm_data *data)
2271 {
2272 	if (data != NULL)
2273 		kfree(data, M_DEVBUF);
2274 }
2275 
2276 static struct iwm_nvm_data *
2277 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2278 {
2279 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2280 
2281 	/* Checking for required sections */
2282 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2283 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2284 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2285 			device_printf(sc->sc_dev,
2286 			    "Can't parse empty OTP/NVM sections\n");
2287 			return NULL;
2288 		}
2289 	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2290 		/* SW and REGULATORY sections are mandatory */
2291 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2292 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2293 			device_printf(sc->sc_dev,
2294 			    "Can't parse empty OTP/NVM sections\n");
2295 			return NULL;
2296 		}
2297 		/* MAC_OVERRIDE or at least HW section must exist */
2298 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2299 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2300 			device_printf(sc->sc_dev,
2301 			    "Can't parse mac_address, empty sections\n");
2302 			return NULL;
2303 		}
2304 
2305 		/* PHY_SKU section is mandatory in B0 */
2306 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2307 			device_printf(sc->sc_dev,
2308 			    "Can't parse phy_sku in B0, empty sections\n");
2309 			return NULL;
2310 		}
2311 	} else {
2312 		panic("unknown device family %d\n", sc->cfg->device_family);
2313 	}
2314 
2315 	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2316 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2317 	calib = (const uint16_t *)
2318 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2319 	regulatory = (const uint16_t *)
2320 	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2321 	mac_override = (const uint16_t *)
2322 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2323 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2324 
2325 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2326 	    phy_sku, regulatory);
2327 }
2328 
2329 static int
2330 iwm_nvm_init(struct iwm_softc *sc)
2331 {
2332 	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2333 	int i, ret, section;
2334 	uint32_t size_read = 0;
2335 	uint8_t *nvm_buffer, *temp;
2336 	uint16_t len;
2337 
2338 	memset(nvm_sections, 0, sizeof(nvm_sections));
2339 
2340 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2341 		return EINVAL;
2342 
2343 	/* Load NVM values from the NIC via firmware NVM-access commands. */
2345 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2346 
2347 	nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF,
2348 	    M_INTWAIT | M_ZERO);
2349 	if (!nvm_buffer)
2350 		return ENOMEM;
2351 	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2352 		/* we override the constness for initial read */
2353 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2354 					   &len, size_read);
2355 		if (ret)
2356 			continue;
2357 		size_read += len;
2358 		temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
2359 		if (!temp) {
2360 			ret = ENOMEM;
2361 			break;
2362 		}
2363 		memcpy(temp, nvm_buffer, len);
2364 
2365 		nvm_sections[section].data = temp;
2366 		nvm_sections[section].length = len;
2367 	}
2368 	if (!size_read)
2369 		device_printf(sc->sc_dev, "OTP is blank\n");
2370 	kfree(nvm_buffer, M_DEVBUF);
2371 
2372 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2373 	if (!sc->nvm_data)
2374 		return EINVAL;
2375 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2376 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2377 
2378 	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2379 		if (nvm_sections[i].data != NULL)
2380 			kfree(nvm_sections[i].data, M_DEVBUF);
2381 	}
2382 
2383 	return 0;
2384 }
2385 
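/*
 * Copy one firmware section into device memory through the FH service
 * channel, one bounce-buffer-sized chunk at a time, opening the
 * extended-address window for destinations in the extended SRAM range.
 */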
2386 static int
2387 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2388 	const struct iwm_fw_desc *section)
2389 {
2390 	struct iwm_dma_info *dma = &sc->fw_dma;
2391 	uint8_t *v_addr;
2392 	bus_addr_t p_addr;
2393 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2394 	int ret = 0;
2395 
2396 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2397 		    "%s: [%d] uCode section being loaded...\n",
2398 		    __func__, section_num);
2399 
2400 	v_addr = dma->vaddr;
2401 	p_addr = dma->paddr;
2402 
2403 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2404 		uint32_t copy_size, dst_addr;
2405 		int extended_addr = FALSE;
2406 
2407 		copy_size = MIN(chunk_sz, section->len - offset);
2408 		dst_addr = section->offset + offset;
2409 
2410 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2411 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2412 			extended_addr = TRUE;
2413 
2414 		if (extended_addr)
2415 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2416 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2417 
2418 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2419 		    copy_size);
2420 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2421 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2422 						   copy_size);
2423 
2424 		if (extended_addr)
2425 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2426 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2427 
2428 		if (ret) {
2429 			device_printf(sc->sc_dev,
2430 			    "%s: Could not load the [%d] uCode section\n",
2431 			    __func__, section_num);
2432 			break;
2433 		}
2434 	}
2435 
2436 	return ret;
2437 }
2438 
2439 /*
2440  * ucode
2441  */
2442 static int
2443 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2444 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2445 {
2446 	int ret;
2447 
2448 	sc->sc_fw_chunk_done = 0;
2449 
2450 	if (!iwm_nic_lock(sc))
2451 		return EBUSY;
2452 
2453 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2454 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2455 
2456 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2457 	    dst_addr);
2458 
2459 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2460 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2461 
2462 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2463 	    (iwm_get_dma_hi_addr(phy_addr)
2464 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2465 
2466 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2467 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2468 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2469 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2470 
2471 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2472 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2473 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2474 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2475 
2476 	iwm_nic_unlock(sc);
2477 
2478 	/* wait up to 5s for this segment to load */
2479 	ret = 0;
2480 	while (!sc->sc_fw_chunk_done) {
2481 #if defined(__DragonFly__)
2482 		ret = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", 5 * hz);
2483 #else
2484 		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", 5 * hz);
2485 #endif
2486 		if (ret)
2487 			break;
2488 	}
2489 
2490 	if (ret != 0) {
2491 		device_printf(sc->sc_dev,
2492 		    "fw chunk addr 0x%x len %d failed to load\n",
2493 		    dst_addr, byte_cnt);
2494 		return ETIMEDOUT;
2495 	}
2496 
2497 	return 0;
2498 }
2499 
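/*
 * Family 8000 secure-boot loader: load the sections belonging to one
 * CPU, acknowledging each loaded section to the firmware through the
 * IWM_FH_UCODE_LOAD_STATUS register (CPU2 status in the upper 16 bits).
 */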
2500 static int
2501 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2502 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2503 {
2504 	int shift_param;
2505 	int i, ret = 0, sec_num = 0x1;
2506 	uint32_t val, last_read_idx = 0;
2507 
2508 	if (cpu == 1) {
2509 		shift_param = 0;
2510 		*first_ucode_section = 0;
2511 	} else {
2512 		shift_param = 16;
2513 		(*first_ucode_section)++;
2514 	}
2515 
2516 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2517 		last_read_idx = i;
2518 
2519 		/*
2520 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2521 		 * CPU1 sections from the CPU2 sections.
2522 		 * The PAGING_SEPARATOR_SECTION delimiter separates the
2523 		 * CPU2 non-paged sections from the CPU2 paging sections.
2524 		 */
2525 		if (!image->sec[i].data ||
2526 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2527 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2528 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2529 				    "Break since Data not valid or Empty section, sec = %d\n",
2530 				    i);
2531 			break;
2532 		}
2533 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2534 		if (ret)
2535 			return ret;
2536 
2537 		/* Notify the ucode of the loaded section number and status */
2538 		if (iwm_nic_lock(sc)) {
2539 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2540 			val = val | (sec_num << shift_param);
2541 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2542 			sec_num = (sec_num << 1) | 0x1;
2543 			iwm_nic_unlock(sc);
2544 		}
2545 	}
2546 
2547 	*first_ucode_section = last_read_idx;
2548 
2549 	iwm_enable_interrupts(sc);
2550 
2551 	if (iwm_nic_lock(sc)) {
2552 		if (cpu == 1)
2553 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2554 		else
2555 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2556 		iwm_nic_unlock(sc);
2557 	}
2558 
2559 	return 0;
2560 }
2561 
2562 static int
2563 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2564 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2565 {
2566 	int shift_param;
2567 	int i, ret = 0;
2568 	uint32_t last_read_idx = 0;
2569 
2570 	if (cpu == 1) {
2571 		shift_param = 0;
2572 		*first_ucode_section = 0;
2573 	} else {
2574 		shift_param = 16;
2575 		(*first_ucode_section)++;
2576 	}
2577 
2578 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2579 		last_read_idx = i;
2580 
2581 		/*
2582 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2583 		 * CPU1 sections from the CPU2 sections.
2584 		 * The PAGING_SEPARATOR_SECTION delimiter separates the
2585 		 * CPU2 non-paged sections from the CPU2 paging sections.
2586 		 */
2587 		if (!image->sec[i].data ||
2588 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2589 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2590 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2591 				    "Break since Data not valid or Empty section, sec = %d\n",
2592 				     i);
2593 			break;
2594 		}
2595 
2596 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2597 		if (ret)
2598 			return ret;
2599 	}
2600 
2601 	*first_ucode_section = last_read_idx;
2602 
2603 	return 0;
2604 
2605 }
2606 
2607 static int
2608 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2609 {
2610 	int ret = 0;
2611 	int first_ucode_section;
2612 
2613 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2614 		     image->is_dual_cpus ? "Dual" : "Single");
2615 
2616 	/* load to FW the binary non secured sections of CPU1 */
2617 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2618 	if (ret)
2619 		return ret;
2620 
2621 	if (image->is_dual_cpus) {
2622 		/* set CPU2 header address */
2623 		if (iwm_nic_lock(sc)) {
2624 			iwm_write_prph(sc,
2625 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2626 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2627 			iwm_nic_unlock(sc);
2628 		}
2629 
2630 		/* load to FW the binary sections of CPU2 */
2631 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2632 						 &first_ucode_section);
2633 		if (ret)
2634 			return ret;
2635 	}
2636 
2637 	iwm_enable_interrupts(sc);
2638 
2639 	/* release CPU reset */
2640 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2641 
2642 	return 0;
2643 }
2644 
2645 int
2646 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2647 	const struct iwm_fw_img *image)
2648 {
2649 	int ret = 0;
2650 	int first_ucode_section;
2651 
2652 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2653 		    image->is_dual_cpus ? "Dual" : "Single");
2654 
2655 	/* configure the ucode to be ready to get the secured image */
2656 	/* release CPU reset */
2657 	if (iwm_nic_lock(sc)) {
2658 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2659 		    IWM_RELEASE_CPU_RESET_BIT);
2660 		iwm_nic_unlock(sc);
2661 	}
2662 
2663 	/* load to FW the binary Secured sections of CPU1 */
2664 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2665 	    &first_ucode_section);
2666 	if (ret)
2667 		return ret;
2668 
2669 	/* load to FW the binary sections of CPU2 */
2670 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2671 	    &first_ucode_section);
2672 }
2673 
2674 /* XXX Get rid of this definition */
2675 static inline void
2676 iwm_enable_fw_load_int(struct iwm_softc *sc)
2677 {
2678 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2679 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2680 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2681 }
2682 
2683 /* XXX Add proper rfkill support code */
2684 static int
2685 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2686 {
2687 	int ret;
2688 
2689 	/* This may fail if AMT took ownership of the device */
2690 	if (iwm_prepare_card_hw(sc)) {
2691 		device_printf(sc->sc_dev,
2692 		    "%s: Exit HW not ready\n", __func__);
2693 		ret = EIO;
2694 		goto out;
2695 	}
2696 
2697 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2698 
2699 	iwm_disable_interrupts(sc);
2700 
2701 	/* make sure rfkill handshake bits are cleared */
2702 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2703 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2704 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2705 
2706 	/* clear (again), then enable host interrupts */
2707 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2708 
2709 	ret = iwm_nic_init(sc);
2710 	if (ret) {
2711 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2712 		goto out;
2713 	}
2714 
2715 	/*
2716 	 * Now, we load the firmware and don't want to be interrupted, even
2717 	 * by the RF-Kill interrupt (hence mask all interrupts besides the
2718 	 * FH_TX interrupt, which is needed to load the firmware). If the
2719 	 * RF-Kill switch is toggled, we will find out after having loaded
2720 	 * the firmware and return the proper value to the caller.
2721 	 */
2722 	iwm_enable_fw_load_int(sc);
2723 
2724 	/* really make sure rfkill handshake bits are cleared */
2725 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2726 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2727 
2728 	/* Load the given image to the HW */
2729 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2730 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2731 	else
2732 		ret = iwm_pcie_load_given_ucode(sc, fw);
2733 
2734 	/* XXX re-check RF-Kill state */
2735 
2736 out:
2737 	return ret;
2738 }
2739 
2740 static int
2741 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2742 {
2743 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2744 		.valid = htole32(valid_tx_ant),
2745 	};
2746 
2747 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2748 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2749 }
2750 
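/*
 * Send the PHY configuration along with the calibration trigger masks
 * that apply to the currently loaded ucode image.
 */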
2751 static int
2752 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2753 {
2754 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2755 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2756 
2757 	/* Set parameters */
2758 	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2759 	phy_cfg_cmd.calib_control.event_trigger =
2760 	    sc->sc_default_calib[ucode_type].event_trigger;
2761 	phy_cfg_cmd.calib_control.flow_trigger =
2762 	    sc->sc_default_calib[ucode_type].flow_trigger;
2763 
2764 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2765 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2766 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2767 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2768 }
2769 
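/*
 * Notification-wait callback for the ALIVE response.  Records the error
 * and log event table pointers and the scheduler base address reported
 * by the firmware; handles both the v3 layout and the newer dual-LMAC
 * layout.
 */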
2770 static int
2771 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2772 {
2773 	struct iwm_mvm_alive_data *alive_data = data;
2774 	struct iwm_mvm_alive_resp_v3 *palive3;
2775 	struct iwm_mvm_alive_resp *palive;
2776 	struct iwm_umac_alive *umac;
2777 	struct iwm_lmac_alive *lmac1;
2778 	struct iwm_lmac_alive *lmac2 = NULL;
2779 	uint16_t status;
2780 
2781 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2782 		palive = (void *)pkt->data;
2783 		umac = &palive->umac_data;
2784 		lmac1 = &palive->lmac_data[0];
2785 		lmac2 = &palive->lmac_data[1];
2786 		status = le16toh(palive->status);
2787 	} else {
2788 		palive3 = (void *)pkt->data;
2789 		umac = &palive3->umac_data;
2790 		lmac1 = &palive3->lmac_data;
2791 		status = le16toh(palive3->status);
2792 	}
2793 
2794 	sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
2795 	if (lmac2)
2796 		sc->error_event_table[1] =
2797 			le32toh(lmac2->error_event_table_ptr);
2798 	sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
2799 	sc->umac_error_event_table = le32toh(umac->error_info_addr);
2800 	alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
2801 	alive_data->valid = status == IWM_ALIVE_STATUS_OK;
2802 	if (sc->umac_error_event_table)
2803 		sc->support_umac_log = TRUE;
2804 
2805 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2806 		    "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
2807 		    status, lmac1->ver_type, lmac1->ver_subtype);
2808 
2809 	if (lmac2)
2810 		IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");
2811 
2812 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2813 		    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2814 		    le32toh(umac->umac_major),
2815 		    le32toh(umac->umac_minor));
2816 
2817 	return TRUE;
2818 }
2819 
2820 static int
2821 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2822 	struct iwm_rx_packet *pkt, void *data)
2823 {
2824 	struct iwm_phy_db *phy_db = data;
2825 
2826 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2827 		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2828 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2829 			    __func__, pkt->hdr.code);
2830 		}
2831 		return TRUE;
2832 	}
2833 
2834 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2835 		device_printf(sc->sc_dev,
2836 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2837 	}
2838 
2839 	return FALSE;
2840 }
2841 
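/*
 * Start the given ucode image and block until the firmware's ALIVE
 * notification arrives, then bring up the TX scheduler and, if the
 * image uses paging, hand the paging database to the firmware.
 */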
2842 static int
2843 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2844 	enum iwm_ucode_type ucode_type)
2845 {
2846 	struct iwm_notification_wait alive_wait;
2847 	struct iwm_mvm_alive_data alive_data;
2848 	const struct iwm_fw_img *fw;
2849 	enum iwm_ucode_type old_type = sc->cur_ucode;
2850 	int error;
2851 	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2852 
2853 	fw = &sc->sc_fw.img[ucode_type];
2854 	sc->cur_ucode = ucode_type;
2855 	sc->ucode_loaded = FALSE;
2856 
2857 	memset(&alive_data, 0, sizeof(alive_data));
2858 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2859 				   alive_cmd, NELEM(alive_cmd),
2860 				   iwm_alive_fn, &alive_data);
2861 
2862 	error = iwm_start_fw(sc, fw);
2863 	if (error) {
2864 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2865 		sc->cur_ucode = old_type;
2866 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2867 		return error;
2868 	}
2869 
2870 	/*
2871 	 * Some things may run in the background now, but we
2872 	 * just wait for the ALIVE notification here.
2873 	 */
2874 	IWM_UNLOCK(sc);
2875 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2876 				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
2877 	IWM_LOCK(sc);
2878 	if (error) {
2879 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2880 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2881 			if (iwm_nic_lock(sc)) {
2882 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2883 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2884 				iwm_nic_unlock(sc);
2885 			}
2886 			device_printf(sc->sc_dev,
2887 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2888 			    a, b);
2889 		}
2890 		sc->cur_ucode = old_type;
2891 		return error;
2892 	}
2893 
2894 	if (!alive_data.valid) {
2895 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2896 		    __func__);
2897 		sc->cur_ucode = old_type;
2898 		return EIO;
2899 	}
2900 
2901 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2902 
2903 	/*
2904 	 * Configure and operate the fw paging mechanism.
2905 	 * The driver configures the paging flow only once; the CPU2 paging
2906 	 * image is included in the IWM_UCODE_INIT image.
2907 	 */
2908 	if (fw->paging_mem_size) {
2909 		error = iwm_save_fw_paging(sc, fw);
2910 		if (error) {
2911 			device_printf(sc->sc_dev,
2912 			    "%s: failed to save the FW paging image\n",
2913 			    __func__);
2914 			return error;
2915 		}
2916 
2917 		error = iwm_send_paging_cmd(sc, fw);
2918 		if (error) {
2919 			device_printf(sc->sc_dev,
2920 			    "%s: failed to send the paging cmd\n", __func__);
2921 			iwm_free_fw_paging(sc);
2922 			return error;
2923 		}
2924 	}
2925 
2926 	if (!error)
2927 		sc->ucode_loaded = TRUE;
2928 	return error;
2929 }
2930 
2931 /*
2932  * mvm misc bits
2933  */
2934 
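/*
 * Run the INIT ucode image.  With 'justnvm' set, only read the NVM and
 * return; otherwise also send the BT coex, TX antenna and PHY
 * configuration commands and wait for the calibration results.
 */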
2935 static int
2936 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2937 {
2938 	struct iwm_notification_wait calib_wait;
2939 	static const uint16_t init_complete[] = {
2940 		IWM_INIT_COMPLETE_NOTIF,
2941 		IWM_CALIB_RES_NOTIF_PHY_DB
2942 	};
2943 	int ret;
2944 
2945 	/* do not operate with rfkill switch turned on */
2946 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2947 		device_printf(sc->sc_dev,
2948 		    "radio is disabled by hardware switch\n");
2949 		return EPERM;
2950 	}
2951 
2952 	iwm_init_notification_wait(sc->sc_notif_wait,
2953 				   &calib_wait,
2954 				   init_complete,
2955 				   NELEM(init_complete),
2956 				   iwm_wait_phy_db_entry,
2957 				   sc->sc_phy_db);
2958 
2959 	/* Will also start the device */
2960 	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2961 	if (ret) {
2962 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2963 		    ret);
2964 		goto error;
2965 	}
2966 
2967 	if (justnvm) {
2968 		/* Read nvm */
2969 		ret = iwm_nvm_init(sc);
2970 		if (ret) {
2971 			device_printf(sc->sc_dev, "failed to read nvm\n");
2972 			goto error;
2973 		}
2974 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2975 		goto error;
2976 	}
2977 
2978 	ret = iwm_send_bt_init_conf(sc);
2979 	if (ret) {
2980 		device_printf(sc->sc_dev,
2981 		    "failed to send bt coex configuration: %d\n", ret);
2982 		goto error;
2983 	}
2984 
2985 	/* Send TX valid antennas before triggering calibrations */
2986 	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
2987 	if (ret) {
2988 		device_printf(sc->sc_dev,
2989 		    "failed to send antennas before calibration: %d\n", ret);
2990 		goto error;
2991 	}
2992 
2993 	/*
2994 	 * Send phy configurations command to init uCode
2995 	 * to start the 16.0 uCode init image internal calibrations.
2996 	 */
2997 	ret = iwm_send_phy_cfg_cmd(sc);
2998 	if (ret) {
2999 		device_printf(sc->sc_dev,
3000 		    "%s: Failed to run INIT calibrations: %d\n",
3001 		    __func__, ret);
3002 		goto error;
3003 	}
3004 
3005 	/*
3006 	 * Nothing to do but wait for the init complete notification
3007 	 * from the firmware.
3008 	 */
3009 	IWM_UNLOCK(sc);
3010 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3011 	    IWM_MVM_UCODE_CALIB_TIMEOUT);
3012 	IWM_LOCK(sc);
3013 
3015 	goto out;
3016 
3017 error:
3018 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3019 out:
3020 	return ret;
3021 }
3022 
3023 static int
3024 iwm_mvm_config_ltr(struct iwm_softc *sc)
3025 {
3026 	struct iwm_ltr_config_cmd cmd = {
3027 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
3028 	};
3029 
3030 	if (!sc->sc_ltr_enabled)
3031 		return 0;
3032 
3033 	return iwm_mvm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3034 }
3035 
3036 /*
3037  * receive side
3038  */
3039 
3040 /* (re)stock rx ring, called at init-time and at runtime */
3041 static int
3042 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3043 {
3044 	struct iwm_rx_ring *ring = &sc->rxq;
3045 	struct iwm_rx_data *data = &ring->data[idx];
3046 	struct mbuf *m;
3047 	bus_dmamap_t dmamap;
3048 	bus_dma_segment_t seg;
3049 	int nsegs, error;
3050 
3051 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3052 	if (m == NULL)
3053 		return ENOBUFS;
3054 
3055 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3056 #if defined(__DragonFly__)
3057 	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
3058 	    m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
3059 #else
3060 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3061 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3062 #endif
3063 	if (error != 0) {
3064 		device_printf(sc->sc_dev,
3065 		    "%s: can't map mbuf, error %d\n", __func__, error);
3066 		m_freem(m);
3067 		return error;
3068 	}
3069 
3070 	if (data->m != NULL)
3071 		bus_dmamap_unload(ring->data_dmat, data->map);
3072 
3073 	/* Swap ring->spare_map with data->map */
3074 	dmamap = data->map;
3075 	data->map = ring->spare_map;
3076 	ring->spare_map = dmamap;
3077 
3078 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3079 	data->m = m;
3080 
3081 	/* Update RX descriptor. */
3082 	KKASSERT((seg.ds_addr & 255) == 0);
3083 	ring->desc[idx] = htole32(seg.ds_addr >> 8);
3084 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3085 	    BUS_DMASYNC_PREWRITE);
3086 
3087 	return 0;
3088 }
3089 
3090 /*
3091  * iwm_mvm_get_signal_strength - use the new rx PHY INFO API.
3092  * Values are reported by the fw as positive and must be negated
3093  * to obtain their dBm.  Account for missing antennas by replacing 0
3094  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3095  */
3096 static int
3097 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3098 {
3099 	int energy_a, energy_b, energy_c, max_energy;
3100 	uint32_t val;
3101 
3102 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3103 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3104 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3105 	energy_a = energy_a ? -energy_a : -256;
3106 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3107 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3108 	energy_b = energy_b ? -energy_b : -256;
3109 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3110 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3111 	energy_c = energy_c ? -energy_c : -256;
3112 	max_energy = MAX(energy_a, energy_b);
3113 	max_energy = MAX(max_energy, energy_c);
3114 
3115 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3116 	    "energy In A %d B %d C %d , and max %d\n",
3117 	    energy_a, energy_b, energy_c, max_energy);
3118 
3119 	return max_energy;
3120 }
3121 
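/*
 * Stash the most recent IWM_RX_PHY_INFO payload; it describes the PHY
 * conditions for the MPDU(s) delivered in subsequent RX packets.
 */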
3122 static void
3123 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3124 {
3125 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3126 
3127 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3128 
3129 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3130 }
3131 
3132 /*
3133  * Retrieve the average noise (in dBm) among receivers.
3134  */
3135 static int
3136 iwm_get_noise(struct iwm_softc *sc,
3137 	const struct iwm_mvm_statistics_rx_non_phy *stats)
3138 {
3139 	int i, total, nbant, noise;
3140 
3141 	total = nbant = noise = 0;
3142 	for (i = 0; i < 3; i++) {
3143 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3144 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3145 		    __func__, i, noise);
3146 
3147 		if (noise) {
3148 			total += noise;
3149 			nbant++;
3150 		}
3151 	}
3152 
3153 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3154 	    __func__, nbant, total);
3155 #if 0
3156 	/* There should be at least one antenna but check anyway. */
3157 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3158 #else
3159 	/* For now, just hard-code it to -96 to be safe */
3160 	return (-96);
3161 #endif
3162 }
3163 
3164 static void
3165 iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3166 {
3167 	struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3168 
3169 	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3170 	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3171 }
3172 
3173 /*
3174  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3175  *
3176  * Handles the actual data of the Rx packet from the fw
3177  */
3178 static boolean_t
3179 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3180 	boolean_t stolen)
3181 {
3182 	struct ieee80211com *ic = &sc->sc_ic;
3183 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3184 	struct ieee80211_frame *wh;
3185 	struct ieee80211_node *ni;
3186 	struct ieee80211_rx_stats rxs;
3187 	struct iwm_rx_phy_info *phy_info;
3188 	struct iwm_rx_mpdu_res_start *rx_res;
3189 	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3190 	uint32_t len;
3191 	uint32_t rx_pkt_status;
3192 	int rssi;
3193 
3194 	phy_info = &sc->sc_last_phy_info;
3195 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3196 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3197 	len = le16toh(rx_res->byte_count);
3198 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3199 
3200 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3201 		device_printf(sc->sc_dev,
3202 		    "dsp size out of range [0,20]: %d\n",
3203 		    phy_info->cfg_phy_cnt);
3204 		return FALSE;
3205 	}
3206 
3207 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3208 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3209 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3210 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3211 		return FALSE; /* drop */
3212 	}
3213 
3214 	rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3215 	/* Note: RSSI is absolute (i.e., a negative dBm value). */
3216 	if (rssi < IWM_MIN_DBM)
3217 		rssi = IWM_MIN_DBM;
3218 	else if (rssi > IWM_MAX_DBM)
3219 		rssi = IWM_MAX_DBM;
3220 
3221 	/* Map it to relative value */
3222 	rssi = rssi - sc->sc_noise;
3223 
3224 	/* replenish ring for the buffer we're going to feed to the sharks */
3225 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3226 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3227 		    __func__);
3228 		return FALSE;
3229 	}
3230 
3231 	m->m_data = pkt->data + sizeof(*rx_res);
3232 	m->m_pkthdr.len = m->m_len = len;
3233 
3234 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3235 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3236 
3237 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3238 
3239 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3240 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3241 	    __func__,
3242 	    le16toh(phy_info->channel),
3243 	    le16toh(phy_info->phy_flags));
3244 
3245 	/*
3246 	 * Populate an RX state struct with the provided information.
3247 	 */
3248 	bzero(&rxs, sizeof(rxs));
3249 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3250 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3251 	rxs.c_ieee = le16toh(phy_info->channel);
3252 	if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3253 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3254 	} else {
3255 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3256 	}
3257 	/* rssi is in 1/2db units */
3258 	rxs.rssi = rssi * 2;
3259 	rxs.nf = sc->sc_noise;
3260 
3261 	if (ieee80211_radiotap_active_vap(vap)) {
3262 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3263 
3264 		tap->wr_flags = 0;
3265 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3266 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3267 		tap->wr_chan_freq = htole16(rxs.c_freq);
3268 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3269 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3270 		tap->wr_dbm_antsignal = (int8_t)rssi;
3271 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3272 		tap->wr_tsft = phy_info->system_timestamp;
3273 		switch (phy_info->rate) {
3274 		/* CCK rates. */
3275 		case  10: tap->wr_rate =   2; break;
3276 		case  20: tap->wr_rate =   4; break;
3277 		case  55: tap->wr_rate =  11; break;
3278 		case 110: tap->wr_rate =  22; break;
3279 		/* OFDM rates. */
3280 		case 0xd: tap->wr_rate =  12; break;
3281 		case 0xf: tap->wr_rate =  18; break;
3282 		case 0x5: tap->wr_rate =  24; break;
3283 		case 0x7: tap->wr_rate =  36; break;
3284 		case 0x9: tap->wr_rate =  48; break;
3285 		case 0xb: tap->wr_rate =  72; break;
3286 		case 0x1: tap->wr_rate =  96; break;
3287 		case 0x3: tap->wr_rate = 108; break;
3288 		/* Unknown rate: should not happen. */
3289 		default:  tap->wr_rate =   0;
3290 		}
3291 	}
3292 
3293 	IWM_UNLOCK(sc);
3294 	if (ni != NULL) {
3295 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3296 		ieee80211_input_mimo(ni, m, &rxs);
3297 		ieee80211_free_node(ni);
3298 	} else {
3299 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3300 		ieee80211_input_mimo_all(ic, m, &rxs);
3301 	}
3302 	IWM_LOCK(sc);
3303 
3304 	return TRUE;
3305 }
3306 
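/*
 * Process a single-frame TX response: feed the outcome to net80211's
 * rate control (only when the frame was sent at the current rate) and
 * return nonzero if the transmission failed.
 */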
3307 static int
3308 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3309 	struct iwm_node *in)
3310 {
3311 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3312 	struct ieee80211_node *ni = &in->in_ni;
3313 	struct ieee80211vap *vap = ni->ni_vap;
3314 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3315 	int failack = tx_resp->failure_frame;
3316 	int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3317 	boolean_t rate_matched;
3318 	uint8_t tx_resp_rate;
3319 	int ret;
3320 
3321 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3322 
3323 	/* Update rate control statistics. */
3324 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3325 	    __func__,
3326 	    (int) le16toh(tx_resp->status.status),
3327 	    (int) le16toh(tx_resp->status.sequence),
3328 	    tx_resp->frame_count,
3329 	    tx_resp->bt_kill_count,
3330 	    tx_resp->failure_rts,
3331 	    tx_resp->failure_frame,
3332 	    le32toh(tx_resp->initial_rate),
3333 	    (int) le16toh(tx_resp->wireless_media_time));
3334 
3335 	tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3336 
3337 	/* For rate control, ignore frames sent at different initial rate */
3338 	rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3339 
3340 	if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3341 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3342 		    "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3343 		    "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3344 	}
3345 
3346 	if (status != IWM_TX_STATUS_SUCCESS &&
3347 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3348 		if (rate_matched) {
3349 			ieee80211_ratectl_tx_complete(vap, ni,
3350 			    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3351 		}
3352 		ret = 1;
3353 	} else {
3354 		if (rate_matched) {
3355 			ieee80211_ratectl_tx_complete(vap, ni,
3356 			    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3357 		}
3358 		ret = 0;
3359 	}
3360 
3361 	if (rate_matched) {
3362 		int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3363 		new_rate = vap->iv_bss->ni_txrate;
3364 		if (new_rate != 0 && new_rate != cur_rate) {
3365 			struct iwm_node *in = IWM_NODE(vap->iv_bss);
3366 			iwm_setrates(sc, in, rix);
3367 			iwm_mvm_send_lq_cmd(sc, &in->in_lq, FALSE);
3368 		}
3369 	}
3370 
3371 	return ret;
3372 }
3373 
3374 static void
3375 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3376 {
3377 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3378 	int idx = cmd_hdr->idx;
3379 	int qid = cmd_hdr->qid;
3380 	struct iwm_tx_ring *ring = &sc->txq[qid];
3381 	struct iwm_tx_data *txd = &ring->data[idx];
3382 	struct iwm_node *in = txd->in;
3383 	struct mbuf *m = txd->m;
3384 	int status;
3385 
3386 	KASSERT(txd->done == 0, ("txd not done"));
3387 	KASSERT(txd->in != NULL, ("txd without node"));
3388 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3389 
3390 	sc->sc_tx_timer = 0;
3391 
3392 	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3393 
3394 	/* Unmap and free mbuf. */
3395 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3396 	bus_dmamap_unload(ring->data_dmat, txd->map);
3397 
3398 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3399 	    "free txd %p, in %p\n", txd, txd->in);
3400 	txd->done = 1;
3401 	txd->m = NULL;
3402 	txd->in = NULL;
3403 
3404 	ieee80211_tx_complete(&in->in_ni, m, status);
3405 
3406 	if (--ring->queued < IWM_TX_RING_LOMARK) {
3407 		sc->qfullmsk &= ~(1 << ring->qid);
3408 		if (sc->qfullmsk == 0) {
3409 			iwm_start(sc);
3410 		}
3411 	}
3412 }
3413 
3414 /*
3415  * transmit side
3416  */
3417 
3418 /*
 * Process a "command done" firmware notification.  This is where we wake up
 * processes waiting for a synchronous command completion.
 * (Adapted from if_iwn.)
3422  */
3423 static void
3424 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3425 {
3426 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3427 	struct iwm_tx_data *data;
3428 
3429 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3430 		return;	/* Not a command ack. */
3431 	}
3432 
3433 	data = &ring->data[pkt->hdr.idx];
3434 
3435 	/* If the command was mapped in an mbuf, free it. */
3436 	if (data->m != NULL) {
3437 		bus_dmamap_sync(ring->data_dmat, data->map,
3438 		    BUS_DMASYNC_POSTWRITE);
3439 		bus_dmamap_unload(ring->data_dmat, data->map);
3440 		m_freem(data->m);
3441 		data->m = NULL;
3442 	}
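	/*
	 * The wait channel is the command's own descriptor slot, so
	 * this only wakes the thread (if any) sleeping on this
	 * particular synchronous command in the submit path; other
	 * commands still in flight on the ring are unaffected.
	 */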
3443 	wakeup(&ring->desc[pkt->hdr.idx]);
3444 
3445 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3446 		device_printf(sc->sc_dev,
3447 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3448 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3449 		/* XXX call iwm_force_nmi() */
3450 	}
3451 
3452 	KKASSERT(ring->queued > 0);
3453 	ring->queued--;
3454 	if (ring->queued == 0)
3455 		iwm_pcie_clear_cmd_in_flight(sc);
3456 }
3457 
3458 #if 0
3459 /*
3460  * necessary only for block ack mode
3461  */
3462 void
3463 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3464 	uint16_t len)
3465 {
3466 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3467 	uint16_t w_val;
3468 
3469 	scd_bc_tbl = sc->sched_dma.vaddr;
3470 
3471 	len += 8; /* magic numbers came naturally from paris */
3472 	len = roundup(len, 4) / 4;
3473 
3474 	w_val = htole16(sta_id << 12 | len);
3475 
3476 	/* Update TX scheduler. */
3477 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3478 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3479 	    BUS_DMASYNC_PREWRITE);
3480 
3481 	/* I really wonder what this is ?!? */
3482 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3483 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3484 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3485 		    BUS_DMASYNC_PREWRITE);
3486 	}
3487 }
3488 #endif
3489 
3490 /*
3491  * Fill in the rate related information for a transmit command.
3492  */
3493 static uint8_t
3494 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3495 	struct mbuf *m, struct iwm_tx_cmd *tx)
3496 {
3497 	struct ieee80211com *ic = &sc->sc_ic;
3498 	struct ieee80211_node *ni = &in->in_ni;
3499 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
3500 	const struct ieee80211_txparam *tp = ni->ni_txparms;
3501 	const struct iwm_rate *rinfo;
3502 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3503 	int ridx, rate_flags;
3504 
3505 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3506 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3507 
3508 	if (type == IEEE80211_FC0_TYPE_MGT) {
3509 		ridx = iwm_rate2ridx(sc, tp->mgmtrate);
3510 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3511 		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		ridx = iwm_rate2ridx(sc, tp->mcastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
		ridx = iwm_rate2ridx(sc, tp->ucastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
	} else if (m->m_flags & M_EAPOL) {
		ridx = iwm_rate2ridx(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: EAPOL (%d)\n", __func__, tp->mgmtrate);
3524 	} else if (type == IEEE80211_FC0_TYPE_DATA) {
3525 		/* This is the index into the programmed table */
3526 		tx->initial_rate_index = 0;
3527 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3528 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA (%d)\n",
3529 		    __func__, ni->ni_txrate);
3530 		return ni->ni_txrate;
3531 	} else {
3532 		ridx = iwm_rate2ridx(sc, tp->mgmtrate);
3533 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3534 		    "%s: DEFAULT (%d)\n", __func__, tp->mgmtrate);
3535 	}
3536 
3537 	/*
	 * Sanity check ridx and provide a fallback. If the rate lookup
3539 	 * ever fails, iwm_rate2ridx() will already print an error message.
3540 	 */
3541 	if (ridx < 0 || ridx > IWM_RIDX_MAX) {
3542 		if (ic->ic_curmode == IEEE80211_MODE_11A) {
3543 			/*
3544 			 * XXX this assumes the mode is either 11a or not 11a;
3545 			 * definitely won't work for 11n.
3546 			 */
3547 			ridx = IWM_RIDX_OFDM;
3548 		} else {
3549 			ridx = IWM_RIDX_CCK;
3550 		}
3551 	}
3552 
3553 	rinfo = &iwm_rates[ridx];
3554 
3555 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3556 	    "%s: frame type=%d, ridx=%d, rate=%d, CCK=%d\n",
3557 	    __func__, type, ridx, rinfo->rate, !! (IWM_RIDX_IS_CCK(ridx)));
3558 
3559 	/* XXX TODO: hard-coded TX antenna? */
3560 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3561 	if (IWM_RIDX_IS_CCK(ridx))
3562 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3563 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
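	/*
	 * Illustrative note: for a CCK rate on the first antenna the
	 * value built above is (1 << IWM_RATE_MCS_ANT_POS) |
	 * IWM_RATE_MCS_CCK_MSK | rinfo->plcp, i.e. the antenna
	 * selection in the high bits, the CCK flag, and the PLCP rate
	 * code in the low byte.
	 */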
3564 
3565 	return rinfo->rate;
3566 }
3567 
3568 #define TB0_SIZE 16
3569 static int
3570 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3571 {
3572 	struct ieee80211com *ic = &sc->sc_ic;
3573 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3574 	struct iwm_node *in = IWM_NODE(ni);
3575 	struct iwm_tx_ring *ring;
3576 	struct iwm_tx_data *data;
3577 	struct iwm_tfd *desc;
3578 	struct iwm_device_cmd *cmd;
3579 	struct iwm_tx_cmd *tx;
3580 	struct ieee80211_frame *wh;
3581 	struct ieee80211_key *k = NULL;
3582 #if !defined(__DragonFly__)
3583 	struct mbuf *m1;
3584 #endif
3585 	uint32_t flags;
3586 	u_int hdrlen;
3587 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3588 	int nsegs;
3589 	uint8_t rate, tid, type;
3590 	int i, totlen, error, pad;
3591 
3592 	wh = mtod(m, struct ieee80211_frame *);
3593 	hdrlen = ieee80211_anyhdrsize(wh);
3594 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3595 	tid = 0;
3596 	ring = &sc->txq[ac];
3597 	desc = &ring->desc[ring->cur];
3598 	memset(desc, 0, sizeof(*desc));
3599 	data = &ring->data[ring->cur];
3600 
3601 	/* Fill out iwm_tx_cmd to send to the firmware */
3602 	cmd = &ring->cmd[ring->cur];
3603 	cmd->hdr.code = IWM_TX_CMD;
3604 	cmd->hdr.flags = 0;
3605 	cmd->hdr.qid = ring->qid;
3606 	cmd->hdr.idx = ring->cur;
3607 
3608 	tx = (void *)cmd->data;
3609 	memset(tx, 0, sizeof(*tx));
3610 
3611 	rate = iwm_tx_fill_cmd(sc, in, m, tx);
3612 
3613 	/* Encrypt the frame if need be. */
3614 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3615 		/* Retrieve key for TX && do software encryption. */
3616 		k = ieee80211_crypto_encap(ni, m);
3617 		if (k == NULL) {
3618 			m_freem(m);
3619 			return (ENOBUFS);
3620 		}
3621 		/* 802.11 header may have moved. */
3622 		wh = mtod(m, struct ieee80211_frame *);
3623 	}
3624 
3625 	if (ieee80211_radiotap_active_vap(vap)) {
3626 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3627 
3628 		tap->wt_flags = 0;
3629 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3630 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3631 		tap->wt_rate = rate;
3632 		if (k != NULL)
3633 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3634 		ieee80211_radiotap_tx(vap, m);
3635 	}
3636 
3637 
3639 
3640 	flags = 0;
3641 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3642 		flags |= IWM_TX_CMD_FLG_ACK;
3643 	}
3644 
3645 	if (type == IEEE80211_FC0_TYPE_DATA
3646 	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3647 	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3648 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3649 	}
3650 
3651 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3652 	    type != IEEE80211_FC0_TYPE_DATA)
3653 		tx->sta_id = sc->sc_aux_sta.sta_id;
3654 	else
3655 		tx->sta_id = IWM_STATION_ID;
3656 
3657 	if (type == IEEE80211_FC0_TYPE_MGT) {
3658 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3659 
3660 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3661 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3662 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3663 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3664 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3665 		} else {
3666 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3667 		}
3668 	} else {
3669 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3670 	}
3671 
3672 	if (hdrlen & 3) {
3673 		/* First segment length must be a multiple of 4. */
3674 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3675 		pad = 4 - (hdrlen & 3);
3676 	} else
3677 		pad = 0;
3678 
3679 	tx->driver_txop = 0;
3680 	tx->next_frame_len = 0;
3681 
3682 	tx->len = htole16(totlen);
3683 	tx->tid_tspec = tid;
3684 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3685 
3686 	/* Set physical address of "scratch area". */
3687 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3688 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3689 
3690 	/* Copy 802.11 header in TX command. */
3691 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3692 
3693 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3694 
3695 	tx->sec_ctl = 0;
3696 	tx->tx_flags |= htole32(flags);
3697 
3698 	/* Trim 802.11 header. */
3699 	m_adj(m, hdrlen);
3700 #if defined(__DragonFly__)
3701 	error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
3702 					    segs, IWM_MAX_SCATTER - 2,
3703 					    &nsegs, BUS_DMA_NOWAIT);
3704 #else
3705 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3706 	    segs, &nsegs, BUS_DMA_NOWAIT);
3707 #endif
3708 	if (error != 0) {
3709 #if defined(__DragonFly__)
3710 		device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3711 		    error);
3712 		m_freem(m);
3713 		return error;
3714 #else
3715 		if (error != EFBIG) {
3716 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3717 			    error);
3718 			m_freem(m);
3719 			return error;
3720 		}
3721 		/* Too many DMA segments, linearize mbuf. */
3722 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3723 		if (m1 == NULL) {
3724 			device_printf(sc->sc_dev,
3725 			    "%s: could not defrag mbuf\n", __func__);
3726 			m_freem(m);
3727 			return (ENOBUFS);
3728 		}
3729 		m = m1;
3730 
3731 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3732 		    segs, &nsegs, BUS_DMA_NOWAIT);
3733 		if (error != 0) {
3734 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3735 			    error);
3736 			m_freem(m);
3737 			return error;
3738 		}
3739 #endif
3740 	}
3741 	data->m = m;
3742 	data->in = in;
3743 	data->done = 0;
3744 
3745 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3746 	    "sending txd %p, in %p\n", data, data->in);
3747 	KASSERT(data->in != NULL, ("node is NULL"));
3748 
3749 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3750 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3751 	    ring->qid, ring->cur, totlen, nsegs,
3752 	    le32toh(tx->tx_flags),
3753 	    le32toh(tx->rate_n_flags),
3754 	    tx->initial_rate_index
3755 	    );
3756 
3757 	/* Fill TX descriptor. */
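	/*
	 * TB0 carries the first TB0_SIZE (16) bytes of the command,
	 * TB1 the rest of the command header and TX command plus the
	 * copied 802.11 header (including any pad), and TBs 2..n the
	 * payload segments mapped above.
	 */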
3758 	desc->num_tbs = 2 + nsegs;
3759 
3760 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3761 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3762 	    (TB0_SIZE << 4);
3763 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3764 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3765 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3766 	      + hdrlen + pad - TB0_SIZE) << 4);
3767 
3768 	/* Other DMA segments are for data payload. */
3769 	for (i = 0; i < nsegs; i++) {
3770 		seg = &segs[i];
3771 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len =
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr)) |
		    ((seg->ds_len) << 4);
3775 	}
3776 
3777 	bus_dmamap_sync(ring->data_dmat, data->map,
3778 	    BUS_DMASYNC_PREWRITE);
3779 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3780 	    BUS_DMASYNC_PREWRITE);
3781 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3782 	    BUS_DMASYNC_PREWRITE);
3783 
3784 #if 0
3785 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3786 #endif
3787 
3788 	/* Kick TX ring. */
3789 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3790 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3791 
3792 	/* Mark TX ring as full if we reach a certain threshold. */
3793 	if (++ring->queued > IWM_TX_RING_HIMARK) {
3794 		sc->qfullmsk |= 1 << ring->qid;
3795 	}
3796 
3797 	return 0;
3798 }
3799 
3800 static int
3801 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3802     const struct ieee80211_bpf_params *params)
3803 {
3804 	struct ieee80211com *ic = ni->ni_ic;
3805 	struct iwm_softc *sc = ic->ic_softc;
3806 	int error = 0;
3807 
3808 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3809 	    "->%s begin\n", __func__);
3810 
3811 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3812 		m_freem(m);
3813 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3814 		    "<-%s not RUNNING\n", __func__);
3815 		return (ENETDOWN);
	}

	IWM_LOCK(sc);
	/* XXX fix this: the bpf params are currently ignored. */
	if (params == NULL) {
		error = iwm_tx(sc, m, ni, 0);
	} else {
		error = iwm_tx(sc, m, ni, 0);
	}
	if (sc->sc_tx_timer == 0)
		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
	sc->sc_tx_timer = 5;
	IWM_UNLOCK(sc);

	return (error);
3831 }
3832 
3833 /*
3834  * mvm/tx.c
3835  */
3836 
3837 /*
3838  * Note that there are transports that buffer frames before they reach
3839  * the firmware. This means that after flush_tx_path is called, the
3840  * queue might not be empty. The race-free way to handle this is to:
3841  * 1) set the station as draining
3842  * 2) flush the Tx path
3843  * 3) wait for the transport queues to be empty
3844  */
3845 int
3846 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3847 {
3848 	int ret;
3849 	struct iwm_tx_path_flush_cmd flush_cmd = {
3850 		.queues_ctl = htole32(tfd_msk),
3851 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3852 	};
3853 
3854 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3855 	    sizeof(flush_cmd), &flush_cmd);
3856 	if (ret)
3857                 device_printf(sc->sc_dev,
3858 		    "Flushing tx queue failed: %d\n", ret);
3859 	return ret;
3860 }
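
#if 0
/*
 * A minimal sketch (hypothetical, not compiled in) of the race-free
 * drain sequence described above, roughly as iwm_bring_down_firmware()
 * applies it.  The function name is ours; step 3 is left as a comment
 * since the wait mechanism is transport-specific.
 */
static void
iwm_mvm_drain_tx_example(struct iwm_softc *sc)
{
	/* 1) stop feeding the firmware new frames (station draining) */
	iwm_xmit_queue_drain(sc);
	/* 2) flush the Tx path for all TFD queues */
	(void)iwm_mvm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
	/* 3) ... wait for the transport queues to become empty ... */
}
#endif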
3861 
3862 static int
3863 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3864 {
3865 	struct iwm_time_quota_cmd cmd;
3866 	int i, idx, ret, num_active_macs, quota, quota_rem;
3867 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3868 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3869 	uint16_t id;
3870 
3871 	memset(&cmd, 0, sizeof(cmd));
3872 
3873 	/* currently, PHY ID == binding ID */
3874 	if (ivp) {
3875 		id = ivp->phy_ctxt->id;
3876 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3877 		colors[id] = ivp->phy_ctxt->color;
3878 
		n_ifs[id] = 1;
3881 	}
3882 
3883 	/*
3884 	 * The FW's scheduling session consists of
3885 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3886 	 * equally between all the bindings that require quota
3887 	 */
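	/*
	 * Worked example (assuming IWM_MVM_MAX_QUOTA is 128): with
	 * three active MACs each binding would get 128 / 3 = 42
	 * fragments, and the remainder of 128 % 3 = 2 would go to the
	 * first binding below.
	 */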
3888 	num_active_macs = 0;
3889 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3890 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3891 		num_active_macs += n_ifs[i];
3892 	}
3893 
3894 	quota = 0;
3895 	quota_rem = 0;
3896 	if (num_active_macs) {
3897 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3898 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3899 	}
3900 
3901 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3902 		if (colors[i] < 0)
3903 			continue;
3904 
3905 		cmd.quotas[idx].id_and_color =
3906 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3907 
3908 		if (n_ifs[i] <= 0) {
3909 			cmd.quotas[idx].quota = htole32(0);
3910 			cmd.quotas[idx].max_duration = htole32(0);
3911 		} else {
3912 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3913 			cmd.quotas[idx].max_duration = htole32(0);
3914 		}
3915 		idx++;
3916 	}
3917 
3918 	/* Give the remainder of the session to the first binding */
3919 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3920 
3921 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3922 	    sizeof(cmd), &cmd);
3923 	if (ret)
3924 		device_printf(sc->sc_dev,
3925 		    "%s: Failed to send quota: %d\n", __func__, ret);
3926 	return ret;
3927 }
3928 
3929 /*
3930  * ieee80211 routines
3931  */
3932 
3933 /*
3934  * Change to AUTH state in 80211 state machine.  Roughly matches what
3935  * Linux does in bss_info_changed().
3936  */
3937 static int
3938 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3939 {
3940 	struct ieee80211_node *ni;
3941 	struct iwm_node *in;
3942 	struct iwm_vap *iv = IWM_VAP(vap);
3943 	uint32_t duration;
3944 	int error;
3945 
3946 	/*
	 * XXX I have a feeling that the vap node is being
3948 	 * freed from underneath us. Grr.
3949 	 */
3950 	ni = ieee80211_ref_node(vap->iv_bss);
3951 	in = IWM_NODE(ni);
3952 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3953 	    "%s: called; vap=%p, bss ni=%p\n",
3954 	    __func__,
3955 	    vap,
3956 	    ni);
3957 	IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
3958 	    __func__, ether_sprintf(ni->ni_bssid));
3959 
3960 	in->in_assoc = 0;
3961 	iv->iv_auth = 1;
3962 
3963 	/*
3964 	 * Firmware bug - it'll crash if the beacon interval is less
	 * than 16. We can't veto the connection attempt outright, so we
	 * refuse the station state change instead; this causes net80211
	 * to abandon its attempts to connect to this AP, and eventually
	 * wpa_s will blacklist the AP...
3969 	 */
3970 	if (ni->ni_intval < 16) {
3971 		device_printf(sc->sc_dev,
3972 		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
3973 		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
3974 		error = EINVAL;
3975 		goto out;
3976 	}
3977 
3978 	error = iwm_allow_mcast(vap, sc);
3979 	if (error) {
3980 		device_printf(sc->sc_dev,
3981 		    "%s: failed to set multicast\n", __func__);
3982 		goto out;
3983 	}
3984 
3985 	/*
	 * This is where we deviate from what Linux does.
	 *
	 * Linux iwlwifi doesn't reset the NIC each time, nor does it
	 * call ctxt_add() here.  Instead, it adds it during vap creation,
	 * and always does a mac_ctx_changed().
	 *
	 * The OpenBSD port doesn't attempt to do that - it resets things
	 * at odd states and does the add here.
3994 	 *
3995 	 * So, until the state handling is fixed (ie, we never reset
3996 	 * the NIC except for a firmware failure, which should drag
3997 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
3998 	 * contexts that are required), let's do a dirty hack here.
3999 	 */
4000 	if (iv->is_uploaded) {
4001 		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4002 			device_printf(sc->sc_dev,
4003 			    "%s: failed to update MAC\n", __func__);
4004 			goto out;
4005 		}
4006 	} else {
4007 		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4008 			device_printf(sc->sc_dev,
4009 			    "%s: failed to add MAC\n", __func__);
4010 			goto out;
4011 		}
4012 	}
4013 	sc->sc_firmware_state = 1;
4014 
4015 	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4016 	    in->in_ni.ni_chan, 1, 1)) != 0) {
4017 		device_printf(sc->sc_dev,
4018 		    "%s: failed update phy ctxt\n", __func__);
4019 		goto out;
4020 	}
4021 	iv->phy_ctxt = &sc->sc_phyctxt[0];
4022 
4023 	if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4024 		device_printf(sc->sc_dev,
4025 		    "%s: binding update cmd\n", __func__);
4026 		goto out;
4027 	}
4028 	sc->sc_firmware_state = 2;
4029 	/*
4030 	 * Authentication becomes unreliable when powersaving is left enabled
4031 	 * here. Powersaving will be activated again when association has
4032 	 * finished or is aborted.
4033 	 */
4034 	iv->ps_disabled = TRUE;
4035 	error = iwm_mvm_power_update_mac(sc);
4036 	iv->ps_disabled = FALSE;
4037 	if (error != 0) {
4038 		device_printf(sc->sc_dev,
4039 		    "%s: failed to update power management\n",
4040 		    __func__);
4041 		goto out;
4042 	}
4043 	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4044 		device_printf(sc->sc_dev,
4045 		    "%s: failed to add sta\n", __func__);
4046 		goto out;
4047 	}
4048 	sc->sc_firmware_state = 3;
4049 
4050 	/*
4051 	 * Prevent the FW from wandering off channel during association
4052 	 * by "protecting" the session with a time event.
4053 	 */
4054 	/* XXX duration is in units of TU, not MS */
4055 	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4056 	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);
4057 
4058 	error = 0;
4059 out:
4060 	if (error != 0)
4061 		iv->iv_auth = 0;
4062 	ieee80211_free_node(ni);
4063 	return (error);
4064 }
4065 
4066 static struct ieee80211_node *
4067 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4068 {
4069 	return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4070 	    M_INTWAIT | M_ZERO);
4071 }
4072 
4073 static uint8_t
4074 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4075 {
4076 	uint8_t plcp = rate_n_flags & 0xff;
4077 	int i;
4078 
4079 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4080 		if (iwm_rates[i].plcp == plcp)
4081 			return iwm_rates[i].rate;
4082 	}
4083 	return 0;
4084 }
4085 
4086 uint8_t
4087 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4088 {
4089 	int i;
4090 	uint8_t rval;
4091 
4092 	for (i = 0; i < rs->rs_nrates; i++) {
4093 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4094 		if (rval == iwm_rates[ridx].rate)
4095 			return rs->rs_rates[i];
4096 	}
4097 
4098 	return 0;
4099 }
4100 
4101 static int
4102 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4103 {
4104 	int i;
4105 
4106 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4107 		if (iwm_rates[i].rate == rate)
4108 			return i;
4109 	}
4110 
4111 	device_printf(sc->sc_dev,
4112 	    "%s: WARNING: device rate for %u not found!\n",
4113 	    __func__, rate);
4114 
4115 	return -1;
4116 }
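
/*
 * Note on the helpers above: rates are expressed in net80211's
 * 0.5 Mbit/s units, so e.g. iwm_rate2ridx(sc, 12) looks up the
 * 6 Mbit/s OFDM entry, matching the radiotap rate mapping in the
 * RX path.
 */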
4117 
4118 static void
4119 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4120 {
4121 	struct ieee80211_node *ni = &in->in_ni;
4122 	struct iwm_lq_cmd *lq = &in->in_lq;
4123 	struct ieee80211_rateset *rs = &ni->ni_rates;
4124 	int nrates = rs->rs_nrates;
4125 	int i, ridx, tab = 0;
4126 	int txant = 0;
4127 
4128 	KKASSERT(rix >= 0 && rix < nrates);
4129 
4130 	if (nrates > nitems(lq->rs_table)) {
4131 		device_printf(sc->sc_dev,
4132 		    "%s: node supports %d rates, driver handles "
4133 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4134 		return;
4135 	}
4136 	if (nrates == 0) {
4137 		device_printf(sc->sc_dev,
4138 		    "%s: node supports 0 rates, odd!\n", __func__);
4139 		return;
4140 	}
4141 	nrates = imin(rix + 1, nrates);
4142 
4143 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4144 	    "%s: nrates=%d\n", __func__, nrates);
4145 
	/* Then construct an lq_cmd based on those rates. */
4147 	memset(lq, 0, sizeof(*lq));
4148 	lq->sta_id = IWM_STATION_ID;
4149 
4150 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4151 	if (ni->ni_flags & IEEE80211_NODE_HT)
4152 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4153 
4154 	/*
	 * Are these used? (We don't do SISO or MIMO.)
	 * They need to be set to non-zero, though, or we get an error.
4157 	 */
4158 	lq->single_stream_ant_msk = 1;
4159 	lq->dual_stream_ant_msk = 1;
4160 
4161 	/*
4162 	 * Build the actual rate selection table.
4163 	 * The lowest bits are the rates.  Additionally,
4164 	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna.
	 * Note that we add the rates from highest to lowest
	 * (the opposite of ni_rates).
4168 	 */
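	/*
	 * Example (a sketch, not taken from a live trace): for an 11g
	 * node whose rate set tops out at 54 Mbit/s, rs_table[0] would
	 * carry the 54M PLCP code plus the antenna bits, rs_table[1]
	 * the 48M one, and so on down to the node's lowest rate, which
	 * then pads the remaining entries.
	 */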
4169 	for (i = 0; i < nrates; i++) {
4170 		int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4171 		int nextant;
4172 
4173 		/* Map 802.11 rate to HW rate index. */
4174 		ridx = iwm_rate2ridx(sc, rate);
4175 		if (ridx == -1)
4176 			continue;
4177 
4178 		if (txant == 0)
4179 			txant = iwm_mvm_get_valid_tx_ant(sc);
4180 		nextant = 1<<(ffs(txant)-1);
4181 		txant &= ~nextant;
4182 
4183 		tab = iwm_rates[ridx].plcp;
4184 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4185 		if (IWM_RIDX_IS_CCK(ridx))
4186 			tab |= IWM_RATE_MCS_CCK_MSK;
4187 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4188 		    "station rate i=%d, rate=%d, hw=%x\n",
4189 		    i, iwm_rates[ridx].rate, tab);
4190 		lq->rs_table[i] = htole32(tab);
4191 	}
4192 	/* then fill the rest with the lowest possible rate */
4193 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4194 		KASSERT(tab != 0, ("invalid tab"));
4195 		lq->rs_table[i] = htole32(tab);
4196 	}
4197 }
4198 
4199 static int
4200 iwm_media_change(struct ifnet *ifp)
4201 {
4202 	struct ieee80211vap *vap = ifp->if_softc;
4203 	struct ieee80211com *ic = vap->iv_ic;
4204 	struct iwm_softc *sc = ic->ic_softc;
4205 	int error;
4206 
4207 	error = ieee80211_media_change(ifp);
4208 	if (error != ENETRESET)
4209 		return error;
4210 
4211 	IWM_LOCK(sc);
4212 	if (ic->ic_nrunning > 0) {
4213 		iwm_stop(sc);
4214 		iwm_init(sc);
4215 	}
4216 	IWM_UNLOCK(sc);
4217 	return error;
4218 }
4219 
4220 static void
4221 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
4222 {
4223 	struct iwm_vap *ivp = IWM_VAP(vap);
4224 	int error;
4225 
	/* Avoid the Tx watchdog triggering when transfers get dropped here. */
4227 	sc->sc_tx_timer = 0;
4228 
4229 	ivp->iv_auth = 0;
4230 	if (sc->sc_firmware_state == 3) {
4231 		iwm_xmit_queue_drain(sc);
4232 //		iwm_mvm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
4233 		error = iwm_mvm_rm_sta(sc, vap, TRUE);
4234 		if (error) {
4235 			device_printf(sc->sc_dev,
4236 			    "%s: Failed to remove station: %d\n",
4237 			    __func__, error);
4238 		}
4239 	}
4240 	if (sc->sc_firmware_state == 3) {
4241 		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4242 		if (error) {
4243 			device_printf(sc->sc_dev,
4244 			    "%s: Failed to change mac context: %d\n",
4245 			    __func__, error);
4246 		}
4247 	}
4248 	if (sc->sc_firmware_state == 3) {
4249 		error = iwm_mvm_sf_update(sc, vap, FALSE);
4250 		if (error) {
4251 			device_printf(sc->sc_dev,
4252 			    "%s: Failed to update smart FIFO: %d\n",
4253 			    __func__, error);
4254 		}
4255 	}
4256 	if (sc->sc_firmware_state == 3) {
4257 		error = iwm_mvm_rm_sta_id(sc, vap);
4258 		if (error) {
4259 			device_printf(sc->sc_dev,
4260 			    "%s: Failed to remove station id: %d\n",
4261 			    __func__, error);
4262 		}
4263 	}
4264 	if (sc->sc_firmware_state == 3) {
4265 		error = iwm_mvm_update_quotas(sc, NULL);
4266 		if (error) {
4267 			device_printf(sc->sc_dev,
4268 			    "%s: Failed to update PHY quota: %d\n",
4269 			    __func__, error);
4270 		}
4271 	}
4272 	if (sc->sc_firmware_state == 3) {
4273 		/* XXX Might need to specify bssid correctly. */
4274 		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4275 		if (error) {
4276 			device_printf(sc->sc_dev,
4277 			    "%s: Failed to change mac context: %d\n",
4278 			    __func__, error);
4279 		}
4280 	}
4281 	if (sc->sc_firmware_state == 3) {
4282 		sc->sc_firmware_state = 2;
4283 	}
4284 	if (sc->sc_firmware_state > 1) {
4285 		error = iwm_mvm_binding_remove_vif(sc, ivp);
4286 		if (error) {
4287 			device_printf(sc->sc_dev,
4288 			    "%s: Failed to remove channel ctx: %d\n",
4289 			    __func__, error);
4290 		}
4291 	}
4292 	if (sc->sc_firmware_state > 1) {
4293 		sc->sc_firmware_state = 1;
4294 	}
4295 	ivp->phy_ctxt = NULL;
4296 	if (sc->sc_firmware_state > 0) {
4297 		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4298 		if (error) {
4299 			device_printf(sc->sc_dev,
4300 			    "%s: Failed to change mac context: %d\n",
4301 			    __func__, error);
4302 		}
4303 	}
4304 	if (sc->sc_firmware_state > 0) {
4305 		error = iwm_mvm_power_update_mac(sc);
4306 		if (error != 0) {
4307 			device_printf(sc->sc_dev,
4308 			    "%s: failed to update power management\n",
4309 			    __func__);
4310 		}
4311 	}
4312 	sc->sc_firmware_state = 0;
4313 }
4314 
4315 static int
4316 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4317 {
4318 	struct iwm_vap *ivp = IWM_VAP(vap);
4319 	struct ieee80211com *ic = vap->iv_ic;
4320 	struct iwm_softc *sc = ic->ic_softc;
4321 	struct iwm_node *in;
4322 	int error;
4323 
4324 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4325 	    "switching state %s -> %s arg=0x%x\n",
4326 	    ieee80211_state_name[vap->iv_state],
4327 	    ieee80211_state_name[nstate],
4328 	    arg);
4329 
4330 	IEEE80211_UNLOCK(ic);
4331 	IWM_LOCK(sc);
4332 
4333 	if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
4334 	    (nstate == IEEE80211_S_AUTH ||
4335 	     nstate == IEEE80211_S_ASSOC ||
4336 	     nstate == IEEE80211_S_RUN)) {
		/* Stop blinking for a scan when authenticating. */
4338 		iwm_led_blink_stop(sc);
4339 	}
4340 
4341 	if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
4342 		iwm_mvm_led_disable(sc);
4343 		/* disable beacon filtering if we're hopping out of RUN */
4344 		iwm_mvm_disable_beacon_filter(sc);
4345 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4346 			in->in_assoc = 0;
4347 	}
4348 
4349 	if ((vap->iv_state == IEEE80211_S_AUTH ||
4350 	     vap->iv_state == IEEE80211_S_ASSOC ||
4351 	     vap->iv_state == IEEE80211_S_RUN) &&
4352 	    (nstate == IEEE80211_S_INIT ||
4353 	     nstate == IEEE80211_S_SCAN ||
4354 	     nstate == IEEE80211_S_AUTH)) {
4355 		iwm_mvm_stop_session_protection(sc, ivp);
4356 	}
4357 
4358 	if ((vap->iv_state == IEEE80211_S_RUN ||
4359 	     vap->iv_state == IEEE80211_S_ASSOC) &&
4360 	    nstate == IEEE80211_S_INIT) {
4361 		/*
4362 		 * In this case, iv_newstate() wants to send an 80211 frame on
		 * the network that we are leaving. So we need to call it
4364 		 * before tearing down all the firmware state.
4365 		 */
4366 		IWM_UNLOCK(sc);
4367 		IEEE80211_LOCK(ic);
4368 		ivp->iv_newstate(vap, nstate, arg);
4369 		IEEE80211_UNLOCK(ic);
4370 		IWM_LOCK(sc);
4371 		iwm_bring_down_firmware(sc, vap);
4372 		IWM_UNLOCK(sc);
4373 		IEEE80211_LOCK(ic);
4374 		return 0;
4375 	}
4376 
4377 	switch (nstate) {
4378 	case IEEE80211_S_INIT:
4379 	case IEEE80211_S_SCAN:
4380 		break;
4381 
4382 	case IEEE80211_S_AUTH:
4383 		iwm_bring_down_firmware(sc, vap);
4384 		if ((error = iwm_auth(vap, sc)) != 0) {
4385 			device_printf(sc->sc_dev,
4386 			    "%s: could not move to auth state: %d\n",
4387 			    __func__, error);
4388 			iwm_bring_down_firmware(sc, vap);
4389 			IWM_UNLOCK(sc);
4390 			IEEE80211_LOCK(ic);
4391 			return 1;
4392 		}
4393 		break;
4394 
4395 	case IEEE80211_S_ASSOC:
4396 		/*
4397 		 * EBS may be disabled due to previous failures reported by FW.
		 * Reset the EBS status here, assuming the environment has changed.
4399 		 */
4400 		sc->last_ebs_successful = TRUE;
4401 		break;
4402 
4403 	case IEEE80211_S_RUN:
4404 		in = IWM_NODE(vap->iv_bss);
		/* Update the association state, now that we have it all */
		/* (e.g. the associd comes in at this point). */
4407 		error = iwm_mvm_update_sta(sc, in);
4408 		if (error != 0) {
4409 			device_printf(sc->sc_dev,
4410 			    "%s: failed to update STA\n", __func__);
4411 			IWM_UNLOCK(sc);
4412 			IEEE80211_LOCK(ic);
4413 			return error;
4414 		}
4415 		in->in_assoc = 1;
4416 		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4417 		if (error != 0) {
4418 			device_printf(sc->sc_dev,
4419 			    "%s: failed to update MAC: %d\n", __func__, error);
4420 		}
4421 
4422 		iwm_mvm_sf_update(sc, vap, FALSE);
4423 		iwm_mvm_enable_beacon_filter(sc, ivp);
4424 		iwm_mvm_power_update_mac(sc);
4425 		iwm_mvm_update_quotas(sc, ivp);
4426 		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4427 		iwm_setrates(sc, in, rix);
4428 
4429 		if ((error = iwm_mvm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4430 			device_printf(sc->sc_dev,
4431 			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4432 		}
4433 
4434 		iwm_mvm_led_enable(sc);
4435 		break;
4436 
4437 	default:
4438 		break;
4439 	}
4440 	IWM_UNLOCK(sc);
4441 	IEEE80211_LOCK(ic);
4442 
4443 	return (ivp->iv_newstate(vap, nstate, arg));
4444 }
4445 
4446 void
4447 iwm_endscan_cb(void *arg, int pending)
4448 {
4449 	struct iwm_softc *sc = arg;
4450 	struct ieee80211com *ic = &sc->sc_ic;
4451 
4452 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4453 	    "%s: scan ended\n",
4454 	    __func__);
4455 
4456 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4457 }
4458 
4459 static int
4460 iwm_send_bt_init_conf(struct iwm_softc *sc)
4461 {
4462 	struct iwm_bt_coex_cmd bt_cmd;
4463 
4464 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4465 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4466 
4467 	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4468 	    &bt_cmd);
4469 }
4470 
4471 static boolean_t
4472 iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4473 {
4474 	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4475 	boolean_t tlv_lar = fw_has_capa(&sc->sc_fw.ucode_capa,
4476 					IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4477 
4478 	if (iwm_lar_disable)
4479 		return FALSE;
4480 
4481 	/*
4482 	 * Enable LAR only if it is supported by the FW (TLV) &&
4483 	 * enabled in the NVM
4484 	 */
4485 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4486 		return nvm_lar && tlv_lar;
4487 	else
4488 		return tlv_lar;
4489 }
4490 
4491 static boolean_t
4492 iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4493 {
4494 	return fw_has_api(&sc->sc_fw.ucode_capa,
4495 			  IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4496 	       fw_has_capa(&sc->sc_fw.ucode_capa,
4497 			   IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4498 }
4499 
4500 static int
4501 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4502 {
4503 	struct iwm_mcc_update_cmd mcc_cmd;
4504 	struct iwm_host_cmd hcmd = {
4505 		.id = IWM_MCC_UPDATE_CMD,
4506 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4507 		.data = { &mcc_cmd },
4508 	};
4509 	int ret;
4510 #ifdef IWM_DEBUG
4511 	struct iwm_rx_packet *pkt;
4512 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4513 	struct iwm_mcc_update_resp *mcc_resp;
4514 	int n_channels;
4515 	uint16_t mcc;
4516 #endif
4517 	int resp_v2 = fw_has_capa(&sc->sc_fw.ucode_capa,
4518 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4519 
4520 	if (!iwm_mvm_is_lar_supported(sc)) {
4521 		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4522 		    __func__);
4523 		return 0;
4524 	}
4525 
4526 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4527 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
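	/*
	 * E.g. the "ZZ" world domain handed in from iwm_init_hw()
	 * encodes to 0x5a5a here.
	 */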
4528 	if (iwm_mvm_is_wifi_mcc_supported(sc))
4529 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4530 	else
4531 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4532 
4533 	if (resp_v2)
4534 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4535 	else
4536 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4537 
4538 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4539 	    "send MCC update to FW with '%c%c' src = %d\n",
4540 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4541 
4542 	ret = iwm_send_cmd(sc, &hcmd);
4543 	if (ret)
4544 		return ret;
4545 
4546 #ifdef IWM_DEBUG
4547 	pkt = hcmd.resp_pkt;
4548 
4549 	/* Extract MCC response */
4550 	if (resp_v2) {
4551 		mcc_resp = (void *)pkt->data;
4552 		mcc = mcc_resp->mcc;
4553 		n_channels =  le32toh(mcc_resp->n_channels);
4554 	} else {
4555 		mcc_resp_v1 = (void *)pkt->data;
4556 		mcc = mcc_resp_v1->mcc;
4557 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4558 	}
4559 
4560 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4561 	if (mcc == 0)
4562 		mcc = 0x3030;  /* "00" - world */
4563 
4564 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4565 	    "regulatory domain '%c%c' (%d channels available)\n",
4566 	    mcc >> 8, mcc & 0xff, n_channels);
4567 #endif
4568 	iwm_free_resp(sc, &hcmd);
4569 
4570 	return 0;
4571 }
4572 
4573 static void
4574 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4575 {
4576 	struct iwm_host_cmd cmd = {
4577 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4578 		.len = { sizeof(uint32_t), },
4579 		.data = { &backoff, },
4580 	};
4581 
4582 	if (iwm_send_cmd(sc, &cmd) != 0) {
4583 		device_printf(sc->sc_dev,
4584 		    "failed to change thermal tx backoff\n");
4585 	}
4586 }
4587 
4588 static int
4589 iwm_init_hw(struct iwm_softc *sc)
4590 {
4591 	struct ieee80211com *ic = &sc->sc_ic;
4592 	int error, i, ac;
4593 
4594 	sc->sf_state = IWM_SF_UNINIT;
4595 
4596 	if ((error = iwm_start_hw(sc)) != 0) {
4597 		kprintf("iwm_start_hw: failed %d\n", error);
4598 		return error;
4599 	}
4600 
4601 	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4602 		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
4603 		return error;
4604 	}
4605 
4606 	/*
	 * We should stop and restart the HW, since the INIT
	 * image has just finished loading.
4609 	 */
4610 	iwm_stop_device(sc);
4611 	sc->sc_ps_disabled = FALSE;
4612 	if ((error = iwm_start_hw(sc)) != 0) {
4613 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4614 		return error;
4615 	}
4616 
	/* Restart, this time with the regular firmware. */
4618 	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4619 	if (error) {
4620 		device_printf(sc->sc_dev, "could not load firmware\n");
4621 		goto error;
4622 	}
4623 
4624 	error = iwm_mvm_sf_update(sc, NULL, FALSE);
4625 	if (error)
4626 		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4627 
4628 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4629 		device_printf(sc->sc_dev, "bt init conf failed\n");
4630 		goto error;
4631 	}
4632 
4633 	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4634 	if (error != 0) {
4635 		device_printf(sc->sc_dev, "antenna config failed\n");
4636 		goto error;
4637 	}
4638 
4639 	/* Send phy db control command and then phy db calibration */
4640 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4641 		goto error;
4642 
4643 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4644 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4645 		goto error;
4646 	}
4647 
4648 	/* Add auxiliary station for scanning */
4649 	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4650 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4651 		goto error;
4652 	}
4653 
4654 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4655 		/*
4656 		 * The channel used here isn't relevant as it's
4657 		 * going to be overwritten in the other flows.
4658 		 * For now use the first channel we have.
4659 		 */
4660 		if ((error = iwm_mvm_phy_ctxt_add(sc,
4661 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4662 			goto error;
4663 	}
4664 
4665 	/* Initialize tx backoffs to the minimum. */
4666 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4667 		iwm_mvm_tt_tx_backoff(sc, 0);
4668 
4669 	if (iwm_mvm_config_ltr(sc) != 0)
4670 		device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
4671 
4672 	error = iwm_mvm_power_update_device(sc);
4673 	if (error)
4674 		goto error;
4675 
4676 	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4677 		goto error;
4678 
4679 	if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4680 		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4681 			goto error;
4682 	}
4683 
4684 	/* Enable Tx queues. */
4685 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4686 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4687 		    iwm_mvm_ac_to_tx_fifo[ac]);
4688 		if (error)
4689 			goto error;
4690 	}
4691 
4692 	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4693 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4694 		goto error;
4695 	}
4696 
4697 	return 0;
4698 
4699  error:
4700 	iwm_stop_device(sc);
4701 	return error;
4702 }
4703 
4704 /* Allow multicast from our BSSID. */
4705 static int
4706 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4707 {
4708 	struct ieee80211_node *ni = vap->iv_bss;
4709 	struct iwm_mcast_filter_cmd *cmd;
4710 	size_t size;
4711 	int error;
4712 
4713 	size = roundup(sizeof(*cmd), 4);
4714 	cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4715 	if (cmd == NULL)
4716 		return ENOMEM;
4717 	cmd->filter_own = 1;
4718 	cmd->port_id = 0;
4719 	cmd->count = 0;
4720 	cmd->pass_all = 1;
4721 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4722 
4723 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4724 	    IWM_CMD_SYNC, size, cmd);
4725 	kfree(cmd, M_DEVBUF);
4726 
4727 	return (error);
4728 }
4729 
4730 /*
4731  * ifnet interfaces
4732  */
4733 
4734 static void
4735 iwm_init(struct iwm_softc *sc)
4736 {
4737 	int error;
4738 
4739 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4740 		return;
4741 	}
4742 	sc->sc_generation++;
4743 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4744 
4745 	if ((error = iwm_init_hw(sc)) != 0) {
4746 		kprintf("iwm_init_hw failed %d\n", error);
4747 		iwm_stop(sc);
4748 		return;
4749 	}
4750 
4751 	/*
4752 	 * Ok, firmware loaded and we are jogging
4753 	 */
4754 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4755 }
4756 
4757 static int
4758 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4759 {
4760 	struct iwm_softc *sc;
4761 	int error;
4762 
4763 	sc = ic->ic_softc;
4764 
4765 	IWM_LOCK(sc);
4766 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4767 		IWM_UNLOCK(sc);
4768 		return (ENXIO);
4769 	}
4770 	error = mbufq_enqueue(&sc->sc_snd, m);
4771 	if (error) {
4772 		IWM_UNLOCK(sc);
4773 		return (error);
4774 	}
4775 	iwm_start(sc);
4776 	IWM_UNLOCK(sc);
4777 	return (0);
4778 }
4779 
4780 /*
4781  * Dequeue packets from sendq and call send.
4782  */
4783 static void
4784 iwm_start(struct iwm_softc *sc)
4785 {
4786 	struct ieee80211_node *ni;
4787 	struct mbuf *m;
4788 	int ac = 0;
4789 
4790 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4791 	while (sc->qfullmsk == 0 &&
4792 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4793 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4794 		if (iwm_tx(sc, m, ni, ac) != 0) {
4795 			if_inc_counter(ni->ni_vap->iv_ifp,
4796 			    IFCOUNTER_OERRORS, 1);
4797 			ieee80211_free_node(ni);
4798 			continue;
4799 		}
4800 		if (sc->sc_tx_timer == 0) {
4801 			callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
4802 			    sc);
4803 		}
4804 		sc->sc_tx_timer = 15;
4805 	}
4806 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4807 }
4808 
4809 static void
4810 iwm_stop(struct iwm_softc *sc)
4811 {
4812 
4813 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4814 	sc->sc_flags |= IWM_FLAG_STOPPED;
4815 	sc->sc_generation++;
4816 	iwm_led_blink_stop(sc);
4817 	sc->sc_tx_timer = 0;
4818 	iwm_stop_device(sc);
4819 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4820 }
4821 
4822 static void
4823 iwm_watchdog(void *arg)
4824 {
4825 	struct iwm_softc *sc = arg;
4826 
4827 	if (sc->sc_attached == 0)
4828 		return;
4829 
4830 	if (sc->sc_tx_timer > 0) {
4831 		if (--sc->sc_tx_timer == 0) {
4832 			device_printf(sc->sc_dev, "device timeout\n");
4833 #ifdef IWM_DEBUG
4834 			iwm_nic_error(sc);
4835 #endif
4836 			iwm_stop(sc);
4837 #if defined(__DragonFly__)
4838 			++sc->sc_ic.ic_oerrors;
4839 #else
4840 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4841 #endif
4842 			return;
4843 		}
4844 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4845 	}
4846 }
4847 
4848 static void
4849 iwm_parent(struct ieee80211com *ic)
4850 {
4851 	struct iwm_softc *sc = ic->ic_softc;
4852 	int startall = 0;
4853 
4854 	IWM_LOCK(sc);
4855 	if (ic->ic_nrunning > 0) {
4856 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4857 			iwm_init(sc);
4858 			startall = 1;
4859 		}
4860 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4861 		iwm_stop(sc);
4862 	IWM_UNLOCK(sc);
4863 	if (startall)
4864 		ieee80211_start_all(ic);
4865 }
4866 
4867 /*
4868  * The interrupt side of things
4869  */
4870 
4871 /*
4872  * error dumping routines are from iwlwifi/mvm/utils.c
4873  */
4874 
4875 /*
4876  * Note: This structure is read from the device with IO accesses,
4877  * and the reading already does the endian conversion. As it is
4878  * read with uint32_t-sized accesses, any members with a different size
4879  * need to be ordered correctly though!
4880  */
4881 struct iwm_error_event_table {
4882 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4883 	uint32_t error_id;		/* type of error */
4884 	uint32_t trm_hw_status0;	/* TRM HW status */
4885 	uint32_t trm_hw_status1;	/* TRM HW status */
4886 	uint32_t blink2;		/* branch link */
4887 	uint32_t ilink1;		/* interrupt link */
4888 	uint32_t ilink2;		/* interrupt link */
4889 	uint32_t data1;		/* error-specific data */
4890 	uint32_t data2;		/* error-specific data */
4891 	uint32_t data3;		/* error-specific data */
4892 	uint32_t bcon_time;		/* beacon timer */
4893 	uint32_t tsf_low;		/* network timestamp function timer */
4894 	uint32_t tsf_hi;		/* network timestamp function timer */
4895 	uint32_t gp1;		/* GP1 timer register */
4896 	uint32_t gp2;		/* GP2 timer register */
4897 	uint32_t fw_rev_type;	/* firmware revision type */
4898 	uint32_t major;		/* uCode version major */
4899 	uint32_t minor;		/* uCode version minor */
4900 	uint32_t hw_ver;		/* HW Silicon version */
4901 	uint32_t brd_ver;		/* HW board version */
4902 	uint32_t log_pc;		/* log program counter */
4903 	uint32_t frame_ptr;		/* frame pointer */
4904 	uint32_t stack_ptr;		/* stack pointer */
4905 	uint32_t hcmd;		/* last host command header */
4906 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
4907 				 * rxtx_flag */
4908 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
4909 				 * host_flag */
4910 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
4911 				 * enc_flag */
4912 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
4913 				 * time_flag */
4914 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
4915 				 * wico interrupt */
4916 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
4917 	uint32_t wait_event;		/* wait event() caller address */
4918 	uint32_t l2p_control;	/* L2pControlField */
4919 	uint32_t l2p_duration;	/* L2pDurationField */
4920 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
4921 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
4922 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
4923 				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* date and time of the firmware
				 * compilation */
4926 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
4927 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
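
/*
 * The whole table is pulled from device memory with uint32_t-sized
 * reads, e.g.
 *
 *	iwm_read_mem(sc, base, &table, sizeof(table) / sizeof(uint32_t));
 *
 * as iwm_nic_error() below does; hence the member-ordering caveat in
 * the comment above.
 */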
4928 
4929 /*
4930  * UMAC error struct - relevant starting from family 8000 chip.
4931  * Note: This structure is read from the device with IO accesses,
4932  * and the reading already does the endian conversion. As it is
4933  * read with u32-sized accesses, any members with a different size
4934  * need to be ordered correctly though!
4935  */
4936 struct iwm_umac_error_event_table {
4937 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4938 	uint32_t error_id;	/* type of error */
4939 	uint32_t blink1;	/* branch link */
4940 	uint32_t blink2;	/* branch link */
4941 	uint32_t ilink1;	/* interrupt link */
4942 	uint32_t ilink2;	/* interrupt link */
4943 	uint32_t data1;		/* error-specific data */
4944 	uint32_t data2;		/* error-specific data */
4945 	uint32_t data3;		/* error-specific data */
4946 	uint32_t umac_major;
4947 	uint32_t umac_minor;
4948 	uint32_t frame_pointer;	/* core register 27*/
4949 	uint32_t stack_pointer;	/* core register 28 */
4950 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
4951 	uint32_t nic_isr_pref;	/* ISR status register */
4952 } __packed;
4953 
4954 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4955 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4956 
4957 #ifdef IWM_DEBUG
4958 struct {
4959 	const char *name;
4960 	uint8_t num;
4961 } advanced_lookup[] = {
4962 	{ "NMI_INTERRUPT_WDG", 0x34 },
4963 	{ "SYSASSERT", 0x35 },
4964 	{ "UCODE_VERSION_MISMATCH", 0x37 },
4965 	{ "BAD_COMMAND", 0x38 },
4966 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4967 	{ "FATAL_ERROR", 0x3D },
4968 	{ "NMI_TRM_HW_ERR", 0x46 },
4969 	{ "NMI_INTERRUPT_TRM", 0x4C },
4970 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4971 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4972 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4973 	{ "NMI_INTERRUPT_HOST", 0x66 },
4974 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
4975 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
4976 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4977 	{ "ADVANCED_SYSASSERT", 0 },
4978 };
4979 
4980 static const char *
4981 iwm_desc_lookup(uint32_t num)
4982 {
4983 	int i;
4984 
4985 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4986 		if (advanced_lookup[i].num == num)
4987 			return advanced_lookup[i].name;
4988 
4989 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4990 	return advanced_lookup[i].name;
4991 }
4992 
4993 static void
4994 iwm_nic_umac_error(struct iwm_softc *sc)
4995 {
4996 	struct iwm_umac_error_event_table table;
4997 	uint32_t base;
4998 
4999 	base = sc->umac_error_event_table;
5000 
5001 	if (base < 0x800000) {
5002 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5003 		    base);
5004 		return;
5005 	}
5006 
5007 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5008 		device_printf(sc->sc_dev, "reading errlog failed\n");
5009 		return;
5010 	}
5011 
5012 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5013 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5014 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5015 		    sc->sc_flags, table.valid);
5016 	}
5017 
5018 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5019 		iwm_desc_lookup(table.error_id));
5020 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5021 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5022 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5023 	    table.ilink1);
5024 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5025 	    table.ilink2);
5026 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5027 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5028 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5029 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5030 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5031 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5032 	    table.frame_pointer);
5033 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5034 	    table.stack_pointer);
5035 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5036 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5037 	    table.nic_isr_pref);
5038 }
5039 
5040 /*
5041  * Support for dumping the error log seemed like a good idea ...
5042  * but it's mostly hex junk and the only sensible thing is the
5043  * hw/ucode revision (which we know anyway).  Since it's here,
5044  * I'll just leave it in, just in case e.g. the Intel guys want to
5045  * help us decipher some "ADVANCED_SYSASSERT" later.
5046  */
5047 static void
5048 iwm_nic_error(struct iwm_softc *sc)
5049 {
5050 	struct iwm_error_event_table table;
5051 	uint32_t base;
5052 
5053 	device_printf(sc->sc_dev, "dumping device error log\n");
5054 	base = sc->error_event_table[0];
5055 	if (base < 0x800000) {
5056 		device_printf(sc->sc_dev,
5057 		    "Invalid error log pointer 0x%08x\n", base);
5058 		return;
5059 	}
5060 
5061 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5062 		device_printf(sc->sc_dev, "reading errlog failed\n");
5063 		return;
5064 	}
5065 
5066 	if (!table.valid) {
5067 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5068 		return;
5069 	}
5070 
5071 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5072 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5073 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5074 		    sc->sc_flags, table.valid);
5075 	}
5076 
5077 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5078 	    iwm_desc_lookup(table.error_id));
5079 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5080 	    table.trm_hw_status0);
5081 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5082 	    table.trm_hw_status1);
5083 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5084 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5085 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5086 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5087 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5088 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5089 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5090 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5091 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5092 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5093 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5094 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5095 	    table.fw_rev_type);
5096 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5097 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5098 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5099 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5100 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5101 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5102 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5103 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5104 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5105 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5106 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5107 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5108 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5109 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5110 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5111 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5112 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5113 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5114 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5115 
5116 	if (sc->umac_error_event_table)
5117 		iwm_nic_umac_error(sc);
5118 }
5119 #endif
5120 
5121 static void
5122 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5123 {
5124 	struct ieee80211com *ic = &sc->sc_ic;
5125 	struct iwm_cmd_response *cresp;
5126 	struct mbuf *m1;
5127 	uint32_t offset = 0;
5128 	uint32_t maxoff = IWM_RBUF_SIZE;
5129 	uint32_t nextoff;
5130 	boolean_t stolen = FALSE;
5131 
5132 #define HAVEROOM(a)	\
5133     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
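
	/*
	 * HAVEROOM() checks that at least a status word and a command
	 * header still fit below maxoff before we look at a packet at
	 * offset (a); anything shorter cannot be a valid notification.
	 */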
5134 
5135 	while (HAVEROOM(offset)) {
5136 		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5137 		    offset);
5138 		int qid, idx, code, len;
5139 
5140 		qid = pkt->hdr.qid;
5141 		idx = pkt->hdr.idx;
5142 
5143 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5144 
5145 		/*
		 * We randomly get these from the firmware, no idea why.
		 * They at least seem harmless, so just ignore them for now.
5148 		 */
5149 		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5150 		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5151 			break;
5152 		}
5153 
5154 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5155 		    "rx packet qid=%d idx=%d type=%x\n",
5156 		    qid & ~0x80, pkt->hdr.idx, code);
5157 
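		/*
		 * Frames are aligned within the buffer.  Illustrative
		 * example, assuming IWM_FH_RSCSR_FRAME_ALIGN is 0x40: a
		 * frame size of 100 bytes gives len = 104 including the
		 * status word, so nextoff advances by 0x80 bytes.
		 */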
5158 		len = iwm_rx_packet_len(pkt);
5159 		len += sizeof(uint32_t); /* account for status word */
5160 		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5161 
5162 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5163 
5164 		switch (code) {
5165 		case IWM_REPLY_RX_PHY_CMD:
5166 			iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5167 			break;
5168 
5169 		case IWM_REPLY_RX_MPDU_CMD: {
			/*
			 * If this is the last frame in the RX buffer, we
			 * can pass the original mbuf up the stack directly.
			 */
5174 			struct iwm_rx_packet *nextpkt = mtodoff(m,
5175 			    struct iwm_rx_packet *, nextoff);
5176 			if (!HAVEROOM(nextoff) ||
5177 			    (nextpkt->hdr.code == 0 &&
5178 			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5179 			     nextpkt->hdr.idx == 0) ||
5180 			    (nextpkt->len_n_flags ==
5181 			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5182 				if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5183 					stolen = FALSE;
5184 					/* Make sure we abort the loop */
5185 					nextoff = maxoff;
5186 				}
5187 				break;
5188 			}
5189 
			/*
			 * Use m_copym() instead of m_split(), because that
			 * makes it easier to keep a valid rx buffer in the
			 * ring when iwm_mvm_rx_rx_mpdu() fails.
			 *
			 * We need to start m_copym() at offset 0 to get the
			 * M_PKTHDR flag preserved.
			 */
5198 			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5199 			if (m1) {
5200 				if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5201 					stolen = TRUE;
5202 				else
5203 					m_freem(m1);
5204 			}
5205 			break;
5206 		}
5207 
5208 		case IWM_TX_CMD:
5209 			iwm_mvm_rx_tx_cmd(sc, pkt);
5210 			break;
5211 
5212 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5213 			struct iwm_missed_beacons_notif *resp;
5214 			int missed;
5215 
5216 			/* XXX look at mac_id to determine interface ID */
5217 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5218 
5219 			resp = (void *)pkt->data;
5220 			missed = le32toh(resp->consec_missed_beacons);
5221 
5222 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5223 			    "%s: MISSED_BEACON: mac_id=%d, "
5224 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5225 			    "num_rx=%d\n",
5226 			    __func__,
5227 			    le32toh(resp->mac_id),
5228 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5229 			    le32toh(resp->consec_missed_beacons),
5230 			    le32toh(resp->num_expected_beacons),
5231 			    le32toh(resp->num_recvd_beacons));
5232 
5233 			/* Be paranoid */
5234 			if (vap == NULL)
5235 				break;
5236 
5237 			/* XXX no net80211 locking? */
5238 			if (vap->iv_state == IEEE80211_S_RUN &&
5239 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5240 				if (missed > vap->iv_bmissthreshold) {
5241 					/* XXX bad locking; turn into task */
5242 					IWM_UNLOCK(sc);
5243 					ieee80211_beacon_miss(ic);
5244 					IWM_LOCK(sc);
5245 				}
5246 			}
5247 
5248 			break; }
5249 
5250 		case IWM_MFUART_LOAD_NOTIFICATION:
5251 			break;
5252 
5253 		case IWM_MVM_ALIVE:
5254 			break;
5255 
5256 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5257 			break;
5258 
5259 		case IWM_STATISTICS_NOTIFICATION:
5260 			iwm_mvm_handle_rx_statistics(sc, pkt);
5261 			break;
5262 
5263 		case IWM_NVM_ACCESS_CMD:
5264 		case IWM_MCC_UPDATE_CMD:
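			/*
			 * sc_wantresp holds the ((qid << 16) | idx) slot of
			 * the response a synchronous command issuer is
			 * waiting on; stash matching packets in sc_cmd_resp
			 * for it.
			 */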
5265 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5266 				memcpy(sc->sc_cmd_resp,
5267 				    pkt, sizeof(sc->sc_cmd_resp));
5268 			}
5269 			break;
5270 
5271 		case IWM_MCC_CHUB_UPDATE_CMD: {
5272 			struct iwm_mcc_chub_notif *notif;
5273 			notif = (void *)pkt->data;
5274 
5275 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5276 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5277 			sc->sc_fw_mcc[2] = '\0';
5278 			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5279 			    "fw source %d sent CC '%s'\n",
5280 			    notif->source_id, sc->sc_fw_mcc);
5281 			break;
5282 		}
5283 
5284 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5285 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5286 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5287 			struct iwm_dts_measurement_notif_v1 *notif;
5288 
5289 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5290 				device_printf(sc->sc_dev,
5291 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5292 				break;
5293 			}
5294 			notif = (void *)pkt->data;
5295 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5296 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5297 			    notif->temp);
5298 			break;
5299 		}
5300 
5301 		case IWM_PHY_CONFIGURATION_CMD:
5302 		case IWM_TX_ANT_CONFIGURATION_CMD:
5303 		case IWM_ADD_STA:
5304 		case IWM_MAC_CONTEXT_CMD:
5305 		case IWM_REPLY_SF_CFG_CMD:
5306 		case IWM_POWER_TABLE_CMD:
5307 		case IWM_LTR_CONFIG:
5308 		case IWM_PHY_CONTEXT_CMD:
5309 		case IWM_BINDING_CONTEXT_CMD:
5310 		case IWM_TIME_EVENT_CMD:
5311 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5312 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5313 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5314 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5315 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5316 		case IWM_REPLY_BEACON_FILTERING_CMD:
5317 		case IWM_MAC_PM_POWER_TABLE:
5318 		case IWM_TIME_QUOTA_CMD:
5319 		case IWM_REMOVE_STA:
5320 		case IWM_TXPATH_FLUSH:
5321 		case IWM_LQ_CMD:
5322 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5323 				 IWM_FW_PAGING_BLOCK_CMD):
5324 		case IWM_BT_CONFIG:
5325 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5326 			cresp = (void *)pkt->data;
5327 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5328 				memcpy(sc->sc_cmd_resp,
5329 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5330 			}
5331 			break;
5332 
5333 		/* ignore */
5334 		case IWM_PHY_DB_CMD:
5335 			break;
5336 
5337 		case IWM_INIT_COMPLETE_NOTIF:
5338 			break;
5339 
5340 		case IWM_SCAN_OFFLOAD_COMPLETE:
5341 			iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5342 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5343 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5344 				ieee80211_runtask(ic, &sc->sc_es_task);
5345 			}
5346 			break;
5347 
5348 		case IWM_SCAN_ITERATION_COMPLETE: {
5349 			struct iwm_lmac_scan_complete_notif *notif;
5350 			notif = (void *)pkt->data;
5351 			break;
5352 		}
5353 
5354 		case IWM_SCAN_COMPLETE_UMAC:
5355 			iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5356 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5357 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5358 				ieee80211_runtask(ic, &sc->sc_es_task);
5359 			}
5360 			break;
5361 
5362 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5363 			struct iwm_umac_scan_iter_complete_notif *notif;
5364 			notif = (void *)pkt->data;
5365 
5366 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5367 			    "complete, status=0x%x, %d channels scanned\n",
5368 			    notif->status, notif->scanned_channels);
5369 			break;
5370 		}
5371 
5372 		case IWM_REPLY_ERROR: {
5373 			struct iwm_error_resp *resp;
5374 			resp = (void *)pkt->data;
5375 
5376 			device_printf(sc->sc_dev,
5377 			    "firmware error 0x%x, cmd 0x%x\n",
5378 			    le32toh(resp->error_type),
5379 			    resp->cmd_id);
5380 			break;
5381 		}
5382 
5383 		case IWM_TIME_EVENT_NOTIFICATION:
5384 			iwm_mvm_rx_time_event_notif(sc, pkt);
5385 			break;
5386 
5387 		/*
5388 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5389 		 * messages. Just ignore them for now.
5390 		 */
5391 		case IWM_DEBUG_LOG_MSG:
5392 			break;
5393 
5394 		case IWM_MCAST_FILTER_CMD:
5395 			break;
5396 
5397 		case IWM_SCD_QUEUE_CFG: {
5398 			struct iwm_scd_txq_cfg_rsp *rsp;
5399 			rsp = (void *)pkt->data;
5400 
5401 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5402 			    "queue cfg token=0x%x sta_id=%d "
5403 			    "tid=%d scd_queue=%d\n",
5404 			    rsp->token, rsp->sta_id, rsp->tid,
5405 			    rsp->scd_queue);
5406 			break;
5407 		}
5408 
5409 		default:
5410 			device_printf(sc->sc_dev,
5411 			    "frame %d/%d %x UNHANDLED (this should "
5412 			    "not happen)\n", qid & ~0x80, idx,
5413 			    pkt->len_n_flags);
5414 			break;
5415 		}
5416 
5417 		/*
5418 		 * Why test bit 0x80?  The Linux driver:
5419 		 *
5420 		 * There is one exception:  uCode sets bit 15 when it
5421 		 * originates the response/notification, i.e. when the
5422 		 * response/notification is not a direct response to a
5423 		 * command sent by the driver.  For example, uCode issues
5424 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5425 		 * it is not a direct response to any driver command.
5426 		 *
		 * How does bit 7 correspond to bit 15?  The Linux driver's
		 * pkt->hdr carries a two-byte sequence field whose upper
		 * byte is our "qid", so bit 15 there is bit 7 of qid here.
5430 		 */
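		/*
		 * Example: a notification arriving with qid 0x80 has bit 7
		 * set, so it is firmware-originated and needs no
		 * iwm_cmd_done() bookkeeping; a direct command response
		 * arrives with bit 7 clear.
		 */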
5431 		if (!(qid & (1 << 7)))
5432 			iwm_cmd_done(sc, pkt);
5433 
5434 		offset = nextoff;
5435 	}
5436 	if (stolen)
5437 		m_freem(m);
5438 #undef HAVEROOM
5439 }
5440 
5441 /*
5442  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5443  * Basic structure from if_iwn
5444  */
5445 static void
5446 iwm_notif_intr(struct iwm_softc *sc)
5447 {
5448 	uint16_t hw;
5449 
5450 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5451 	    BUS_DMASYNC_POSTREAD);
5452 
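	/*
	 * closed_rb_num is the firmware's side of the ring: entries up to
	 * this index have been filled and may be processed.  Only the low
	 * 12 bits of the field are valid.
	 */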
5453 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5454 
5455 	/*
5456 	 * Process responses
5457 	 */
5458 	while (sc->rxq.cur != hw) {
5459 		struct iwm_rx_ring *ring = &sc->rxq;
5460 		struct iwm_rx_data *data = &ring->data[ring->cur];
5461 
5462 		bus_dmamap_sync(ring->data_dmat, data->map,
5463 		    BUS_DMASYNC_POSTREAD);
5464 
5465 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5466 		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5467 		iwm_handle_rxb(sc, data->m);
5468 
5469 		ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5470 	}
5471 
	/*
	 * Tell the firmware that it can reuse the ring entries that
	 * we have just processed.  The hardware appears to require the
	 * write pointer to be rounded down to a multiple of 8; e.g.
	 * after processing up to index 37, we write rounddown2(36, 8),
	 * i.e. 32.
	 */
5478 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5479 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5480 }
5481 
5482 static void
5483 iwm_intr(void *arg)
5484 {
5485 	struct iwm_softc *sc = arg;
5486 	int handled = 0;
5487 	int r1, r2, rv = 0;
5488 	int isperiodic = 0;
5489 
5490 #if defined(__DragonFly__)
5491 	if (sc->sc_mem == NULL) {
5492 		kprintf("iwm_intr: detached\n");
5493 		return;
5494 	}
5495 #endif
5496 	IWM_LOCK(sc);
5497 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5498 
5499 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
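		/*
		 * ICT (interrupt cause table) mode: rather than our reading
		 * IWM_CSR_INT, the device DMAs its interrupt causes into a
		 * table in host memory; walk the entries here, zeroing each
		 * one as it is consumed.
		 */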
5500 		uint32_t *ict = sc->ict_dma.vaddr;
5501 		int tmp;
5502 
5503 		tmp = htole32(ict[sc->ict_cur]);
5504 		if (!tmp)
5505 			goto out_ena;
5506 
		/*
		 * At least one cause is pending; keep reading and clearing
		 * ICT entries until we hit a zero entry.
		 */
5510 		r1 = r2 = 0;
5511 		while (tmp) {
5512 			r1 |= tmp;
5513 			ict[sc->ict_cur] = 0;
5514 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5515 			tmp = htole32(ict[sc->ict_cur]);
5516 		}
5517 
		/* An all-ones value is invalid; treat it as no interrupt. */
5519 		if (r1 == 0xffffffff)
5520 			r1 = 0;
5521 
5522 		/* i am not expected to understand this */
		/*
		 * Hardware bug workaround (per the Linux driver): with
		 * interrupt coalescing, the Rx bit (bit 15 before it is
		 * shifted to bit 31) can read back clear while bits 18
		 * and 19 remain set, so use those to restore it.
		 */
5524 			r1 |= 0x8000;
5525 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5526 	} else {
5527 		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* Hardware is gone (bus reads return all-ones or poison). */
5529 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5530 			goto out;
5531 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5532 	}
5533 	if (r1 == 0 && r2 == 0) {
5534 		goto out_ena;
5535 	}
5536 
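	/*
	 * IWM_CSR_INT is write-1-to-clear; acknowledge the causes we saw,
	 * plus everything outside our interrupt mask so stray bits do not
	 * stay asserted.
	 */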
5537 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5538 
5539 	/* Safely ignore these bits for debug checks below */
5540 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5541 
5542 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5543 		int i;
5544 		struct ieee80211com *ic = &sc->sc_ic;
5545 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5546 
5547 #ifdef IWM_DEBUG
5548 		iwm_nic_error(sc);
5549 #endif
5550 		/* Dump driver status (TX and RX rings) while we're here. */
5551 		device_printf(sc->sc_dev, "driver status:\n");
5552 		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5553 			struct iwm_tx_ring *ring = &sc->txq[i];
5554 			device_printf(sc->sc_dev,
5555 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5556 			    "queued=%-3d\n",
5557 			    i, ring->qid, ring->cur, ring->queued);
5558 		}
5559 		device_printf(sc->sc_dev,
5560 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5561 		device_printf(sc->sc_dev,
5562 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5563 
5564 		/* Reset our firmware state tracking. */
5565 		sc->sc_firmware_state = 0;
5566 		/* Don't stop the device; just do a VAP restart */
5567 		IWM_UNLOCK(sc);
5568 
5569 		if (vap == NULL) {
5570 			kprintf("%s: null vap\n", __func__);
5571 			return;
5572 		}
5573 
5574 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5575 		    "restarting\n", __func__, vap->iv_state);
5576 
5577 		ieee80211_restart_all(ic);
5578 		return;
5579 	}
5580 
5581 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5582 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5583 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5584 		iwm_stop(sc);
5585 		rv = 1;
5586 		goto out;
5587 	}
5588 
5589 	/* firmware chunk loaded */
5590 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5591 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5592 		handled |= IWM_CSR_INT_BIT_FH_TX;
5593 		sc->sc_fw_chunk_done = 1;
5594 		wakeup(&sc->sc_fw);
5595 	}
5596 
5597 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5598 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5599 		if (iwm_check_rfkill(sc)) {
5600 			device_printf(sc->sc_dev,
5601 			    "%s: rfkill switch, disabling interface\n",
5602 			    __func__);
5603 			iwm_stop(sc);
5604 		}
5605 	}
5606 
5607 	/*
	 * The Linux driver uses periodic interrupts to avoid races when
	 * RX entries arrive while the previous ones are still being
	 * processed; we follow the same scheme here.
5610 	 */
5611 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5612 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5613 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5614 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5615 			IWM_WRITE_1(sc,
5616 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5617 		isperiodic = 1;
5618 	}
5619 
5620 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5621 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5622 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5623 
5624 		iwm_notif_intr(sc);
5625 
5626 		/* enable periodic interrupt, see above */
5627 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5628 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5629 			    IWM_CSR_INT_PERIODIC_ENA);
5630 	}
5631 
5632 	if (__predict_false(r1 & ~handled))
5633 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5634 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5635 	rv = 1;
5636 
5637  out_ena:
5638 	iwm_restore_interrupts(sc);
5639  out:
5640 	IWM_UNLOCK(sc);
5641 	return;
5642 }
5643 
5644 /*
 * Autoconf glue: PCI device IDs, probe, and attach.
5646  */
5647 #define	PCI_VENDOR_INTEL		0x8086
5648 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5649 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5650 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5651 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5652 #define	PCI_PRODUCT_INTEL_WL_3168	0x24fb
5653 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5654 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5655 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5656 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5657 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5658 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5659 #define	PCI_PRODUCT_INTEL_WL_8265	0x24fd
5660 
5661 static const struct iwm_devices {
5662 	uint16_t		device;
5663 	const struct iwm_cfg	*cfg;
5664 } iwm_devices[] = {
5665 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5666 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5667 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5668 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5669 	{ PCI_PRODUCT_INTEL_WL_3168,   &iwm3168_cfg },
5670 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5671 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5672 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5673 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5674 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5675 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5676 	{ PCI_PRODUCT_INTEL_WL_8265,   &iwm8265_cfg },
5677 };
5678 
5679 static int
5680 iwm_probe(device_t dev)
5681 {
5682 	int i;
5683 
5684 	for (i = 0; i < nitems(iwm_devices); i++) {
5685 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5686 		    pci_get_device(dev) == iwm_devices[i].device) {
5687 			device_set_desc(dev, iwm_devices[i].cfg->name);
5688 			return (BUS_PROBE_DEFAULT);
5689 		}
5690 	}
5691 
5692 	return (ENXIO);
5693 }
5694 
5695 static int
5696 iwm_dev_check(device_t dev)
5697 {
5698 	struct iwm_softc *sc;
5699 	uint16_t devid;
5700 	int i;
5701 
5702 	sc = device_get_softc(dev);
5703 
5704 	devid = pci_get_device(dev);
5705 	for (i = 0; i < NELEM(iwm_devices); i++) {
5706 		if (iwm_devices[i].device == devid) {
5707 			sc->cfg = iwm_devices[i].cfg;
5708 			return (0);
5709 		}
5710 	}
5711 	device_printf(dev, "unknown adapter type\n");
5712 	return ENXIO;
5713 }
5714 
5715 /* PCI registers */
5716 #define PCI_CFG_RETRY_TIMEOUT	0x041
5717 
5718 static int
5719 iwm_pci_attach(device_t dev)
5720 {
5721 	struct iwm_softc *sc;
5722 	int count, error, rid;
5723 	uint16_t reg;
5724 #if defined(__DragonFly__)
5725 	int irq_flags;
5726 #endif
5727 
5728 	sc = device_get_softc(dev);
5729 
	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
5732 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5733 
5734 	/* Enable bus-mastering and hardware bug workaround. */
5735 	pci_enable_busmaster(dev);
5736 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
	/* Clear the INTx-asserted status bit; only relevant without MSI. */
5738 	if (reg & PCIM_STATUS_INTxSTATE) {
5739 		reg &= ~PCIM_STATUS_INTxSTATE;
5740 	}
5741 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5742 
5743 	rid = PCIR_BAR(0);
5744 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5745 	    RF_ACTIVE);
5746 	if (sc->sc_mem == NULL) {
5747 		device_printf(sc->sc_dev, "can't map mem space\n");
5748 		return (ENXIO);
5749 	}
5750 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5751 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5752 
5753 	/* Install interrupt handler. */
5754 	count = 1;
5755 	rid = 0;
5756 #if defined(__DragonFly__)
5757 	pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
5758 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
5759 #else
5760 	if (pci_alloc_msi(dev, &count) == 0)
5761 		rid = 1;
5762 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5763 	    (rid != 0 ? 0 : RF_SHAREABLE));
5764 #endif
5765 	if (sc->sc_irq == NULL) {
5766 		device_printf(dev, "can't map interrupt\n");
		return (ENXIO);
5768 	}
5769 #if defined(__DragonFly__)
5770 	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
5771 			       iwm_intr, sc, &sc->sc_ih,
5772 			       &wlan_global_serializer);
5773 #else
5774 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5775 	    NULL, iwm_intr, sc, &sc->sc_ih);
5776 #endif
5777 	if (sc->sc_ih == NULL) {
		device_printf(dev, "can't establish interrupt\n");
#if defined(__DragonFly__)
		pci_release_msi(dev);
#endif
		return (ENXIO);
5783 	}
5784 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5785 
5786 	return (0);
5787 }
5788 
5789 static void
5790 iwm_pci_detach(device_t dev)
5791 {
5792 	struct iwm_softc *sc = device_get_softc(dev);
5793 
5794 	if (sc->sc_irq != NULL) {
5795 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5796 		bus_release_resource(dev, SYS_RES_IRQ,
5797 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5798 		pci_release_msi(dev);
5799 #if defined(__DragonFly__)
5800 		sc->sc_irq = NULL;
5801 #endif
	}
5803 	if (sc->sc_mem != NULL) {
5804 		bus_release_resource(dev, SYS_RES_MEMORY,
5805 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5806 #if defined(__DragonFly__)
5807 		sc->sc_mem = NULL;
5808 #endif
5809 	}
5810 }
5811 
5814 static int
5815 iwm_attach(device_t dev)
5816 {
5817 	struct iwm_softc *sc = device_get_softc(dev);
5818 	struct ieee80211com *ic = &sc->sc_ic;
5819 	int error;
5820 	int txq_i, i;
5821 
5822 	sc->sc_dev = dev;
5823 	sc->sc_attached = 1;
5824 	IWM_LOCK_INIT(sc);
5825 	mbufq_init(&sc->sc_snd, ifqmaxlen);
5826 #if defined(__DragonFly__)
5827 	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
5828 #else
5829 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5830 #endif
5831 	callout_init(&sc->sc_led_blink_to);
5832 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5833 
5834 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
5835 	if (sc->sc_notif_wait == NULL) {
5836 		device_printf(dev, "failed to init notification wait struct\n");
5837 		goto fail;
5838 	}
5839 
5840 	sc->sf_state = IWM_SF_UNINIT;
5841 
5842 	/* Init phy db */
5843 	sc->sc_phy_db = iwm_phy_db_init(sc);
5844 	if (!sc->sc_phy_db) {
5845 		device_printf(dev, "Cannot init phy_db\n");
5846 		goto fail;
5847 	}
5848 
5849 	/* Set EBS as successful as long as not stated otherwise by the FW. */
5850 	sc->last_ebs_successful = TRUE;
5851 
5852 	/* PCI attach */
5853 	error = iwm_pci_attach(dev);
5854 	if (error != 0)
5855 		goto fail;
5856 
5857 	sc->sc_wantresp = -1;
5858 
5859 	/* Match device id */
5860 	error = iwm_dev_check(dev);
5861 	if (error != 0)
5862 		goto fail;
5863 
5864 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5865 	/*
5866 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
5867 	 * changed, and now the revision step also includes bit 0-1 (no more
5868 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
5869 	 * in the old format.
5870 	 */
5871 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
5872 		int ret;
5873 		uint32_t hw_step;
5874 
5875 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5876 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5877 
5878 		if (iwm_prepare_card_hw(sc) != 0) {
5879 			device_printf(dev, "could not initialize hardware\n");
5880 			goto fail;
5881 		}
5882 
5883 		/*
5884 		 * In order to recognize C step the driver should read the
5885 		 * chip version id located at the AUX bus MISC address.
5886 		 */
5887 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5888 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5889 		DELAY(2);
5890 
5891 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5892 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5893 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5894 				   25000);
5895 		if (!ret) {
5896 			device_printf(sc->sc_dev,
5897 			    "Failed to wake up the nic\n");
5898 			goto fail;
5899 		}
5900 
5901 		if (iwm_nic_lock(sc)) {
5902 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5903 			hw_step |= IWM_ENABLE_WFPM;
5904 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5905 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5906 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5907 			if (hw_step == 0x3)
5908 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5909 						(IWM_SILICON_C_STEP << 2);
5910 			iwm_nic_unlock(sc);
5911 		} else {
5912 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
5913 			goto fail;
5914 		}
5915 	}
5916 
5917 	/* special-case 7265D, it has the same PCI IDs. */
5918 	if (sc->cfg == &iwm7265_cfg &&
5919 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
5920 		sc->cfg = &iwm7265d_cfg;
5921 	}
5922 
5923 	/* Allocate DMA memory for firmware transfers. */
5924 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
5925 		device_printf(dev, "could not allocate memory for firmware\n");
5926 		goto fail;
5927 	}
5928 
5929 	/* Allocate "Keep Warm" page. */
5930 	if ((error = iwm_alloc_kw(sc)) != 0) {
5931 		device_printf(dev, "could not allocate keep warm page\n");
5932 		goto fail;
5933 	}
5934 
5935 	/* We use ICT interrupts */
5936 	if ((error = iwm_alloc_ict(sc)) != 0) {
5937 		device_printf(dev, "could not allocate ICT table\n");
5938 		goto fail;
5939 	}
5940 
5941 	/* Allocate TX scheduler "rings". */
5942 	if ((error = iwm_alloc_sched(sc)) != 0) {
5943 		device_printf(dev, "could not allocate TX scheduler rings\n");
5944 		goto fail;
5945 	}
5946 
5947 	/* Allocate TX rings */
5948 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
5949 		if ((error = iwm_alloc_tx_ring(sc,
5950 		    &sc->txq[txq_i], txq_i)) != 0) {
5951 			device_printf(dev,
5952 			    "could not allocate TX ring %d\n",
5953 			    txq_i);
5954 			goto fail;
5955 		}
5956 	}
5957 
5958 	/* Allocate RX ring. */
5959 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
5960 		device_printf(dev, "could not allocate RX ring\n");
5961 		goto fail;
5962 	}
5963 
5964 	/* Clear pending interrupts. */
5965 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
5966 
5967 	ic->ic_softc = sc;
5968 	ic->ic_name = device_get_nameunit(sc->sc_dev);
5969 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
5970 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
5971 
5972 	/* Set device capabilities. */
5973 	ic->ic_caps =
5974 	    IEEE80211_C_STA |
5975 	    IEEE80211_C_WPA |		/* WPA/RSN */
5976 	    IEEE80211_C_WME |
5977 	    IEEE80211_C_PMGT |
5978 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
5979 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
5980 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
5981 	    ;
5982 	/* Advertise full-offload scanning */
5983 	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
5984 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
5985 		sc->sc_phyctxt[i].id = i;
5986 		sc->sc_phyctxt[i].color = 0;
5987 		sc->sc_phyctxt[i].ref = 0;
5988 		sc->sc_phyctxt[i].channel = NULL;
5989 	}
5990 
5991 	/* Default noise floor */
5992 	sc->sc_noise = -96;
5993 
5994 	/* Max RSSI */
5995 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
5996 
5997 #ifdef IWM_DEBUG
5998 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
5999 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6000 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6001 #endif
6002 
6003 	error = iwm_read_firmware(sc);
6004 	if (error) {
6005 		goto fail;
6006 	} else if (sc->sc_fw.fw_fp == NULL) {
6007 		/*
6008 		 * XXX Add a solution for properly deferring firmware load
6009 		 *     during bootup.
6010 		 */
6011 		goto fail;
6012 	} else {
6013 		sc->sc_preinit_hook.ich_func = iwm_preinit;
6014 		sc->sc_preinit_hook.ich_arg = sc;
6015 		sc->sc_preinit_hook.ich_desc = "iwm";
6016 		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6017 			device_printf(dev,
6018 			    "config_intrhook_establish failed\n");
6019 			goto fail;
6020 		}
6021 	}
6022 
6023 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6024 	    "<-%s\n", __func__);
6025 
6026 	return 0;
6027 
6028 	/* Free allocated memory if something failed during attachment. */
6029 fail:
6030 	iwm_detach_local(sc, 0);
6031 
6032 	return ENXIO;
6033 }
6034 
6035 static int
6036 iwm_is_valid_ether_addr(uint8_t *addr)
6037 {
6038 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6039 
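	/*
	 * Bit 0 of the first octet is the group/multicast bit; neither
	 * multicast nor all-zero addresses are valid station addresses.
	 */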
6040 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6041 		return (FALSE);
6042 
6043 	return (TRUE);
6044 }
6045 
6046 static int
6047 iwm_wme_update(struct ieee80211com *ic)
6048 {
6049 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
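/* Example: a logcwmin (ECWmin) of 4 yields IWM_EXP2(4) == 15 slots. */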
6050 	struct iwm_softc *sc = ic->ic_softc;
6051 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6052 	struct iwm_vap *ivp = IWM_VAP(vap);
6053 	struct iwm_node *in;
6054 	struct wmeParams tmp[WME_NUM_AC];
6055 	int aci, error;
6056 
6057 	if (vap == NULL)
6058 		return (0);
6059 
6060 	IEEE80211_LOCK(ic);
6061 	for (aci = 0; aci < WME_NUM_AC; aci++)
6062 		tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6063 	IEEE80211_UNLOCK(ic);
6064 
6065 	IWM_LOCK(sc);
6066 	for (aci = 0; aci < WME_NUM_AC; aci++) {
6067 		const struct wmeParams *ac = &tmp[aci];
6068 		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6069 		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6070 		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6071 		ivp->queue_params[aci].edca_txop =
6072 		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6073 	}
6074 	ivp->have_wme = TRUE;
6075 	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6076 		in = IWM_NODE(vap->iv_bss);
6077 		if (in->in_assoc) {
6078 			if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6079 				device_printf(sc->sc_dev,
6080 				    "%s: failed to update MAC\n", __func__);
6081 			}
6082 		}
6083 	}
6084 	IWM_UNLOCK(sc);
6085 
6086 	return (0);
6087 #undef IWM_EXP2
6088 }
6089 
6090 static void
6091 iwm_preinit(void *arg)
6092 {
6093 	struct iwm_softc *sc = arg;
6094 	device_t dev = sc->sc_dev;
6095 	struct ieee80211com *ic = &sc->sc_ic;
6096 	int error;
6097 
6098 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6099 	    "->%s\n", __func__);
6100 
6101 	IWM_LOCK(sc);
6102 	if ((error = iwm_start_hw(sc)) != 0) {
6103 		device_printf(dev, "could not initialize hardware\n");
6104 		IWM_UNLOCK(sc);
6105 		goto fail;
6106 	}
6107 
6108 	error = iwm_run_init_mvm_ucode(sc, 1);
6109 	iwm_stop_device(sc);
6110 	if (error) {
6111 		IWM_UNLOCK(sc);
6112 		goto fail;
6113 	}
6114 	device_printf(dev,
6115 	    "hw rev 0x%x, fw ver %s, address %s\n",
6116 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6117 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6118 
6119 	/* not all hardware can do 5GHz band */
6120 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6121 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6122 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6123 	IWM_UNLOCK(sc);
6124 
6125 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6126 	    ic->ic_channels);
6127 
6128 	/*
6129 	 * At this point we've committed - if we fail to do setup,
6130 	 * we now also have to tear down the net80211 state.
6131 	 */
6132 	ieee80211_ifattach(ic);
6133 	ic->ic_vap_create = iwm_vap_create;
6134 	ic->ic_vap_delete = iwm_vap_delete;
6135 	ic->ic_raw_xmit = iwm_raw_xmit;
6136 	ic->ic_node_alloc = iwm_node_alloc;
6137 	ic->ic_scan_start = iwm_scan_start;
6138 	ic->ic_scan_end = iwm_scan_end;
6139 	ic->ic_update_mcast = iwm_update_mcast;
6140 	ic->ic_getradiocaps = iwm_init_channel_map;
6141 	ic->ic_set_channel = iwm_set_channel;
6142 	ic->ic_scan_curchan = iwm_scan_curchan;
6143 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6144 	ic->ic_wme.wme_update = iwm_wme_update;
6145 	ic->ic_parent = iwm_parent;
6146 	ic->ic_transmit = iwm_transmit;
6147 	iwm_radiotap_attach(sc);
6148 	if (bootverbose)
6149 		ieee80211_announce(ic);
6150 
6151 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6152 	    "<-%s\n", __func__);
6153 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6154 
6155 	return;
6156 fail:
6157 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6158 	iwm_detach_local(sc, 0);
6159 }
6160 
6161 /*
6162  * Attach the interface to 802.11 radiotap.
6163  */
6164 static void
6165 iwm_radiotap_attach(struct iwm_softc *sc)
6166 {
	struct ieee80211com *ic = &sc->sc_ic;
6168 
6169 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6170 	    "->%s begin\n", __func__);
	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWM_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWM_RX_RADIOTAP_PRESENT);
6176 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s end\n", __func__);
6178 }
6179 
6180 static struct ieee80211vap *
6181 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6182     enum ieee80211_opmode opmode, int flags,
6183     const uint8_t bssid[IEEE80211_ADDR_LEN],
6184     const uint8_t mac[IEEE80211_ADDR_LEN])
6185 {
6186 	struct iwm_vap *ivp;
6187 	struct ieee80211vap *vap;
6188 
6189 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6190 		return NULL;
6191 	ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
6192 	vap = &ivp->iv_vap;
6193 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6194 	vap->iv_bmissthreshold = 10;            /* override default */
6195 	/* Override with driver methods. */
6196 	ivp->iv_newstate = vap->iv_newstate;
6197 	vap->iv_newstate = iwm_newstate;
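	/*
	 * The usual net80211 override pattern: iwm_newstate() performs
	 * driver-specific work and then chains to the saved
	 * ivp->iv_newstate to complete the transition.
	 */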
6198 
6199 	ivp->id = IWM_DEFAULT_MACID;
6200 	ivp->color = IWM_DEFAULT_COLOR;
6201 
6202 	ivp->have_wme = FALSE;
6203 	ivp->ps_disabled = FALSE;
6204 
6205 	ieee80211_ratectl_init(vap);
6206 	/* Complete setup. */
6207 	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6208 	    mac);
6209 	ic->ic_opmode = opmode;
6210 
6211 	return vap;
6212 }
6213 
6214 static void
6215 iwm_vap_delete(struct ieee80211vap *vap)
6216 {
6217 	struct iwm_vap *ivp = IWM_VAP(vap);
6218 
6219 	ieee80211_ratectl_deinit(vap);
6220 	ieee80211_vap_detach(vap);
6221 	kfree(ivp, M_80211_VAP);
6222 }
6223 
6224 static void
6225 iwm_xmit_queue_drain(struct iwm_softc *sc)
6226 {
6227 	struct mbuf *m;
6228 	struct ieee80211_node *ni;
6229 
6230 	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6231 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6232 		ieee80211_free_node(ni);
6233 		m_freem(m);
6234 	}
6235 }
6236 
6237 static void
6238 iwm_scan_start(struct ieee80211com *ic)
6239 {
6240 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6241 	struct iwm_softc *sc = ic->ic_softc;
6242 	int error;
6243 
6244 	IWM_LOCK(sc);
6245 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6246 		/* This should not be possible */
6247 		device_printf(sc->sc_dev,
6248 		    "%s: Previous scan not completed yet\n", __func__);
6249 	}
6250 	if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6251 		error = iwm_mvm_umac_scan(sc);
6252 	else
6253 		error = iwm_mvm_lmac_scan(sc);
6254 	if (error != 0) {
6255 		device_printf(sc->sc_dev, "could not initiate scan\n");
6256 		IWM_UNLOCK(sc);
6257 		ieee80211_cancel_scan(vap);
6258 	} else {
6259 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6260 		iwm_led_blink_start(sc);
6261 		IWM_UNLOCK(sc);
6262 	}
6263 }
6264 
6265 static void
6266 iwm_scan_end(struct ieee80211com *ic)
6267 {
6268 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6269 	struct iwm_softc *sc = ic->ic_softc;
6270 
6271 	IWM_LOCK(sc);
6272 	iwm_led_blink_stop(sc);
6273 	if (vap->iv_state == IEEE80211_S_RUN)
6274 		iwm_mvm_led_enable(sc);
6275 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6276 		/*
6277 		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6278 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6279 		 * taskqueue.
6280 		 */
6281 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6282 		iwm_mvm_scan_stop_wait(sc);
6283 	}
6284 	IWM_UNLOCK(sc);
6285 
6286 	/*
6287 	 * Make sure we don't race, if sc_es_task is still enqueued here.
6288 	 * This is to make sure that it won't call ieee80211_scan_done
6289 	 * when we have already started the next scan.
6290 	 */
6291 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6292 }
6293 
6294 static void
6295 iwm_update_mcast(struct ieee80211com *ic)
6296 {
6297 }
6298 
6299 static void
6300 iwm_set_channel(struct ieee80211com *ic)
6301 {
6302 }
6303 
6304 static void
6305 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6306 {
6307 }
6308 
6309 static void
6310 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6311 {
6312 	return;
6313 }
6314 
6315 void
6316 iwm_init_task(void *arg1)
6317 {
6318 	struct iwm_softc *sc = arg1;
6319 
6320 	IWM_LOCK(sc);
6321 	while (sc->sc_flags & IWM_FLAG_BUSY) {
6322 #if defined(__DragonFly__)
6323 		lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6324 #else
6325 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6326 #endif
	}
6328 	sc->sc_flags |= IWM_FLAG_BUSY;
6329 	iwm_stop(sc);
6330 	if (sc->sc_ic.ic_nrunning > 0)
6331 		iwm_init(sc);
6332 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6333 	wakeup(&sc->sc_flags);
6334 	IWM_UNLOCK(sc);
6335 }
6336 
6337 static int
6338 iwm_resume(device_t dev)
6339 {
6340 	struct iwm_softc *sc = device_get_softc(dev);
6341 	int do_reinit = 0;
6342 
6343 	/*
6344 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6345 	 * PCI Tx retries from interfering with C3 CPU state.
6346 	 */
6347 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6348 
6349 	if (!sc->sc_attached)
6350 		return 0;
6351 
6352 	iwm_init_task(device_get_softc(dev));
6353 
6354 	IWM_LOCK(sc);
6355 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6356 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6357 		do_reinit = 1;
6358 	}
6359 	IWM_UNLOCK(sc);
6360 
6361 	if (do_reinit)
6362 		ieee80211_resume_all(&sc->sc_ic);
6363 
6364 	return 0;
6365 }
6366 
6367 static int
6368 iwm_suspend(device_t dev)
6369 {
6370 	int do_stop = 0;
6371 	struct iwm_softc *sc = device_get_softc(dev);
6372 
	do_stop = (sc->sc_ic.ic_nrunning > 0);
6374 
6375 	if (!sc->sc_attached)
6376 		return (0);
6377 
6378 	ieee80211_suspend_all(&sc->sc_ic);
6379 
6380 	if (do_stop) {
6381 		IWM_LOCK(sc);
6382 		iwm_stop(sc);
6383 		sc->sc_flags |= IWM_FLAG_SCANNING;
6384 		IWM_UNLOCK(sc);
6385 	}
6386 
6387 	return (0);
6388 }
6389 
6390 static int
6391 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6392 {
6393 	struct iwm_fw_info *fw = &sc->sc_fw;
6394 	device_t dev = sc->sc_dev;
6395 	int i;
6396 
6397 	if (!sc->sc_attached)
6398 		return 0;
6399 	sc->sc_attached = 0;
6400 	if (do_net80211) {
6401 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6402 	}
6403 	iwm_stop_device(sc);
6404 	if (do_net80211) {
6405 		IWM_LOCK(sc);
6406 		iwm_xmit_queue_drain(sc);
6407 		IWM_UNLOCK(sc);
6408 		ieee80211_ifdetach(&sc->sc_ic);
6409 	}
6410 	callout_drain(&sc->sc_led_blink_to);
6411 	callout_drain(&sc->sc_watchdog_to);
6412 
6413 	iwm_phy_db_free(sc->sc_phy_db);
6414 	sc->sc_phy_db = NULL;
6415 
6416 	iwm_free_nvm_data(sc->nvm_data);
6417 
6418 	/* Free descriptor rings */
6419 	iwm_free_rx_ring(sc, &sc->rxq);
6420 	for (i = 0; i < nitems(sc->txq); i++)
6421 		iwm_free_tx_ring(sc, &sc->txq[i]);
6422 
6423 	/* Free firmware */
6424 	if (fw->fw_fp != NULL)
6425 		iwm_fw_info_free(fw);
6426 
6427 	/* Free scheduler */
6428 	iwm_dma_contig_free(&sc->sched_dma);
6429 	iwm_dma_contig_free(&sc->ict_dma);
6430 	iwm_dma_contig_free(&sc->kw_dma);
6431 	iwm_dma_contig_free(&sc->fw_dma);
6432 
6433 	iwm_free_fw_paging(sc);
6434 
6435 	/* Finished with the hardware - detach things */
6436 	iwm_pci_detach(dev);
6437 
6438 	if (sc->sc_notif_wait != NULL) {
6439 		iwm_notification_wait_free(sc->sc_notif_wait);
6440 		sc->sc_notif_wait = NULL;
6441 	}
6442 
6443 	IWM_LOCK_DESTROY(sc);
6444 
6445 	return (0);
6446 }
6447 
6448 static int
6449 iwm_detach(device_t dev)
6450 {
6451 	struct iwm_softc *sc = device_get_softc(dev);
6452 
6453 	return (iwm_detach_local(sc, 1));
6454 }
6455 
static device_method_t iwm_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwm_probe),
	DEVMETHOD(device_attach,	iwm_attach),
	DEVMETHOD(device_detach,	iwm_detach),
	DEVMETHOD(device_suspend,	iwm_suspend),
	DEVMETHOD(device_resume,	iwm_resume),

	DEVMETHOD_END
};

static driver_t iwm_pci_driver = {
	"iwm",
	iwm_pci_methods,
	sizeof (struct iwm_softc)
};
6472 
6473 static devclass_t iwm_devclass;
6474 
6475 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6476 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6477 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6478 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6479