xref: /dragonfly/sys/dev/netif/iwm/if_iwm.c (revision a639f788)
1 /*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89 
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 /*
106  *				DragonFly work
107  *
108  * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109  *	 changes to remove per-device network interface (DragonFly has not
110  *	 caught up to that yet on the WLAN side).
111  *
112  * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113  *	malloc -> kmalloc	(in particular, changing improper M_NOWAIT
114  *				specifications to M_INTWAIT.  We still don't
115  *				understand why FreeBSD uses M_NOWAIT for
116  *				critical must-not-fail kmalloc()s).
117  *	free -> kfree
118  *	printf -> kprintf
119  *	(bug fix) memset in iwm_reset_rx_ring.
120  *	(debug)   added several kprintf()s on error
121  *
122  *	header file paths (DFly allows localized path specifications).
123  *	minor header file differences.
124  *
125  * Comprehensive list of adjustments for DragonFly #ifdef'd:
126  *	(safety)  added register read-back serialization in iwm_reset_rx_ring().
127  *	packet counters
128  *	msleep -> lksleep
129  *	mtx -> lk  (mtx functions -> lockmgr functions)
130  *	callout differences
131  *	taskqueue differences
132  *	MSI differences
133  *	bus_setup_intr() differences
134  *	minor PCI config register naming differences
135  */
136 #include <sys/cdefs.h>
137 __FBSDID("$FreeBSD$");
138 
139 #include <sys/param.h>
140 #include <sys/bus.h>
141 #include <sys/endian.h>
142 #include <sys/firmware.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/mbuf.h>
146 #include <sys/module.h>
147 #include <sys/rman.h>
148 #include <sys/sysctl.h>
149 #include <sys/linker.h>
150 
151 #include <machine/endian.h>
152 
153 #include <bus/pci/pcivar.h>
154 #include <bus/pci/pcireg.h>
155 
156 #include <net/bpf.h>
157 
158 #include <net/if.h>
159 #include <net/if_var.h>
160 #include <net/if_arp.h>
161 #include <net/if_dl.h>
162 #include <net/if_media.h>
163 #include <net/if_types.h>
164 
165 #include <netinet/in.h>
166 #include <netinet/in_systm.h>
167 #include <netinet/if_ether.h>
168 #include <netinet/ip.h>
169 
170 #include <netproto/802_11/ieee80211_var.h>
171 #include <netproto/802_11/ieee80211_regdomain.h>
172 #include <netproto/802_11/ieee80211_ratectl.h>
173 #include <netproto/802_11/ieee80211_radiotap.h>
174 
175 #include "if_iwmreg.h"
176 #include "if_iwmvar.h"
177 #include "if_iwm_config.h"
178 #include "if_iwm_debug.h"
179 #include "if_iwm_notif_wait.h"
180 #include "if_iwm_util.h"
181 #include "if_iwm_binding.h"
182 #include "if_iwm_phy_db.h"
183 #include "if_iwm_mac_ctxt.h"
184 #include "if_iwm_phy_ctxt.h"
185 #include "if_iwm_time_event.h"
186 #include "if_iwm_power.h"
187 #include "if_iwm_scan.h"
188 #include "if_iwm_sf.h"
189 #include "if_iwm_sta.h"
190 #include "if_iwm_pcie_trans.h"
191 #include "if_iwm_led.h"
192 #include "if_iwm_fw.h"
193 
/*
 * Channel numbers advertised in the NVM of pre-8000-family devices.
 * The first IWM_NUM_2GHZ_CHANNELS (14) entries are the 2.4 GHz channels;
 * the rest are 5 GHz channels.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* The NVM code indexes fixed-size arrays with entries of this table. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
204 
/*
 * Channel numbers advertised in the NVM of 8000-family devices; a
 * superset of the pre-8000 list.  Same layout: 2.4 GHz entries first,
 * then 5 GHz.
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

/* Count of 2.4 GHz entries at the head of both channel tables above. */
#define IWM_NUM_2GHZ_CHANNELS	14
/*
 * NOTE(review): presumably masks the number-of-HW-addresses field read
 * from the NVM (see iwm_get_n_hw_addrs) — confirm against the NVM layout.
 */
#define IWM_N_HW_ADDR_MASK	0xF
218 
/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;	/* rate in units of 500 kb/s (e.g. 2 == 1 Mb/s) */
	uint8_t plcp;	/* corresponding hardware PLCP value */
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
/* Index of the first CCK entry (1 Mb/s). */
#define IWM_RIDX_CCK	0
/* Index of the first OFDM entry (6 Mb/s); lower indices are CCK. */
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
245 
/* One NVM section image kept in driver memory while parsing the NVM. */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in 'data' */
	uint8_t *data;		/* section contents */
};
250 
/* How long to wait for the firmware's "alive" notification (1 s). */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
/* How long to wait for calibration results from the init firmware (2 s). */
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

/* State filled in while waiting for the firmware "alive" notification. */
struct iwm_mvm_alive_data {
	int valid;		/* non-zero once a valid alive was received */
	uint32_t scd_base_addr;	/* SCD (scheduler) base address from fw */
};
258 
259 static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
260 static int	iwm_firmware_store_section(struct iwm_softc *,
261                                            enum iwm_ucode_type,
262                                            const uint8_t *, size_t);
263 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
264 static void	iwm_fw_info_free(struct iwm_fw_info *);
265 static int	iwm_read_firmware(struct iwm_softc *);
266 #if !defined(__DragonFly__)
267 static void	iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
268 #endif
269 static int	iwm_alloc_fwmem(struct iwm_softc *);
270 static int	iwm_alloc_sched(struct iwm_softc *);
271 static int	iwm_alloc_kw(struct iwm_softc *);
272 static int	iwm_alloc_ict(struct iwm_softc *);
273 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
274 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
275 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
276 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
277                                   int);
278 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
279 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
280 static void	iwm_enable_interrupts(struct iwm_softc *);
281 static void	iwm_restore_interrupts(struct iwm_softc *);
282 static void	iwm_disable_interrupts(struct iwm_softc *);
283 static void	iwm_ict_reset(struct iwm_softc *);
284 static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
285 static void	iwm_stop_device(struct iwm_softc *);
286 static void	iwm_mvm_nic_config(struct iwm_softc *);
287 static int	iwm_nic_rx_init(struct iwm_softc *);
288 static int	iwm_nic_tx_init(struct iwm_softc *);
289 static int	iwm_nic_init(struct iwm_softc *);
290 static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
291 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
292                                    uint16_t, uint8_t *, uint16_t *);
293 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
294 				     uint16_t *, uint32_t);
295 static uint32_t	iwm_eeprom_channel_flags(uint16_t);
296 static void	iwm_add_channel_band(struct iwm_softc *,
297 		    struct ieee80211_channel[], int, int *, int, size_t,
298 		    const uint8_t[]);
299 static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
300 		    struct ieee80211_channel[]);
301 static struct iwm_nvm_data *
302 	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
303 			   const uint16_t *, const uint16_t *,
304 			   const uint16_t *, const uint16_t *,
305 			   const uint16_t *);
306 static void	iwm_free_nvm_data(struct iwm_nvm_data *);
307 static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
308 					       struct iwm_nvm_data *,
309 					       const uint16_t *,
310 					       const uint16_t *);
311 static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
312 			    const uint16_t *);
313 static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
314 static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
315 				  const uint16_t *);
316 static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
317 				   const uint16_t *);
318 static void	iwm_set_radio_cfg(const struct iwm_softc *,
319 				  struct iwm_nvm_data *, uint32_t);
320 static struct iwm_nvm_data *
321 	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
322 static int	iwm_nvm_init(struct iwm_softc *);
323 static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
324 				      const struct iwm_fw_desc *);
325 static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
326 					     bus_addr_t, uint32_t);
327 static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
328 						const struct iwm_fw_img *,
329 						int, int *);
330 static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
331 					   const struct iwm_fw_img *,
332 					   int, int *);
333 static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
334 					       const struct iwm_fw_img *);
335 static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
336 					  const struct iwm_fw_img *);
337 static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
338 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
339 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
340 static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
341                                               enum iwm_ucode_type);
342 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
343 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
344 static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
345 					    struct iwm_rx_phy_info *);
346 static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
347                                       struct iwm_rx_packet *);
348 static int	iwm_get_noise(struct iwm_softc *,
349 		    const struct iwm_mvm_statistics_rx_non_phy *);
350 static void	iwm_mvm_handle_rx_statistics(struct iwm_softc *,
351 		    struct iwm_rx_packet *);
352 static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
353 				    uint32_t, boolean_t);
354 static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
355                                          struct iwm_rx_packet *,
356 				         struct iwm_node *);
357 static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
358 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
359 #if 0
360 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
361                                  uint16_t);
362 #endif
363 static uint8_t	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
364 			struct mbuf *, struct iwm_tx_cmd *);
365 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
366                        struct ieee80211_node *, int);
367 static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
368 			     const struct ieee80211_bpf_params *);
369 static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
370 static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
371 static int	iwm_release(struct iwm_softc *, struct iwm_node *);
372 static struct ieee80211_node *
373 		iwm_node_alloc(struct ieee80211vap *,
374 		               const uint8_t[IEEE80211_ADDR_LEN]);
375 static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
376 static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
377 static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
378 static int	iwm_media_change(struct ifnet *);
379 static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
380 static void	iwm_endscan_cb(void *, int);
381 static int	iwm_send_bt_init_conf(struct iwm_softc *);
382 static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
383 static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
384 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
385 static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
386 static int	iwm_init_hw(struct iwm_softc *);
387 static void	iwm_init(struct iwm_softc *);
388 static void	iwm_start(struct iwm_softc *);
389 static void	iwm_stop(struct iwm_softc *);
390 static void	iwm_watchdog(void *);
391 static void	iwm_parent(struct ieee80211com *);
392 #ifdef IWM_DEBUG
393 static const char *
394 		iwm_desc_lookup(uint32_t);
395 static void	iwm_nic_error(struct iwm_softc *);
396 static void	iwm_nic_umac_error(struct iwm_softc *);
397 #endif
398 static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
399 static void	iwm_notif_intr(struct iwm_softc *);
400 static void	iwm_intr(void *);
401 static int	iwm_attach(device_t);
402 static int	iwm_is_valid_ether_addr(uint8_t *);
403 static void	iwm_preinit(void *);
404 static int	iwm_detach_local(struct iwm_softc *sc, int);
405 static void	iwm_init_task(void *);
406 static void	iwm_radiotap_attach(struct iwm_softc *);
407 static struct ieee80211vap *
408 		iwm_vap_create(struct ieee80211com *,
409 		               const char [IFNAMSIZ], int,
410 		               enum ieee80211_opmode, int,
411 		               const uint8_t [IEEE80211_ADDR_LEN],
412 		               const uint8_t [IEEE80211_ADDR_LEN]);
413 static void	iwm_vap_delete(struct ieee80211vap *);
414 static void	iwm_xmit_queue_drain(struct iwm_softc *);
415 static void	iwm_scan_start(struct ieee80211com *);
416 static void	iwm_scan_end(struct ieee80211com *);
417 static void	iwm_update_mcast(struct ieee80211com *);
418 static void	iwm_set_channel(struct ieee80211com *);
419 static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
420 static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
421 static int	iwm_detach(device_t);
422 
#if defined(__DragonFly__)
/* Non-zero to attempt MSI interrupt allocation (DragonFly-specific knob). */
static int	iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
#endif

/*
 * Non-zero disables LAR support; checked by the LAR/MCC code paths
 * (see iwm_mvm_is_lar_supported / iwm_send_update_mcc_cmd).
 */
static int	iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
431 
432 /*
433  * Firmware parser.
434  */
435 
436 static int
437 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
438 {
439 	const struct iwm_fw_cscheme_list *l = (const void *)data;
440 
441 	if (dlen < sizeof(*l) ||
442 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
443 		return EINVAL;
444 
445 	/* we don't actually store anything for now, always use s/w crypto */
446 
447 	return 0;
448 }
449 
450 static int
451 iwm_firmware_store_section(struct iwm_softc *sc,
452     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
453 {
454 	struct iwm_fw_img *fws;
455 	struct iwm_fw_desc *fwone;
456 
457 	if (type >= IWM_UCODE_TYPE_MAX)
458 		return EINVAL;
459 	if (dlen < sizeof(uint32_t))
460 		return EINVAL;
461 
462 	fws = &sc->sc_fw.img[type];
463 	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
464 		return EINVAL;
465 
466 	fwone = &fws->sec[fws->fw_count];
467 
468 	/* first 32bit are device load offset */
469 	memcpy(&fwone->offset, data, sizeof(uint32_t));
470 
471 	/* rest is data */
472 	fwone->data = data + sizeof(uint32_t);
473 	fwone->len = dlen - sizeof(uint32_t);
474 
475 	fws->fw_count++;
476 
477 	return 0;
478 }
479 
/* Default scan-channel count; overridden by IWM_UCODE_TLV_N_SCAN_CHANNELS. */
#define IWM_DEFAULT_SCAN_CHANNELS 40

/* Wire format of an IWM_UCODE_TLV_DEF_CALIB firmware section. */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		 /* ucode image the defaults apply to */
	struct iwm_tlv_calib_ctrl calib; /* default calibration triggers */
} __packed;
486 
487 static int
488 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
489 {
490 	const struct iwm_tlv_calib_data *def_calib = data;
491 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
492 
493 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
494 		device_printf(sc->sc_dev,
495 		    "Wrong ucode_type %u for default "
496 		    "calibration.\n", ucode_type);
497 		return EINVAL;
498 	}
499 
500 	sc->sc_default_calib[ucode_type].flow_trigger =
501 	    def_calib->calib.flow_trigger;
502 	sc->sc_default_calib[ucode_type].event_trigger =
503 	    def_calib->calib.event_trigger;
504 
505 	return 0;
506 }
507 
508 static int
509 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
510 			struct iwm_ucode_capabilities *capa)
511 {
512 	const struct iwm_ucode_api *ucode_api = (const void *)data;
513 	uint32_t api_index = le32toh(ucode_api->api_index);
514 	uint32_t api_flags = le32toh(ucode_api->api_flags);
515 	int i;
516 
517 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
518 		device_printf(sc->sc_dev,
519 		    "api flags index %d larger than supported by driver\n",
520 		    api_index);
521 		/* don't return an error so we can load FW that has more bits */
522 		return 0;
523 	}
524 
525 	for (i = 0; i < 32; i++) {
526 		if (api_flags & (1U << i))
527 			setbit(capa->enabled_api, i + 32 * api_index);
528 	}
529 
530 	return 0;
531 }
532 
533 static int
534 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
535 			   struct iwm_ucode_capabilities *capa)
536 {
537 	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
538 	uint32_t api_index = le32toh(ucode_capa->api_index);
539 	uint32_t api_flags = le32toh(ucode_capa->api_capa);
540 	int i;
541 
542 	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
543 		device_printf(sc->sc_dev,
544 		    "capa flags index %d larger than supported by driver\n",
545 		    api_index);
546 		/* don't return an error so we can load FW that has more bits */
547 		return 0;
548 	}
549 
550 	for (i = 0; i < 32; i++) {
551 		if (api_flags & (1U << i))
552 			setbit(capa->enabled_capa, i + 32 * api_index);
553 	}
554 
555 	return 0;
556 }
557 
/*
 * Release the firmware image obtained by iwm_read_firmware(): drop the
 * firmware(9) reference and clear all parsed section descriptors, which
 * point into the now-released image data.
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* Section descriptors referenced fw_fp->data; invalidate them. */
	memset(fw->img, 0, sizeof(fw->img));
}
565 
566 static int
567 iwm_read_firmware(struct iwm_softc *sc)
568 {
569 	struct iwm_fw_info *fw = &sc->sc_fw;
570 	const struct iwm_tlv_ucode_header *uhdr;
571 	const struct iwm_ucode_tlv *tlv;
572 	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
573 	enum iwm_ucode_tlv_type tlv_type;
574 	const struct firmware *fwp;
575 	const uint8_t *data;
576 	uint32_t tlv_len;
577 	uint32_t usniffer_img;
578 	const uint8_t *tlv_data;
579 	uint32_t paging_mem_size;
580 	int num_of_cpus;
581 	int error = 0;
582 	size_t len;
583 
584 	/*
585 	 * Load firmware into driver memory.
586 	 * fw_fp will be set.
587 	 */
588 	fwp = firmware_get(sc->cfg->fw_name);
589 	if (fwp == NULL) {
590 		device_printf(sc->sc_dev,
591 		    "could not read firmware %s (error %d)\n",
592 		    sc->cfg->fw_name, error);
593 		goto out;
594 	}
595 	fw->fw_fp = fwp;
596 
597 	/* (Re-)Initialize default values. */
598 	capa->flags = 0;
599 	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
600 	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
601 	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
602 	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
603 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
604 
605 	/*
606 	 * Parse firmware contents
607 	 */
608 
609 	uhdr = (const void *)fw->fw_fp->data;
610 	if (*(const uint32_t *)fw->fw_fp->data != 0
611 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
612 		device_printf(sc->sc_dev, "invalid firmware %s\n",
613 		    sc->cfg->fw_name);
614 		error = EINVAL;
615 		goto out;
616 	}
617 
618 	ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
619 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
620 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
621 	    IWM_UCODE_API(le32toh(uhdr->ver)));
622 	data = uhdr->data;
623 	len = fw->fw_fp->datasize - sizeof(*uhdr);
624 
625 	while (len >= sizeof(*tlv)) {
626 		len -= sizeof(*tlv);
627 		tlv = (const void *)data;
628 
629 		tlv_len = le32toh(tlv->length);
630 		tlv_type = le32toh(tlv->type);
631 		tlv_data = tlv->data;
632 
633 		if (len < tlv_len) {
634 			device_printf(sc->sc_dev,
635 			    "firmware too short: %zu bytes\n",
636 			    len);
637 			error = EINVAL;
638 			goto parse_out;
639 		}
640 		len -= roundup2(tlv_len, 4);
641 		data += sizeof(tlv) + roundup2(tlv_len, 4);
642 
643 		switch ((int)tlv_type) {
644 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
645 			if (tlv_len != sizeof(uint32_t)) {
646 				device_printf(sc->sc_dev,
647 				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
648 				    __func__, tlv_len);
649 				error = EINVAL;
650 				goto parse_out;
651 			}
652 			capa->max_probe_length =
653 			    le32_to_cpup((const uint32_t *)tlv_data);
654 			/* limit it to something sensible */
655 			if (capa->max_probe_length >
656 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
657 				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
658 				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
659 				    "ridiculous\n", __func__);
660 				error = EINVAL;
661 				goto parse_out;
662 			}
663 			break;
664 		case IWM_UCODE_TLV_PAN:
665 			if (tlv_len) {
666 				device_printf(sc->sc_dev,
667 				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
668 				    __func__, tlv_len);
669 				error = EINVAL;
670 				goto parse_out;
671 			}
672 			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
673 			break;
674 		case IWM_UCODE_TLV_FLAGS:
675 			if (tlv_len < sizeof(uint32_t)) {
676 				device_printf(sc->sc_dev,
677 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
678 				    __func__, tlv_len);
679 				error = EINVAL;
680 				goto parse_out;
681 			}
682 			if (tlv_len % sizeof(uint32_t)) {
683 				device_printf(sc->sc_dev,
684 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
685 				    __func__, tlv_len);
686 				error = EINVAL;
687 				goto parse_out;
688 			}
689 			/*
690 			 * Apparently there can be many flags, but Linux driver
691 			 * parses only the first one, and so do we.
692 			 *
693 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
694 			 * Intentional or a bug?  Observations from
695 			 * current firmware file:
696 			 *  1) TLV_PAN is parsed first
697 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
698 			 * ==> this resets TLV_PAN to itself... hnnnk
699 			 */
700 			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
701 			break;
702 		case IWM_UCODE_TLV_CSCHEME:
703 			if ((error = iwm_store_cscheme(sc,
704 			    tlv_data, tlv_len)) != 0) {
705 				device_printf(sc->sc_dev,
706 				    "%s: iwm_store_cscheme(): returned %d\n",
707 				    __func__, error);
708 				goto parse_out;
709 			}
710 			break;
711 		case IWM_UCODE_TLV_NUM_OF_CPU:
712 			if (tlv_len != sizeof(uint32_t)) {
713 				device_printf(sc->sc_dev,
714 				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
715 				    __func__, tlv_len);
716 				error = EINVAL;
717 				goto parse_out;
718 			}
719 			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
720 			if (num_of_cpus == 2) {
721 				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
722 					TRUE;
723 				fw->img[IWM_UCODE_INIT].is_dual_cpus =
724 					TRUE;
725 				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
726 					TRUE;
727 			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
728 				device_printf(sc->sc_dev,
729 				    "%s: Driver supports only 1 or 2 CPUs\n",
730 				    __func__);
731 				error = EINVAL;
732 				goto parse_out;
733 			}
734 			break;
735 		case IWM_UCODE_TLV_SEC_RT:
736 			if ((error = iwm_firmware_store_section(sc,
737 			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
738 				device_printf(sc->sc_dev,
739 				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
740 				    __func__, error);
741 				goto parse_out;
742 			}
743 			break;
744 		case IWM_UCODE_TLV_SEC_INIT:
745 			if ((error = iwm_firmware_store_section(sc,
746 			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
747 				device_printf(sc->sc_dev,
748 				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
749 				    __func__, error);
750 				goto parse_out;
751 			}
752 			break;
753 		case IWM_UCODE_TLV_SEC_WOWLAN:
754 			if ((error = iwm_firmware_store_section(sc,
755 			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
756 				device_printf(sc->sc_dev,
757 				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
758 				    __func__, error);
759 				goto parse_out;
760 			}
761 			break;
762 		case IWM_UCODE_TLV_DEF_CALIB:
763 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
764 				device_printf(sc->sc_dev,
765 				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%u) < sizeof(iwm_tlv_calib_data) (%zu)\n",
766 				    __func__, tlv_len,
767 				    sizeof(struct iwm_tlv_calib_data));
768 				error = EINVAL;
769 				goto parse_out;
770 			}
771 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
772 				device_printf(sc->sc_dev,
773 				    "%s: iwm_set_default_calib() failed: %d\n",
774 				    __func__, error);
775 				goto parse_out;
776 			}
777 			break;
778 		case IWM_UCODE_TLV_PHY_SKU:
779 			if (tlv_len != sizeof(uint32_t)) {
780 				error = EINVAL;
781 				device_printf(sc->sc_dev,
782 				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) < sizeof(uint32_t)\n",
783 				    __func__, tlv_len);
784 				goto parse_out;
785 			}
786 			sc->sc_fw.phy_config =
787 			    le32_to_cpup((const uint32_t *)tlv_data);
788 			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
789 						  IWM_FW_PHY_CFG_TX_CHAIN) >>
790 						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
791 			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
792 						  IWM_FW_PHY_CFG_RX_CHAIN) >>
793 						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
794 			break;
795 
796 		case IWM_UCODE_TLV_API_CHANGES_SET: {
797 			if (tlv_len != sizeof(struct iwm_ucode_api)) {
798 				error = EINVAL;
799 				goto parse_out;
800 			}
801 			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
802 				error = EINVAL;
803 				goto parse_out;
804 			}
805 			break;
806 		}
807 
808 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
809 			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
810 				error = EINVAL;
811 				goto parse_out;
812 			}
813 			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
814 				error = EINVAL;
815 				goto parse_out;
816 			}
817 			break;
818 		}
819 
820 		case 48: /* undocumented TLV */
821 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
822 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
823 			/* ignore, not used by current driver */
824 			break;
825 
826 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
827 			if ((error = iwm_firmware_store_section(sc,
828 			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
829 			    tlv_len)) != 0)
830 				goto parse_out;
831 			break;
832 
833 		case IWM_UCODE_TLV_PAGING:
834 			if (tlv_len != sizeof(uint32_t)) {
835 				error = EINVAL;
836 				goto parse_out;
837 			}
838 			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
839 
840 			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
841 			    "%s: Paging: paging enabled (size = %u bytes)\n",
842 			    __func__, paging_mem_size);
843 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
844 				device_printf(sc->sc_dev,
845 					"%s: Paging: driver supports up to %u bytes for paging image\n",
846 					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
847 				error = EINVAL;
848 				goto out;
849 			}
850 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
851 				device_printf(sc->sc_dev,
852 				    "%s: Paging: image isn't multiple %u\n",
853 				    __func__, IWM_FW_PAGING_SIZE);
854 				error = EINVAL;
855 				goto out;
856 			}
857 
858 			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
859 			    paging_mem_size;
860 			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
861 			sc->sc_fw.img[usniffer_img].paging_mem_size =
862 			    paging_mem_size;
863 			break;
864 
865 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
866 			if (tlv_len != sizeof(uint32_t)) {
867 				error = EINVAL;
868 				goto parse_out;
869 			}
870 			capa->n_scan_channels =
871 			    le32_to_cpup((const uint32_t *)tlv_data);
872 			break;
873 
874 		case IWM_UCODE_TLV_FW_VERSION:
875 			if (tlv_len != sizeof(uint32_t) * 3) {
876 				error = EINVAL;
877 				goto parse_out;
878 			}
879 			ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
880 			    "%d.%d.%d",
881 			    le32toh(((const uint32_t *)tlv_data)[0]),
882 			    le32toh(((const uint32_t *)tlv_data)[1]),
883 			    le32toh(((const uint32_t *)tlv_data)[2]));
884 			break;
885 
886 		case IWM_UCODE_TLV_FW_MEM_SEG:
887 			break;
888 
889 		default:
890 			device_printf(sc->sc_dev,
891 			    "%s: unknown firmware section %d, abort\n",
892 			    __func__, tlv_type);
893 			error = EINVAL;
894 			goto parse_out;
895 		}
896 	}
897 
898 	KASSERT(error == 0, ("unhandled error"));
899 
900  parse_out:
901 	if (error) {
902 		device_printf(sc->sc_dev, "firmware parse error %d, "
903 		    "section type %d\n", error, tlv_type);
904 	}
905 
906  out:
907 	if (error) {
908 		if (fw->fw_fp != NULL)
909 			iwm_fw_info_free(fw);
910 	}
911 
912 	return error;
913 }
914 
915 /*
916  * DMA resource routines
917  */
918 
919 /* fwmem is used to load firmware onto the card */
920 static int
921 iwm_alloc_fwmem(struct iwm_softc *sc)
922 {
923 	/* Must be aligned on a 16-byte boundary. */
924 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
925 	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
926 }
927 
928 /* tx scheduler rings.  not used? */
929 static int
930 iwm_alloc_sched(struct iwm_softc *sc)
931 {
932 	/* TX scheduler rings must be aligned on a 1KB boundary. */
933 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
934 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
935 }
936 
937 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
938 static int
939 iwm_alloc_kw(struct iwm_softc *sc)
940 {
941 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
942 }
943 
944 /* interrupt cause table */
945 static int
946 iwm_alloc_ict(struct iwm_softc *sc)
947 {
948 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
949 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
950 }
951 
/*
 * Allocate all resources for one RX ring: the hardware descriptor
 * array, the shared RX status area, a DMA tag for receive buffers,
 * one spare DMA map, and a DMA map plus mbuf for every ring slot.
 * On any failure, everything allocated so far is torn down via
 * iwm_free_rx_ring() before returning the error.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	/* Each descriptor is a 32-bit DMA address (paddr >> 8). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

        /* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
        error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: could not create RX buf DMA tag, error %d\n",
                    __func__, error);
                goto fail;
        }

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		/* Attach an mbuf and load its DMA address into slot i. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1032 
1033 static void
1034 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1035 {
1036 	/* Reset the ring state */
1037 	ring->cur = 0;
1038 
1039 	/*
1040 	 * The hw rx ring index in shared memory must also be cleared,
1041 	 * otherwise the discrepancy can cause reprocessing chaos.
1042 	 */
1043 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1044 }
1045 
/*
 * Release everything iwm_alloc_rx_ring() set up: descriptor and
 * status DMA memory, any mbufs still attached to ring slots, the
 * per-slot and spare DMA maps, and finally the RX buffer DMA tag.
 * Safe to call on a partially initialized ring (all steps are
 * NULL-checked or idempotent).
 */
static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			/* Sync before unload so the CPU view is coherent. */
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
1078 
/*
 * Allocate resources for TX ring 'qid': the TFD descriptor array
 * and, for the rings actually used (qid <= IWM_MVM_CMD_QUEUE), the
 * command buffer area, a data DMA tag and one DMA map per slot.
 * On failure, partial allocations are released via
 * iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   maxsize, nsegments, maxsize,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
            nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/*
	 * Precompute the DMA addresses of each slot's command and
	 * scratch area inside the contiguous cmd buffer.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1168 
/*
 * Drop all pending frames from a TX ring and reset its software
 * state.  DMA maps and memory stay allocated (contrast with
 * iwm_free_tx_ring()), so the ring can be reused after a restart.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	/* This queue can no longer be "full". */
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	/* Release the "stay awake" request taken for an in-flight command. */
	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}
1196 
/*
 * Release everything iwm_alloc_tx_ring() set up: descriptor and
 * command DMA memory, any mbufs still queued, per-slot DMA maps,
 * and the data DMA tag.  Safe on a partially initialized ring.
 */
static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
1225 
1226 /*
1227  * High-level hardware frobbing routines
1228  */
1229 
/*
 * Enable the default interrupt set and remember the mask so
 * iwm_restore_interrupts() can re-apply it later.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1236 
/* Re-apply the last interrupt mask saved by iwm_enable_interrupts(). */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1242 
/*
 * Mask all interrupts, then ACK anything already pending so no stale
 * cause bits fire once interrupts are re-enabled.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1253 
/*
 * Clear the interrupt cause table, point the hardware at it, and
 * switch the driver into ICT interrupt mode with interrupts
 * re-enabled.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	/* ACK anything pending first so nothing stale fires. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1277 
1278 /*
1279  * Since this .. hard-resets things, it's time to actually
1280  * mark the first vap (if any) as having no mac context.
1281  * It's annoying, but since the driver is potentially being
1282  * stop/start'ed whilst active (thanks openbsd port!) we
1283  * have to correctly track this.
1284  */
1285 static void
1286 iwm_stop_device(struct iwm_softc *sc)
1287 {
1288 	struct ieee80211com *ic = &sc->sc_ic;
1289 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1290 	int chnl, qid;
1291 	uint32_t mask = 0;
1292 
1293 	/* tell the device to stop sending interrupts */
1294 	iwm_disable_interrupts(sc);
1295 
1296 	/*
1297 	 * FreeBSD-local: mark the first vap as not-uploaded,
1298 	 * so the next transition through auth/assoc
1299 	 * will correctly populate the MAC context.
1300 	 */
1301 	if (vap) {
1302 		struct iwm_vap *iv = IWM_VAP(vap);
1303 		iv->phy_ctxt = NULL;
1304 		iv->is_uploaded = 0;
1305 	}
1306 
1307 	/* device going down, Stop using ICT table */
1308 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1309 
1310 	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1311 
1312 	if (iwm_nic_lock(sc)) {
1313 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1314 
1315 		/* Stop each Tx DMA channel */
1316 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1317 			IWM_WRITE(sc,
1318 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1319 			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1320 		}
1321 
1322 		/* Wait for DMA channels to be idle */
1323 		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1324 		    5000)) {
1325 			device_printf(sc->sc_dev,
1326 			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1327 			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1328 		}
1329 		iwm_nic_unlock(sc);
1330 	}
1331 	iwm_pcie_rx_stop(sc);
1332 
1333 	/* Stop RX ring. */
1334 	iwm_reset_rx_ring(sc, &sc->rxq);
1335 
1336 	/* Reset all TX rings. */
1337 	for (qid = 0; qid < nitems(sc->txq); qid++)
1338 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1339 
1340 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1341 		/* Power-down device's busmaster DMA clocks */
1342 		if (iwm_nic_lock(sc)) {
1343 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1344 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1345 			iwm_nic_unlock(sc);
1346 		}
1347 		DELAY(5);
1348 	}
1349 
1350 	/* Make sure (redundant) we've released our request to stay awake */
1351 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1352 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1353 
1354 	/* Stop the device, and put it in low power state */
1355 	iwm_apm_stop(sc);
1356 
1357 	/* stop and reset the on-board processor */
1358 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1359 	DELAY(1000);
1360 
1361 	/*
1362 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1363 	 * This is a bug in certain verions of the hardware.
1364 	 * Certain devices also keep sending HW RF kill interrupt all
1365 	 * the time, unless the interrupt is ACKed even if the interrupt
1366 	 * should be masked. Re-ACK all the interrupts here.
1367 	 */
1368 	iwm_disable_interrupts(sc);
1369 
1370 	/*
1371 	 * Even if we stop the HW, we still want the RF kill
1372 	 * interrupt
1373 	 */
1374 	iwm_enable_rfkill_int(sc);
1375 	iwm_check_rfkill(sc);
1376 }
1377 
/*
 * Program the hardware interface configuration register from the
 * firmware's PHY configuration: MAC step/dash (from the HW revision)
 * plus radio type/step/dash.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_mvm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}
1420 
/*
 * Program the FH RX engine: clear the status area, point the
 * hardware at the RX descriptor ring and status area, and enable
 * RX DMA.  Returns 0 on success or EBUSY if the NIC could not be
 * locked.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1489 
/*
 * Program the FH TX engine: deactivate the scheduler, install the
 * keep-warm page, and point the hardware at every TX descriptor
 * ring.  Returns 0 on success or EBUSY if the NIC could not be
 * locked.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1524 
/*
 * Bring the NIC up far enough to load firmware: APM init, power
 * configuration, interface config, then RX and TX engine init.
 * Returns 0 or the first error from the RX/TX init steps.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
1551 
1552 int
1553 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1554 {
1555 	if (!iwm_nic_lock(sc)) {
1556 		device_printf(sc->sc_dev,
1557 		    "%s: cannot enable txq %d\n",
1558 		    __func__,
1559 		    qid);
1560 		return EBUSY;
1561 	}
1562 
1563 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1564 
1565 	if (qid == IWM_MVM_CMD_QUEUE) {
1566 		/* unactivate before configuration */
1567 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1568 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1569 		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1570 
1571 		iwm_nic_unlock(sc);
1572 
1573 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1574 
1575 		if (!iwm_nic_lock(sc)) {
1576 			device_printf(sc->sc_dev,
1577 			    "%s: cannot enable txq %d\n", __func__, qid);
1578 			return EBUSY;
1579 		}
1580 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1581 		iwm_nic_unlock(sc);
1582 
1583 		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1584 		/* Set scheduler window size and frame limit. */
1585 		iwm_write_mem32(sc,
1586 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1587 		    sizeof(uint32_t),
1588 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1589 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1590 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1591 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1592 
1593 		if (!iwm_nic_lock(sc)) {
1594 			device_printf(sc->sc_dev,
1595 			    "%s: cannot enable txq %d\n", __func__, qid);
1596 			return EBUSY;
1597 		}
1598 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1599 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1600 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1601 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1602 		    IWM_SCD_QUEUE_STTS_REG_MSK);
1603 	} else {
1604 		struct iwm_scd_txq_cfg_cmd cmd;
1605 		int error;
1606 
1607 		iwm_nic_unlock(sc);
1608 
1609 		memset(&cmd, 0, sizeof(cmd));
1610 		cmd.scd_queue = qid;
1611 		cmd.enable = 1;
1612 		cmd.sta_id = sta_id;
1613 		cmd.tx_fifo = fifo;
1614 		cmd.aggregate = 0;
1615 		cmd.window = IWM_FRAME_LIMIT;
1616 
1617 		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1618 		    sizeof(cmd), &cmd);
1619 		if (error) {
1620 			device_printf(sc->sc_dev,
1621 			    "cannot enable txq %d\n", qid);
1622 			return error;
1623 		}
1624 
1625 		if (!iwm_nic_lock(sc))
1626 			return EBUSY;
1627 	}
1628 
1629 	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1630 	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1631 
1632 	iwm_nic_unlock(sc);
1633 
1634 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1635 	    __func__, qid, fifo);
1636 
1637 	return 0;
1638 }
1639 
/*
 * Finish bringing up the transport after the firmware's "alive"
 * notification: reset the ICT table, sanity-check the scheduler SRAM
 * base address against the one the firmware reported, clear the
 * scheduler context area, enable the command queue and all FH TX DMA
 * channels.  Returns 0 on success or EBUSY/an iwm_enable_txq() error.
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	iwm_nic_unlock(sc);

	/* reset context data, TX status and translation data */
	/* NULL source makes iwm_write_mem() zero-fill the region. */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate the TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	/* error is 0 here; kept for symmetry with the paths above. */
	return error;
}
1710 
1711 /*
1712  * NVM read access and content parsing.  We do not support
1713  * external NVM or writing NVM.
1714  * iwlwifi/mvm/nvm.c
1715  */
1716 
1717 /* Default NVM size to read */
1718 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1719 
1720 #define IWM_NVM_WRITE_OPCODE 1
1721 #define IWM_NVM_READ_OPCODE 0
1722 
1723 /* load nvm chunk response */
1724 enum {
1725 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1726 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1727 };
1728 
/*
 * Read one chunk of NVM section 'section' via the firmware's
 * NVM_ACCESS command: up to 'length' bytes starting at word offset
 * 'offset' are copied into data + offset, and *len is set to the
 * number of bytes actually read (0 if the chunk past the section end
 * was requested).  Returns 0 on success or an errno.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * meaning of NOT_VALID_ADDRESS:
			 * driver try to read chunk from address that is
			 * multiple of 2K and got an error since addr is empty.
			 * meaning of (offset != 0): driver already
			 * read valid data from another chunk so this case
			 * is not an error.
			 */
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
				    offset);
			*len = 0;
			ret = 0;
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed with status %d\n", ret);
			ret = EIO;
		}
		goto exit;
	}

	/* Sanity-check the response before trusting its data. */
	if (offset_read != offset) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with invalid offset %d\n",
		    offset_read);
		ret = EINVAL;
		goto exit;
	}

	if (bytes_read > length) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with too much data "
		    "(%d bytes requested, %d bytes received)\n",
		    length, bytes_read);
		ret = EINVAL;
		goto exit;
	}

	/* Write data to NVM */
	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

 exit:
	/* Always release the response buffer obtained via WANT_SKB. */
	iwm_free_resp(sc, &cmd);
	return ret;
}
1815 
1816 /*
1817  * Reads an NVM section completely.
1818  * NICs prior to 7000 family don't have a real NVM, but just read
1819  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1820  * by uCode, we need to manually check in this case that we don't
1821  * overflow and try to read more than the EEPROM size.
1822  * For 7000 family NICs, we supply the maximal size we can read, and
1823  * the uCode fills the response with as much data as we can,
1824  * without overflowing, so no check is needed.
1825  */
1826 static int
1827 iwm_nvm_read_section(struct iwm_softc *sc,
1828 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1829 {
1830 	uint16_t seglen, length, offset = 0;
1831 	int ret;
1832 
1833 	/* Set nvm section read length */
1834 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1835 
1836 	seglen = length;
1837 
1838 	/* Read the NVM until exhausted (reading less than requested) */
1839 	while (seglen == length) {
1840 		/* Check no memory assumptions fail and cause an overflow */
1841 		if ((size_read + offset + length) >
1842 		    sc->cfg->eeprom_size) {
1843 			device_printf(sc->sc_dev,
1844 			    "EEPROM size is too small for NVM\n");
1845 			return ENOBUFS;
1846 		}
1847 
1848 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1849 		if (ret) {
1850 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1851 				    "Cannot read NVM from section %d offset %d, length %d\n",
1852 				    section, offset, length);
1853 			return ret;
1854 		}
1855 		offset += seglen;
1856 	}
1857 
1858 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1859 		    "NVM section %d read completed\n", section);
1860 	*len = offset;
1861 	return 0;
1862 }
1863 
/* NVM offsets (in words) definitions */
/* Offsets for pre-8000-family devices; SW/calib values are relative
 * to the start of their respective sections. */
enum iwm_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1881 
enum iwm_8000_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR0_WFPM_8000 = 0x12,
	IWM_HW_ADDR1_WFPM_8000 = 0x16,
	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
	/* Word offset within the MAC_OVERRIDE section. */
	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION_8000 = 0x1C0,
	IWM_NVM_VERSION_8000 = 0,
	/* Relative to the PHY_SKU section (see iwm_get_radio_cfg()). */
	IWM_RADIO_CFG_8000 = 0,
	IWM_SKU_8000 = 2,
	IWM_N_HW_ADDRS_8000 = 3,

	/* NVM REGULATORY -Section offset (in words) definitions */
	IWM_NVM_CHANNELS_8000 = 0,
	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
	IWM_NVM_LAR_OFFSET_8000 = 0x507,
	IWM_NVM_LAR_ENABLED_8000 = 0x7,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1907 
/* SKU Capabilities (actual values from NVM definition) */
/* Bit masks applied to the SKU word returned by iwm_get_sku(). */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
};
1915 
/* radio config bits (actual values from NVM definition) */
/* Field extractors for the 16-bit RF config word (pre-8000 parts). */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* Field extractors for the 32-bit RF config word (family 8000). */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
1930 
1931 /**
1932  * enum iwm_nvm_channel_flags - channel flags in NVM
1933  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1934  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1935  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1936  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1937  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1938  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1939  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1940  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1941  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1942  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1943  */
1944 enum iwm_nvm_channel_flags {
1945 	IWM_NVM_CHANNEL_VALID = (1 << 0),
1946 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1947 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1948 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1949 	IWM_NVM_CHANNEL_DFS = (1 << 7),
1950 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1951 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1952 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1953 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1954 };
1955 
1956 /*
1957  * Translate EEPROM flags to net80211.
1958  */
1959 static uint32_t
1960 iwm_eeprom_channel_flags(uint16_t ch_flags)
1961 {
1962 	uint32_t nflags;
1963 
1964 	nflags = 0;
1965 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1966 		nflags |= IEEE80211_CHAN_PASSIVE;
1967 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1968 		nflags |= IEEE80211_CHAN_NOADHOC;
1969 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1970 		nflags |= IEEE80211_CHAN_DFS;
1971 		/* Just in case. */
1972 		nflags |= IEEE80211_CHAN_NOADHOC;
1973 	}
1974 
1975 	return (nflags);
1976 }
1977 
/*
 * Append the NVM channels in [ch_idx, ch_num) to 'chans' for the given
 * 'bands' bitmask, skipping entries without IWM_NVM_CHANNEL_VALID.
 * Stops early if ieee80211_add_channel() reports an error (e.g. the
 * channel table is full).
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, size_t ch_num,
    const uint8_t bands[])
{
	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
	uint32_t nflags;
	uint16_t ch_flags;
	uint8_t ieee;
	int error;

	for (; ch_idx < ch_num; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		/* The IEEE channel number comes from a per-family table. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ieee = iwm_nvm_channels[ch_idx];
		else
			ieee = iwm_nvm_channels_8000[ch_idx];

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
			    ieee, ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4");
			continue;
		}

		nflags = iwm_eeprom_channel_flags(ch_flags);
		error = ieee80211_add_channel(chans, maxchans, nchans,
		    ieee, 0, 0, nflags, bands);
		if (error != 0)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
		    "Ch. %d Flags %x [%sGHz] - Added\n",
		    ieee, ch_flags,
		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
		    "5.2" : "2.4");
	}
}
2018 
/*
 * Build the net80211 channel list from the NVM channel flags: 2GHz
 * channels 1-13 as 11b/g, channel 14 as 11b only, and the 5GHz
 * channels only when the SKU enables the 5.2GHz band.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_nvm_data *data = sc->nvm_data;
	uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
	size_t ch_num;

	memset(bands, 0, sizeof(bands));
	/* 1-13: 11b/g channels. */
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
	    IWM_NUM_2GHZ_CHANNELS - 1, bands);

	/* 14: 11b channel only. */
	clrbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans,
	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

	if (data->sku_cap_band_52GHz_enable) {
		/* The 5GHz channel count depends on the family's table. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ch_num = nitems(iwm_nvm_channels);
		else
			ch_num = nitems(iwm_nvm_channels_8000);
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		iwm_add_channel_band(sc, chans, maxchans, nchans,
		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
	}
}
2051 
/*
 * Derive the MAC address on family 8000 parts: prefer the MAC override
 * NVM section; if that address is the reserved pattern, broadcast,
 * multicast or otherwise invalid, fall back to the WFMP PRPH
 * registers; if neither source is available, zero the address.
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
	const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* Values are stored LE (htole32 above), so the byte picks
		 * below are host-endian independent. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2110 
2111 static int
2112 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2113 	    const uint16_t *phy_sku)
2114 {
2115 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2116 		return le16_to_cpup(nvm_sw + IWM_SKU);
2117 
2118 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2119 }
2120 
2121 static int
2122 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2123 {
2124 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2125 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2126 	else
2127 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2128 						IWM_NVM_VERSION_8000));
2129 }
2130 
2131 static int
2132 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2133 		  const uint16_t *phy_sku)
2134 {
2135         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2136                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2137 
2138         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2139 }
2140 
2141 static int
2142 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2143 {
2144 	int n_hw_addr;
2145 
2146 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2147 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2148 
2149 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2150 
2151         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2152 }
2153 
2154 static void
2155 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2156 		  uint32_t radio_cfg)
2157 {
2158 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2159 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2160 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2161 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2162 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2163 		return;
2164 	}
2165 
2166 	/* set the radio configuration for family 8000 */
2167 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2168 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2169 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2170 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2171 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2172 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2173 }
2174 
/*
 * Fill data->hw_addr from the NVM.  Pre-8000 parts store the address
 * 16-bit little-endian in the HW section (byte order 214365); family
 * 8000 uses the override/WFMP scheme.  Returns EINVAL when no valid
 * unicast address was found, 0 otherwise.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
		   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
	if (cfg->mac_addr_from_csr) {
		iwm_set_hw_address_from_csr(sc, data);
        } else
#endif
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
	}

	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
		device_printf(sc->sc_dev, "no valid mac address was found\n");
		return EINVAL;
	}

	return 0;
}
2205 
/*
 * Build an iwm_nvm_data from the raw NVM section images.  The caller
 * owns the returned structure (release with iwm_free_nvm_data());
 * NULL is returned when no valid MAC address could be derived.
 * 'nvm_calib' is currently unused here.
 */
static struct iwm_nvm_data *
iwm_parse_nvm_data(struct iwm_softc *sc,
		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
		   const uint16_t *nvm_calib, const uint16_t *mac_override,
		   const uint16_t *phy_sku, const uint16_t *regulatory)
{
	struct iwm_nvm_data *data;
	uint32_t sku, radio_cfg;
	uint16_t lar_config;

	/* The trailing flexible array holds the per-channel flag words. */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		data = kmalloc(sizeof(*data) +
		    IWM_NUM_CHANNELS * sizeof(uint16_t),
		    M_DEVBUF, M_WAITOK | M_ZERO);
	} else {
		data = kmalloc(sizeof(*data) +
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
		    M_DEVBUF, M_WAITOK | M_ZERO);
	}
	/* NOTE(review): M_WAITOK allocations should not fail; this check
	 * is kept as a defensive measure. */
	if (!data)
		return NULL;

	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
	iwm_set_radio_cfg(sc, data, radio_cfg);

	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n support is deliberately left disabled here. */
	data->sku_cap_11n_enable = 0;

	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		/* The LAR word moved between NVM versions. */
		uint16_t lar_offset = data->nvm_version < 0xE39 ?
				       IWM_NVM_LAR_OFFSET_8000_OLD :
				       IWM_NVM_LAR_OFFSET_8000;

		lar_config = le16_to_cpup(regulatory + lar_offset);
		data->lar_enabled = !!(lar_config &
				       IWM_NVM_LAR_ENABLED_8000);
	}

	/* If no valid mac address was found - bail out */
	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
		kfree(data, M_DEVBUF);
		return NULL;
	}

	/* Channel flags live in the SW section (7000) or REGULATORY
	 * section (8000). */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
		    IWM_NUM_CHANNELS * sizeof(uint16_t));
	} else {
		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
	}

	return data;
}
2266 
2267 static void
2268 iwm_free_nvm_data(struct iwm_nvm_data *data)
2269 {
2270 	if (data != NULL)
2271 		kfree(data, M_DEVBUF);
2272 }
2273 
/*
 * Validate that the NVM sections mandatory for this device family are
 * present, then hand the raw section pointers to iwm_parse_nvm_data().
 * Returns NULL when a required section is missing.
 */
static struct iwm_nvm_data *
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;

	/* Checking for required sections */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[sc->cfg->nvm_hw_section_num].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		/* SW and REGULATORY sections are mandatory */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!sections[sc->cfg->nvm_hw_section_num].data &&
		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			device_printf(sc->sc_dev,
			    "Can't parse mac_address, empty sections\n");
			return NULL;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
			device_printf(sc->sc_dev,
			    "Can't parse phy_sku in B0, empty sections\n");
			return NULL;
		}
	} else {
		panic("unknown device family %d\n", sc->cfg->device_family);
	}

	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
	regulatory = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
	mac_override = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;

	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
	    phy_sku, regulatory);
}
2326 
2327 static int
2328 iwm_nvm_init(struct iwm_softc *sc)
2329 {
2330 	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2331 	int i, ret, section;
2332 	uint32_t size_read = 0;
2333 	uint8_t *nvm_buffer, *temp;
2334 	uint16_t len;
2335 
2336 	memset(nvm_sections, 0, sizeof(nvm_sections));
2337 
2338 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2339 		return EINVAL;
2340 
2341 	/* load NVM values from nic */
2342 	/* Read From FW NVM */
2343 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2344 
2345 	nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF,
2346 	    M_INTWAIT | M_ZERO);
2347 	if (!nvm_buffer)
2348 		return ENOMEM;
2349 	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2350 		/* we override the constness for initial read */
2351 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2352 					   &len, size_read);
2353 		if (ret)
2354 			continue;
2355 		size_read += len;
2356 		temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
2357 		if (!temp) {
2358 			ret = ENOMEM;
2359 			break;
2360 		}
2361 		memcpy(temp, nvm_buffer, len);
2362 
2363 		nvm_sections[section].data = temp;
2364 		nvm_sections[section].length = len;
2365 	}
2366 	if (!size_read)
2367 		device_printf(sc->sc_dev, "OTP is blank\n");
2368 	kfree(nvm_buffer, M_DEVBUF);
2369 
2370 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2371 	if (!sc->nvm_data)
2372 		return EINVAL;
2373 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2374 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2375 
2376 	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2377 		if (nvm_sections[i].data != NULL)
2378 			kfree(nvm_sections[i].data, M_DEVBUF);
2379 	}
2380 
2381 	return 0;
2382 }
2383 
/*
 * Copy one firmware section into device SRAM via the preallocated DMA
 * bounce buffer, in chunks of at most IWM_FH_MEM_TB_MAX_LENGTH bytes.
 * Destinations inside the extended address window require the
 * LMPM_CHICK extended-address bit to be set for the duration of the
 * transfer.  Returns 0 or the error from the chunk upload.
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
	const struct iwm_fw_desc *section)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	uint8_t *v_addr;
	bus_addr_t p_addr;
	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: [%d] uCode section being loaded...\n",
		    __func__, section_num);

	v_addr = dma->vaddr;
	p_addr = dma->paddr;

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		uint32_t copy_size, dst_addr;
		int extended_addr = FALSE;

		copy_size = MIN(chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
			extended_addr = TRUE;

		if (extended_addr)
			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		/* Stage the chunk in the bounce buffer, then DMA it. */
		memcpy(v_addr, (const uint8_t *)section->data + offset,
		    copy_size);
		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			device_printf(sc->sc_dev,
			    "%s: Could not load the [%d] uCode section\n",
			    __func__, section_num);
			break;
		}
	}

	return ret;
}
2436 
2437 /*
2438  * ucode
2439  */
/*
 * Program the FH service-channel DMA engine to copy 'byte_cnt' bytes
 * from host memory at 'phy_addr' into device SRAM at 'dst_addr', then
 * sleep until the FH_TX interrupt handler sets sc_fw_chunk_done.
 * Returns 0 on success, EBUSY if the NIC could not be locked, or
 * ETIMEDOUT if the chunk did not complete within 5 seconds.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
			     bus_addr_t phy_addr, uint32_t byte_cnt)
{
	int ret;

	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Pause the channel while (re)programming it. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);

	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(phy_addr)
	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* Kick off the transfer. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait up to 5s for this segment to load */
	ret = 0;
	while (!sc->sc_fw_chunk_done) {
#if defined(__DragonFly__)
		ret = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", 5 * hz);
#else
		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", 5 * hz);
#endif
		if (ret)
			break;
	}

	if (ret != 0) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
		return ETIMEDOUT;
	}

	return 0;
}
2497 
/*
 * Load the firmware sections belonging to one CPU on family 8000,
 * notifying the ucode of each loaded section via FH_UCODE_LOAD_STATUS.
 * Iteration stops at a separator entry (CPU1/CPU2 or paging).  On
 * return, *first_ucode_section points at the last index examined so
 * the next call continues after it.  Returns 0 or a load error.
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;

	/* CPU1 status bits live in the low half-word, CPU2 in the high. */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				    i);
			break;
		}
		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		}
	}

	*first_ucode_section = last_read_idx;

	iwm_enable_interrupts(sc);

	/* Tell the ucode this CPU's sections are complete. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2559 
/*
 * Load the firmware sections belonging to one CPU (pre-8000 parts; no
 * load-status handshake).  Iteration stops at a separator entry and
 * *first_ucode_section is updated so the next call continues after it.
 * Returns 0 or a load error.
 */
static int
iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	uint32_t last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	*first_ucode_section = last_read_idx;

	return 0;

}
2604 
/*
 * Load a (non-secured) firmware image on pre-8000 parts: CPU1
 * sections first, then - for dual-CPU images - the CPU2 header
 * address and CPU2 sections.  Finally enables interrupts and releases
 * the CPU reset so the firmware starts executing.
 */
static int
iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc,
				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
			iwm_nic_unlock(sc);
		}

		/* load to FW the binary sections of CPU2 */
		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	iwm_enable_interrupts(sc);

	/* release CPU reset */
	IWM_WRITE(sc, IWM_CSR_RESET, 0);

	return 0;
}
2642 
/*
 * Load a secured firmware image on family 8000: release the CPU reset
 * first (the ucode must be ready to receive the image), then load the
 * CPU1 and CPU2 sections with the load-status handshake performed in
 * iwm_pcie_load_cpu_sections_8000().
 */
int
iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
	const struct iwm_fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
		    image->is_dual_cpus ? "Dual" : "Single");

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
		    IWM_RELEASE_CPU_RESET_BIT);
		iwm_nic_unlock(sc);
	}

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
	    &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
	    &first_ucode_section);
}
2671 
/* XXX Get rid of this definition */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
	/* Mask everything except FH_TX, which is needed while loading
	 * firmware chunks (see iwm_pcie_load_firmware_chunk()). */
	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2680 
/* XXX Add proper rfkill support code */
/*
 * Prepare the hardware and load the given firmware image: take
 * ownership of the device, clear pending interrupts and rfkill
 * handshake bits, init the NIC, then load the image with only the
 * FH_TX interrupt enabled.  Returns 0 or an errno.
 */
static int
iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
{
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwm_prepare_card_hw(sc)) {
		device_printf(sc->sc_dev,
		    "%s: Exit HW not ready\n", __func__);
		ret = EIO;
		goto out;
	}

	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	iwm_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	ret = iwm_nic_init(sc);
	if (ret) {
		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwm_enable_fw_load_int(sc);

	/* really make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
	else
		ret = iwm_pcie_load_given_ucode(sc, fw);

	/* XXX re-check RF-Kill state */

out:
	return ret;
}
2737 
2738 static int
2739 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2740 {
2741 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2742 		.valid = htole32(valid_tx_ant),
2743 	};
2744 
2745 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2746 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2747 }
2748 
/*
 * Send the PHY configuration and the calibration trigger masks for
 * the currently running ucode type to the firmware.
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2767 
/*
 * Notification-wait callback for the firmware ALIVE response.  Three
 * response layouts are supported (ver1/ver2/ver3), distinguished by
 * payload size.  Extracts the error/log event table pointers, the
 * scheduler base address and the ALIVE status into *data; ver2/ver3
 * additionally carry the UMAC error table (enables UMAC logging).
 * Always returns TRUE to end the wait.
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_mvm_alive_data *alive_data = data;
	struct iwm_mvm_alive_resp_ver1 *palive1;
	struct iwm_mvm_alive_resp_ver2 *palive2;
	struct iwm_mvm_alive_resp *palive;

	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		sc->support_umac_log = FALSE;
                sc->error_event_table =
                        le32toh(palive1->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive1->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

                alive_data->valid = le16toh(palive1->status) ==
                                    IWM_ALIVE_STATUS_OK;
                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16toh(palive1->status), palive1->ver_type,
                             palive1->ver_subtype, palive1->flags);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;
		sc->error_event_table =
			le32toh(palive2->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
		sc->umac_error_event_table =
                        le32toh(palive2->error_info_addr);

		alive_data->valid = le16toh(palive2->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive2->status), palive2->ver_type,
			    palive2->ver_subtype, palive2->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    palive2->umac_major, palive2->umac_minor);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		sc->error_event_table =
			le32toh(palive->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive->error_info_addr);

		alive_data->valid = le16toh(palive->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive->status), palive->ver_type,
			    palive->ver_subtype, palive->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    le32toh(palive->umac_major),
			    le32toh(palive->umac_minor));
	}

	return TRUE;
}
2844 
2845 static int
2846 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2847 	struct iwm_rx_packet *pkt, void *data)
2848 {
2849 	struct iwm_phy_db *phy_db = data;
2850 
2851 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2852 		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2853 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2854 			    __func__, pkt->hdr.code);
2855 		}
2856 		return TRUE;
2857 	}
2858 
2859 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2860 		device_printf(sc->sc_dev,
2861 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2862 	}
2863 
2864 	return FALSE;
2865 }
2866 
/*
 * Load the firmware image of the requested type into the device and
 * block until the firmware's ALIVE notification has been received and
 * parsed (by iwm_alive_fn).
 *
 * On success sc->cur_ucode is left set to ucode_type and
 * sc->ucode_loaded is TRUE; on any failure before the ALIVE arrives,
 * sc->cur_ucode is rolled back to its previous value.
 *
 * Returns 0 on success, or an errno on failure.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
	enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_mvm_alive_data alive_data;
	const struct iwm_fw_img *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

	fw = &sc->sc_fw.img[ucode_type];
	/*
	 * Switch cur_ucode before starting the firmware; it is restored
	 * to old_type on every failure path below.
	 */
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	memset(&alive_data, 0, sizeof(alive_data));
	/*
	 * Register for the ALIVE notification before starting the
	 * firmware so the response cannot be missed.
	 */
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
				   alive_cmd, NELEM(alive_cmd),
				   iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		/* Nothing will fire the wait entry now; remove it. */
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		/*
		 * On family-8000 devices, dump the secure-boot CPU status
		 * registers to help diagnose the failed load.
		 */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
			if (iwm_nic_lock(sc)) {
				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
				iwm_nic_unlock(sc);
			}
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    a, b);
		}
		sc->cur_ucode = old_type;
		return error;
	}

	/* ALIVE arrived but reported a bad status. */
	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	/* Program the scheduler base address reported by the firmware. */
	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		error = iwm_save_fw_paging(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to save the FW paging image\n",
			    __func__);
			return error;
		}

		error = iwm_send_paging_cmd(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to send the paging cmd\n", __func__);
			iwm_free_fw_paging(sc);
			return error;
		}
	}

	if (!error)
		sc->ucode_loaded = TRUE;
	return error;
}
2955 
2956 /*
2957  * mvm misc bits
2958  */
2959 
/*
 * Run the INIT firmware image and perform the initial calibration
 * sequence.  With justnvm != 0, only the NVM (EEPROM-equivalent) is
 * read and the MAC address extracted; the calibration steps are
 * skipped.
 *
 * Returns 0 on success, or an errno on failure.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	struct iwm_notification_wait calib_wait;
	static const uint16_t init_complete[] = {
		IWM_INIT_COMPLETE_NOTIF,
		IWM_CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/*
	 * Register for calibration results and init-complete before
	 * loading the firmware, so no notification can be missed.
	 */
	iwm_init_notification_wait(sc->sc_notif_wait,
				   &calib_wait,
				   init_complete,
				   NELEM(init_complete),
				   iwm_wait_phy_db_entry,
				   sc->sc_phy_db);

	/* Will also start the device */
	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
	if (ret) {
		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
		    ret);
		goto error;
	}

	if (justnvm) {
		/* Read nvm */
		ret = iwm_nvm_init(sc);
		if (ret) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			goto error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
		/*
		 * Intentional: the NVM-only path also exits via 'error'
		 * even on success (ret == 0 here); that label merely
		 * removes the notification registration before returning.
		 */
		goto error;
	}

	ret = iwm_send_bt_init_conf(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", ret);
		goto error;
	}

	/* Send TX valid antennas before triggering calibrations */
	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", ret);
		goto error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwm_send_phy_cfg_cmd(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "%s: Failed to run INIT calibrations: %d\n",
		    __func__, ret);
		goto error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.
	 */
	IWM_UNLOCK(sc);
	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
	    IWM_MVM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);


	/*
	 * Skip iwm_remove_notification(): iwm_wait_notification has
	 * already consumed/released the wait entry on this path.
	 */
	goto out;

error:
	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
	return ret;
}
3047 
3048 /*
3049  * receive side
3050  */
3051 
3052 /* (re)stock rx ring, called at init-time and at runtime */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	/*
	 * Allocate a fresh receive mbuf cluster and DMA-map it into RX
	 * ring slot 'idx', replacing whatever buffer was there.
	 *
	 * NOTE(review): the 'size' parameter is currently unused; the
	 * allocation below always uses IWM_RBUF_SIZE.
	 *
	 * Returns 0 on success, ENOBUFS or a bus_dma error otherwise.
	 */
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap;
	bus_dma_segment_t seg;
	int nsegs, error;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/*
	 * Load the new mbuf into the ring's spare map first, so the
	 * slot's existing mapping stays intact if the load fails.
	 */
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
	    m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		m_freem(m);
		return error;
	}

	/* Release the old buffer's mapping, if the slot was occupied. */
	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* The hardware takes a 256-byte-aligned address shifted right by 8. */
	KKASSERT((seg.ds_addr & 255) == 0);
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
3101 
3102 /*
3103  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3104  * values are reported by the fw as positive values - need to negate
3105  * to obtain their dBM.  Account for missing antennas by replacing 0
3106  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3107  */
3108 static int
3109 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3110 {
3111 	int energy_a, energy_b, energy_c, max_energy;
3112 	uint32_t val;
3113 
3114 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3115 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3116 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3117 	energy_a = energy_a ? -energy_a : -256;
3118 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3119 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3120 	energy_b = energy_b ? -energy_b : -256;
3121 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3122 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3123 	energy_c = energy_c ? -energy_c : -256;
3124 	max_energy = MAX(energy_a, energy_b);
3125 	max_energy = MAX(max_energy, energy_c);
3126 
3127 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3128 	    "energy In A %d B %d C %d , and max %d\n",
3129 	    energy_a, energy_b, energy_c, max_energy);
3130 
3131 	return max_energy;
3132 }
3133 
3134 static void
3135 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3136 {
3137 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3138 
3139 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3140 
3141 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3142 }
3143 
3144 /*
3145  * Retrieve the average noise (in dBm) among receivers.
3146  */
3147 static int
3148 iwm_get_noise(struct iwm_softc *sc,
3149 	const struct iwm_mvm_statistics_rx_non_phy *stats)
3150 {
3151 	int i, total, nbant, noise;
3152 
3153 	total = nbant = noise = 0;
3154 	for (i = 0; i < 3; i++) {
3155 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3156 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3157 		    __func__, i, noise);
3158 
3159 		if (noise) {
3160 			total += noise;
3161 			nbant++;
3162 		}
3163 	}
3164 
3165 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3166 	    __func__, nbant, total);
3167 #if 0
3168 	/* There should be at least one antenna but check anyway. */
3169 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3170 #else
3171 	/* For now, just hard-code it to -96 to be safe */
3172 	return (-96);
3173 #endif
3174 }
3175 
3176 static void
3177 iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3178 {
3179 	struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3180 
3181 	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3182 	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3183 }
3184 
3185 /*
3186  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3187  *
3188  * Handles the actual data of the Rx packet from the fw
3189  */
3190 static boolean_t
3191 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3192 	boolean_t stolen)
3193 {
3194 	struct ieee80211com *ic = &sc->sc_ic;
3195 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3196 	struct ieee80211_frame *wh;
3197 	struct ieee80211_node *ni;
3198 	struct ieee80211_rx_stats rxs;
3199 	struct iwm_rx_phy_info *phy_info;
3200 	struct iwm_rx_mpdu_res_start *rx_res;
3201 	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3202 	uint32_t len;
3203 	uint32_t rx_pkt_status;
3204 	int rssi;
3205 
3206 	phy_info = &sc->sc_last_phy_info;
3207 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3208 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3209 	len = le16toh(rx_res->byte_count);
3210 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3211 
3212 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3213 		device_printf(sc->sc_dev,
3214 		    "dsp size out of range [0,20]: %d\n",
3215 		    phy_info->cfg_phy_cnt);
3216 		return FALSE;
3217 	}
3218 
3219 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3220 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3221 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3222 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3223 		return FALSE; /* drop */
3224 	}
3225 
3226 	rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3227 	/* Note: RSSI is absolute (ie a -ve value) */
3228 	if (rssi < IWM_MIN_DBM)
3229 		rssi = IWM_MIN_DBM;
3230 	else if (rssi > IWM_MAX_DBM)
3231 		rssi = IWM_MAX_DBM;
3232 
3233 	/* Map it to relative value */
3234 	rssi = rssi - sc->sc_noise;
3235 
3236 	/* replenish ring for the buffer we're going to feed to the sharks */
3237 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3238 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3239 		    __func__);
3240 		return FALSE;
3241 	}
3242 
3243 	m->m_data = pkt->data + sizeof(*rx_res);
3244 	m->m_pkthdr.len = m->m_len = len;
3245 
3246 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3247 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3248 
3249 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3250 
3251 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3252 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3253 	    __func__,
3254 	    le16toh(phy_info->channel),
3255 	    le16toh(phy_info->phy_flags));
3256 
3257 	/*
3258 	 * Populate an RX state struct with the provided information.
3259 	 */
3260 	bzero(&rxs, sizeof(rxs));
3261 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3262 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3263 	rxs.c_ieee = le16toh(phy_info->channel);
3264 	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3265 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3266 	} else {
3267 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3268 	}
3269 	/* rssi is in 1/2db units */
3270 	rxs.rssi = rssi * 2;
3271 	rxs.nf = sc->sc_noise;
3272 
3273 	if (ieee80211_radiotap_active_vap(vap)) {
3274 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3275 
3276 		tap->wr_flags = 0;
3277 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3278 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3279 		tap->wr_chan_freq = htole16(rxs.c_freq);
3280 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3281 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3282 		tap->wr_dbm_antsignal = (int8_t)rssi;
3283 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3284 		tap->wr_tsft = phy_info->system_timestamp;
3285 		switch (phy_info->rate) {
3286 		/* CCK rates. */
3287 		case  10: tap->wr_rate =   2; break;
3288 		case  20: tap->wr_rate =   4; break;
3289 		case  55: tap->wr_rate =  11; break;
3290 		case 110: tap->wr_rate =  22; break;
3291 		/* OFDM rates. */
3292 		case 0xd: tap->wr_rate =  12; break;
3293 		case 0xf: tap->wr_rate =  18; break;
3294 		case 0x5: tap->wr_rate =  24; break;
3295 		case 0x7: tap->wr_rate =  36; break;
3296 		case 0x9: tap->wr_rate =  48; break;
3297 		case 0xb: tap->wr_rate =  72; break;
3298 		case 0x1: tap->wr_rate =  96; break;
3299 		case 0x3: tap->wr_rate = 108; break;
3300 		/* Unknown rate: should not happen. */
3301 		default:  tap->wr_rate =   0;
3302 		}
3303 	}
3304 
3305 	IWM_UNLOCK(sc);
3306 	if (ni != NULL) {
3307 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3308 		ieee80211_input_mimo(ni, m, &rxs);
3309 		ieee80211_free_node(ni);
3310 	} else {
3311 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3312 		ieee80211_input_mimo_all(ic, m, &rxs);
3313 	}
3314 	IWM_LOCK(sc);
3315 
3316 	return TRUE;
3317 }
3318 
3319 static int
3320 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3321 	struct iwm_node *in)
3322 {
3323 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3324 	struct ieee80211_node *ni = &in->in_ni;
3325 	struct ieee80211vap *vap = ni->ni_vap;
3326 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3327 	int failack = tx_resp->failure_frame;
3328 	int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3329 	boolean_t rate_matched;
3330 	uint8_t tx_resp_rate;
3331 	int ret;
3332 
3333 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3334 
3335 	/* Update rate control statistics. */
3336 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3337 	    __func__,
3338 	    (int) le16toh(tx_resp->status.status),
3339 	    (int) le16toh(tx_resp->status.sequence),
3340 	    tx_resp->frame_count,
3341 	    tx_resp->bt_kill_count,
3342 	    tx_resp->failure_rts,
3343 	    tx_resp->failure_frame,
3344 	    le32toh(tx_resp->initial_rate),
3345 	    (int) le16toh(tx_resp->wireless_media_time));
3346 
3347 	tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3348 
3349 	/* For rate control, ignore frames sent at different initial rate */
3350 	rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3351 
3352 	if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3353 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3354 		    "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3355 		    "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3356 	}
3357 
3358 	if (status != IWM_TX_STATUS_SUCCESS &&
3359 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3360 		if (rate_matched) {
3361 			ieee80211_ratectl_tx_complete(vap, ni,
3362 			    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3363 		}
3364 		ret = 1;
3365 	} else {
3366 		if (rate_matched) {
3367 			ieee80211_ratectl_tx_complete(vap, ni,
3368 			    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3369 		}
3370 		ret = 0;
3371 	}
3372 
3373 	if (rate_matched) {
3374 		int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3375 		new_rate = vap->iv_bss->ni_txrate;
3376 		if (new_rate != 0 && new_rate != cur_rate) {
3377 			struct iwm_node *in = IWM_NODE(vap->iv_bss);
3378 			iwm_setrates(sc, in, rix);
3379 			iwm_mvm_send_lq_cmd(sc, &in->in_lq, FALSE);
3380 		}
3381 	}
3382 
3383 	return ret;
3384 }
3385 
/*
 * TX response handler: complete the transmitted frame identified by the
 * (qid, idx) pair in the packet header, release its DMA mapping, report
 * the outcome to net80211, and restart transmission if the ring had
 * been marked full.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	/* A TX response arrived, so the hardware is alive: reset watchdog. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	/* Clear the slot before completion so it can be reused. */
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	/* Report the completion (status != 0 means failure) to net80211. */
	ieee80211_tx_complete(&in->in_ni, m, status);

	/* Unblock transmit once the ring drains below the low watermark. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			iwm_start(sc);
		}
	}
}
3425 
3426 /*
3427  * transmit side
3428  */
3429 
3430 /*
3431  * Process a "command done" firmware notification.  This is where we wakeup
3432  * processes waiting for a synchronous command completion.
3433  * from if_iwn
3434  */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake the thread sleeping on this descriptor slot, if any. */
	wakeup(&ring->desc[pkt->hdr.idx]);

	/*
	 * Commands are expected to complete in order; if this slot's
	 * index does not line up with the ring position, some earlier
	 * host command was apparently never acked.
	 */
	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		device_printf(sc->sc_dev,
		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
		/* XXX call iwm_force_nmi() */
	}

	KKASSERT(ring->queued > 0);
	ring->queued--;
	/* Last outstanding command: allow the NIC to sleep again. */
	if (ring->queued == 0)
		iwm_pcie_clear_cmd_in_flight(sc);
}
3469 
#if 0
/*
 * necessary only for block ack mode
 *
 * Currently compiled out: updates the byte-count table that the TX
 * scheduler reads for queue 'qid', slot 'idx'.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	len = roundup(len, 4) / 4;	/* byte count in dwords */

	/* Entry format: station id in the top 4 bits, length below. */
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * I really wonder what this is ?!?
	 * NOTE(review): presumably the low entries are duplicated past
	 * the queue end so the hardware can read wrapped slots without
	 * special-casing -- confirm against the Intel reference driver.
	 */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
3501 
3502 /*
3503  * Fill in the rate related information for a transmit command.
3504  */
static uint8_t
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct mbuf *m, struct iwm_tx_cmd *tx)
{
	/*
	 * Select the TX rate for frame 'm' and fill the rate-related
	 * fields of the firmware TX command.  Returns the chosen rate
	 * (in net80211 units) for radiotap reporting.
	 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/*
	 * Rate selection order: management frames, multicast, a
	 * user-fixed rate, EAPOL (sent at the robust mgmt rate), then
	 * ordinary data (which uses the firmware rate table and returns
	 * early), and finally a default of the mgmt rate.
	 */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		ridx = iwm_rate2ridx(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
                ridx = iwm_rate2ridx(sc, tp->mcastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
        } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
                ridx = iwm_rate2ridx(sc, tp->ucastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
        } else if (m->m_flags & M_EAPOL) {
                ridx = iwm_rate2ridx(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: EAPOL (%d)\n", __func__, tp->mgmtrate);
	} else if (type == IEEE80211_FC0_TYPE_DATA) {
		/* This is the index into the programmed table */
		tx->initial_rate_index = 0;
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA (%d)\n",
		    __func__, ni->ni_txrate);
		return ni->ni_txrate;
	} else {
		ridx = iwm_rate2ridx(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: DEFAULT (%d)\n", __func__, tp->mgmtrate);
	}

	/*
	 * Sanity check ridx, and provide fallback. If the rate lookup
	 * ever fails, iwm_rate2ridx() will already print an error message.
	 */
	if (ridx < 0 || ridx > IWM_RIDX_MAX) {
		if (ic->ic_curmode == IEEE80211_MODE_11A) {
			/*
			 * XXX this assumes the mode is either 11a or not 11a;
			 * definitely won't work for 11n.
			 */
			ridx = IWM_RIDX_OFDM;
		} else {
			ridx = IWM_RIDX_CCK;
		}
	}

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: frame type=%d, ridx=%d, rate=%d, CCK=%d\n",
	    __func__, type, ridx, rinfo->rate, !! (IWM_RIDX_IS_CCK(ridx)));

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo->rate;
}
3579 
#define TB0_SIZE 16
/*
 * Queue a frame for transmission on AC queue 'ac'.
 *
 * Builds the firmware TX command (rate, flags, power-management
 * timeout, copy of the 802.11 header), DMA-maps the payload, fills the
 * TFD descriptor and kicks the ring's write pointer.
 *
 * Called with the driver lock held (see iwm_raw_xmit).  Consumes 'm'
 * on failure.  Returns 0 on success or an errno.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
#if !defined(__DragonFly__)
	struct mbuf *m1;
#endif
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t rate, tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Pick the TX rate and fill the rate fields of the command. */
	rate = iwm_tx_fill_cmd(sc, in, m, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	/* Unicast frames expect an acknowledgement. */
	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* Request RTS/CTS protection for long unicast data frames. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Non-data and multicast frames go out via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	/*
	 * Select the power-management frame timeout based on the
	 * management subtype.
	 */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	/* The header travels in the command; map only the payload. */
	m_adj(m, hdrlen);
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
					    segs, IWM_MAX_SCATTER - 2,
					    &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
#if defined(__DragonFly__)
		/* _defrag already handled fragmentation; just fail. */
		device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
		    error);
		m_freem(m);
		return error;
#else
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
#endif
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TBs 0 and 1 cover the command+header; payload starts at TB 2. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3811 
3812 static int
3813 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3814     const struct ieee80211_bpf_params *params)
3815 {
3816 	struct ieee80211com *ic = ni->ni_ic;
3817 	struct iwm_softc *sc = ic->ic_softc;
3818 	int error = 0;
3819 
3820 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3821 	    "->%s begin\n", __func__);
3822 
3823 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3824 		m_freem(m);
3825 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3826 		    "<-%s not RUNNING\n", __func__);
3827 		return (ENETDOWN);
3828         }
3829 
3830 	IWM_LOCK(sc);
3831 	/* XXX fix this */
3832         if (params == NULL) {
3833 		error = iwm_tx(sc, m, ni, 0);
3834 	} else {
3835 		error = iwm_tx(sc, m, ni, 0);
3836 	}
3837 	sc->sc_tx_timer = 5;
3838 	IWM_UNLOCK(sc);
3839 
3840         return (error);
3841 }
3842 
3843 /*
3844  * mvm/tx.c
3845  */
3846 
3847 /*
3848  * Note that there are transports that buffer frames before they reach
3849  * the firmware. This means that after flush_tx_path is called, the
3850  * queue might not be empty. The race-free way to handle this is to:
3851  * 1) set the station as draining
3852  * 2) flush the Tx path
3853  * 3) wait for the transport queues to be empty
3854  */
3855 int
3856 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3857 {
3858 	int ret;
3859 	struct iwm_tx_path_flush_cmd flush_cmd = {
3860 		.queues_ctl = htole32(tfd_msk),
3861 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3862 	};
3863 
3864 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3865 	    sizeof(flush_cmd), &flush_cmd);
3866 	if (ret)
3867                 device_printf(sc->sc_dev,
3868 		    "Flushing tx queue failed: %d\n", ret);
3869 	return ret;
3870 }
3871 
3872 static int
3873 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3874 {
3875 	struct iwm_time_quota_cmd cmd;
3876 	int i, idx, ret, num_active_macs, quota, quota_rem;
3877 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3878 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3879 	uint16_t id;
3880 
3881 	memset(&cmd, 0, sizeof(cmd));
3882 
3883 	/* currently, PHY ID == binding ID */
3884 	if (ivp) {
3885 		id = ivp->phy_ctxt->id;
3886 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3887 		colors[id] = ivp->phy_ctxt->color;
3888 
3889 		if (1)
3890 			n_ifs[id] = 1;
3891 	}
3892 
3893 	/*
3894 	 * The FW's scheduling session consists of
3895 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3896 	 * equally between all the bindings that require quota
3897 	 */
3898 	num_active_macs = 0;
3899 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3900 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3901 		num_active_macs += n_ifs[i];
3902 	}
3903 
3904 	quota = 0;
3905 	quota_rem = 0;
3906 	if (num_active_macs) {
3907 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3908 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3909 	}
3910 
3911 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3912 		if (colors[i] < 0)
3913 			continue;
3914 
3915 		cmd.quotas[idx].id_and_color =
3916 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3917 
3918 		if (n_ifs[i] <= 0) {
3919 			cmd.quotas[idx].quota = htole32(0);
3920 			cmd.quotas[idx].max_duration = htole32(0);
3921 		} else {
3922 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3923 			cmd.quotas[idx].max_duration = htole32(0);
3924 		}
3925 		idx++;
3926 	}
3927 
3928 	/* Give the remainder of the session to the first binding */
3929 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3930 
3931 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3932 	    sizeof(cmd), &cmd);
3933 	if (ret)
3934 		device_printf(sc->sc_dev,
3935 		    "%s: Failed to send quota: %d\n", __func__, ret);
3936 	return ret;
3937 }
3938 
3939 /*
3940  * ieee80211 routines
3941  */
3942 
3943 /*
3944  * Change to AUTH state in 80211 state machine.  Roughly matches what
3945  * Linux does in bss_info_changed().
3946  */
static int
iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
{
	struct ieee80211_node *ni;
	struct iwm_node *in;
	struct iwm_vap *iv = IWM_VAP(vap);
	uint32_t duration;
	int error;

	/*
	 * XXX i have a feeling that the vap node is being
	 * freed from underneath us. Grr.
	 */
	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWM_NODE(ni);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
	    "%s: called; vap=%p, bss ni=%p\n",
	    __func__,
	    vap,
	    ni);

	/* Not associated yet; iwm_newstate() sets this once we reach RUN. */
	in->in_assoc = 0;

	/*
	 * Firmware bug - it'll crash if the beacon interval is less
	 * than 16. We can't avoid connecting at all, so refuse the
	 * station state change, this will cause net80211 to abandon
	 * attempts to connect to this AP, and eventually wpa_s will
	 * blacklist the AP...
	 */
	if (ni->ni_intval < 16) {
		device_printf(sc->sc_dev,
		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
		error = EINVAL;
		goto out;
	}

	/* Open the firmware multicast filter for this BSSID. */
	error = iwm_allow_mcast(vap, sc);
	if (error) {
		device_printf(sc->sc_dev,
		    "%s: failed to set multicast\n", __func__);
		goto out;
	}

	/*
	 * This is where it deviates from what Linux does.
	 *
	 * Linux iwlwifi doesn't reset the nic each time, nor does it
	 * call ctxt_add() here.  Instead, it adds it during vap creation,
	 * and always does a mac_ctx_changed().
	 *
	 * The openbsd port doesn't attempt to do that - it reset things
	 * at odd states and does the add here.
	 *
	 * So, until the state handling is fixed (ie, we never reset
	 * the NIC except for a firmware failure, which should drag
	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
	 * contexts that are required), let's do a dirty hack here.
	 */
	if (iv->is_uploaded) {
		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update MAC\n", __func__);
			goto out;
		}
	} else {
		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to add MAC\n", __func__);
			goto out;
		}
	}

	/* Point PHY context 0 at the AP's channel. */
	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
	    in->in_ni.ni_chan, 1, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed update phy ctxt\n", __func__);
		goto out;
	}
	iv->phy_ctxt = &sc->sc_phyctxt[0];

	/* Bind the MAC context to the PHY context configured above. */
	if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: binding update cmd\n", __func__);
		goto out;
	}
	/*
	 * Authentication becomes unreliable when powersaving is left enabled
	 * here. Powersaving will be activated again when association has
	 * finished or is aborted.
	 */
	iv->ps_disabled = TRUE;
	error = iwm_mvm_power_update_mac(sc);
	iv->ps_disabled = FALSE;
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed to update power management\n",
		    __func__);
		goto out;
	}
	/* Tell the firmware about the AP station. */
	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed to add sta\n", __func__);
		goto out;
	}

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	/* XXX duration is in units of TU, not MS */
	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
	DELAY(100);

	error = 0;
out:
	/* Drop the reference taken on vap->iv_bss above. */
	ieee80211_free_node(ni);
	return (error);
}
4068 
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	uint32_t tfd_msk;

	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, vap);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up yours, device!
	 */
	/*
	 * Just using 0xf for the queues mask is fine as long as we only
	 * get here from RUN state.
	 */
	tfd_msk = 0xf;
	iwm_xmit_queue_drain(sc);
	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
	/*
	 * We seem to get away with just synchronously sending the
	 * IWM_TXPATH_FLUSH command.
	 */
//	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
	/* Full reset: tear the device down and bring it back up. */
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
4139 
4140 static struct ieee80211_node *
4141 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4142 {
4143 	return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4144 	    M_INTWAIT | M_ZERO);
4145 }
4146 
4147 static uint8_t
4148 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4149 {
4150 	uint8_t plcp = rate_n_flags & 0xff;
4151 	int i;
4152 
4153 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4154 		if (iwm_rates[i].plcp == plcp)
4155 			return iwm_rates[i].rate;
4156 	}
4157 	return 0;
4158 }
4159 
4160 uint8_t
4161 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4162 {
4163 	int i;
4164 	uint8_t rval;
4165 
4166 	for (i = 0; i < rs->rs_nrates; i++) {
4167 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4168 		if (rval == iwm_rates[ridx].rate)
4169 			return rs->rs_rates[i];
4170 	}
4171 
4172 	return 0;
4173 }
4174 
4175 static int
4176 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4177 {
4178 	int i;
4179 
4180 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4181 		if (iwm_rates[i].rate == rate)
4182 			return i;
4183 	}
4184 
4185 	device_printf(sc->sc_dev,
4186 	    "%s: WARNING: device rate for %u not found!\n",
4187 	    __func__, rate);
4188 
4189 	return -1;
4190 }
4191 
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int nrates = rs->rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	/* rix is the rate-control-selected index into ni_rates. */
	KKASSERT(rix >= 0 && rix < nrates);

	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}
	/* Only use rates up to (and including) the chosen one. */
	nrates = imin(rix + 1, nrates);

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	for (i = 0; i < nrates; i++) {
		int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
		int nextant;

		/* Map 802.11 rate to HW rate index. */
		ridx = iwm_rate2ridx(sc, rate);
		if (ridx == -1)
			continue;

		/* Rotate round-robin through the valid TX antennas. */
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		/* tab still holds the last (lowest) rate built above. */
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4272 
4273 static int
4274 iwm_media_change(struct ifnet *ifp)
4275 {
4276 	struct ieee80211vap *vap = ifp->if_softc;
4277 	struct ieee80211com *ic = vap->iv_ic;
4278 	struct iwm_softc *sc = ic->ic_softc;
4279 	int error;
4280 
4281 	error = ieee80211_media_change(ifp);
4282 	if (error != ENETRESET)
4283 		return error;
4284 
4285 	IWM_LOCK(sc);
4286 	if (ic->ic_nrunning > 0) {
4287 		iwm_stop(sc);
4288 		iwm_init(sc);
4289 	}
4290 	IWM_UNLOCK(sc);
4291 	return error;
4292 }
4293 
4294 
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	/* Swap the net80211 lock for the driver lock while we work. */
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		if (nstate == IEEE80211_S_INIT) {
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			error = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			/* Full device reset; see iwm_release(). */
			iwm_release(sc, NULL);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
	case IEEE80211_S_SCAN:
		if (vap->iv_state == IEEE80211_S_AUTH ||
		    vap->iv_state == IEEE80211_S_ASSOC) {
			int myerr;
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			myerr = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			/*
			 * Tear down the firmware state set up by iwm_auth():
			 * station, MAC context and channel binding.  Errors
			 * are logged but not fatal; we still return myerr.
			 */
			error = iwm_mvm_rm_sta(sc, vap, FALSE);
                        if (error) {
                                device_printf(sc->sc_dev,
				    "%s: Failed to remove station: %d\n",
				    __func__, error);
			}
			error = iwm_mvm_mac_ctxt_changed(sc, vap);
                        if (error) {
                                device_printf(sc->sc_dev,
                                    "%s: Failed to change mac context: %d\n",
                                    __func__, error);
                        }
                        error = iwm_mvm_binding_remove_vif(sc, ivp);
                        if (error) {
                                device_printf(sc->sc_dev,
                                    "%s: Failed to remove channel ctx: %d\n",
                                    __func__, error);
                        }
			ivp->phy_ctxt = NULL;
			error = iwm_mvm_power_update_mac(sc);
			if (error != 0) {
				device_printf(sc->sc_dev,
				    "%s: failed to update power management\n",
				    __func__);
			}
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return myerr;
		}
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
		}
		break;

	case IEEE80211_S_ASSOC:
		/*
		 * EBS may be disabled due to previous failures reported by FW.
		 * Reset EBS status here assuming environment has been changed.
		 */
		sc->last_ebs_successful = TRUE;
		break;

	case IEEE80211_S_RUN:
		in = IWM_NODE(vap->iv_bss);
		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_mvm_update_sta(sc, in);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update STA\n", __func__);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}
		in->in_assoc = 1;
		error = iwm_mvm_mac_ctxt_changed(sc, vap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update MAC: %d\n", __func__, error);
		}

		/* Re-enable filtering/power/quota machinery for RUN. */
		iwm_mvm_sf_update(sc, vap, FALSE);
		iwm_mvm_enable_beacon_filter(sc, ivp);
		iwm_mvm_power_update_mac(sc);
		iwm_mvm_update_quotas(sc, ivp);
		/* Seed the link-quality table from the rate control pick. */
		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
		iwm_setrates(sc, in, rix);

		if ((error = iwm_mvm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
		}

		iwm_mvm_led_enable(sc);
		break;

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	/* Finally let net80211 perform its own state transition. */
	return (ivp->iv_newstate(vap, nstate, arg));
}
4467 
4468 void
4469 iwm_endscan_cb(void *arg, int pending)
4470 {
4471 	struct iwm_softc *sc = arg;
4472 	struct ieee80211com *ic = &sc->sc_ic;
4473 
4474 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4475 	    "%s: scan ended\n",
4476 	    __func__);
4477 
4478 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4479 }
4480 
4481 static int
4482 iwm_send_bt_init_conf(struct iwm_softc *sc)
4483 {
4484 	struct iwm_bt_coex_cmd bt_cmd;
4485 
4486 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4487 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4488 
4489 	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4490 	    &bt_cmd);
4491 }
4492 
4493 static boolean_t
4494 iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4495 {
4496 	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4497 	boolean_t tlv_lar = fw_has_capa(&sc->sc_fw.ucode_capa,
4498 					IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4499 
4500 	if (iwm_lar_disable)
4501 		return FALSE;
4502 
4503 	/*
4504 	 * Enable LAR only if it is supported by the FW (TLV) &&
4505 	 * enabled in the NVM
4506 	 */
4507 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4508 		return nvm_lar && tlv_lar;
4509 	else
4510 		return tlv_lar;
4511 }
4512 
4513 static boolean_t
4514 iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4515 {
4516 	return fw_has_api(&sc->sc_fw.ucode_capa,
4517 			  IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4518 	       fw_has_capa(&sc->sc_fw.ucode_capa,
4519 			   IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4520 }
4521 
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	/* Newer firmware replies with the v2 response layout. */
	int resp_v2 = fw_has_capa(&sc->sc_fw.ucode_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	/* Without LAR the MCC update is meaningless; not an error. */
	if (!iwm_mvm_is_lar_supported(sc)) {
		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
		    __func__);
		return 0;
	}

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Country code: two ASCII characters packed into 16 bits. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (iwm_mvm_is_wifi_mcc_supported(sc))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	/* Older firmware takes the shorter v1 command layout. */
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		mcc = mcc_resp->mcc;
		n_channels =  le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels =  le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;  /* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	/* IWM_CMD_WANT_SKB: we own the response buffer, release it. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
4594 
4595 static void
4596 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4597 {
4598 	struct iwm_host_cmd cmd = {
4599 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4600 		.len = { sizeof(uint32_t), },
4601 		.data = { &backoff, },
4602 	};
4603 
4604 	if (iwm_send_cmd(sc, &cmd) != 0) {
4605 		device_printf(sc->sc_dev,
4606 		    "failed to change thermal tx backoff\n");
4607 	}
4608 }
4609 
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	sc->sf_state = IWM_SF_UNINIT;

	if ((error = iwm_start_hw(sc)) != 0) {
		kprintf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	/* Run the INIT firmware image first (calibration etc.). */
	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	sc->sc_ps_disabled = FALSE;
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	/* Smart Fifo failure is logged but not fatal. */
	error = iwm_mvm_sf_update(sc, NULL, FALSE);
	if (error)
		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (error != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration */
	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	/* Initialize tx backoffs to the minimum. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_mvm_tt_tx_backoff(sc, 0);

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* "ZZ" requests the default/world regulatory domain. */
	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
		goto error;

	if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
			goto error;
	}

	/* Enable Tx queues. */
	for (ac = 0; ac < WME_NUM_AC; ac++) {
		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
		    iwm_mvm_ac_to_tx_fifo[ac]);
		if (error)
			goto error;
	}

	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
		goto error;
	}

	return 0;

 error:
	/* Any failure past firmware load tears the device back down. */
	iwm_stop_device(sc);
	return error;
}
4722 
4723 /* Allow multicast from our BSSID. */
4724 static int
4725 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4726 {
4727 	struct ieee80211_node *ni = vap->iv_bss;
4728 	struct iwm_mcast_filter_cmd *cmd;
4729 	size_t size;
4730 	int error;
4731 
4732 	size = roundup(sizeof(*cmd), 4);
4733 	cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4734 	if (cmd == NULL)
4735 		return ENOMEM;
4736 	cmd->filter_own = 1;
4737 	cmd->port_id = 0;
4738 	cmd->count = 0;
4739 	cmd->pass_all = 1;
4740 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4741 
4742 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4743 	    IWM_CMD_SYNC, size, cmd);
4744 	kfree(cmd, M_DEVBUF);
4745 
4746 	return (error);
4747 }
4748 
4749 /*
4750  * ifnet interfaces
4751  */
4752 
4753 static void
4754 iwm_init(struct iwm_softc *sc)
4755 {
4756 	int error;
4757 
4758 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4759 		return;
4760 	}
4761 	sc->sc_generation++;
4762 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4763 
4764 	if ((error = iwm_init_hw(sc)) != 0) {
4765 		kprintf("iwm_init_hw failed %d\n", error);
4766 		iwm_stop(sc);
4767 		return;
4768 	}
4769 
4770 	/*
4771 	 * Ok, firmware loaded and we are jogging
4772 	 */
4773 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4774 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4775 }
4776 
4777 static int
4778 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4779 {
4780 	struct iwm_softc *sc;
4781 	int error;
4782 
4783 	sc = ic->ic_softc;
4784 
4785 	IWM_LOCK(sc);
4786 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4787 		IWM_UNLOCK(sc);
4788 		return (ENXIO);
4789 	}
4790 	error = mbufq_enqueue(&sc->sc_snd, m);
4791 	if (error) {
4792 		IWM_UNLOCK(sc);
4793 		return (error);
4794 	}
4795 	iwm_start(sc);
4796 	IWM_UNLOCK(sc);
4797 	return (0);
4798 }
4799 
4800 /*
4801  * Dequeue packets from sendq and call send.
4802  */
4803 static void
4804 iwm_start(struct iwm_softc *sc)
4805 {
4806 	struct ieee80211_node *ni;
4807 	struct mbuf *m;
4808 	int ac = 0;
4809 
4810 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4811 	while (sc->qfullmsk == 0 &&
4812 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4813 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4814 		if (iwm_tx(sc, m, ni, ac) != 0) {
4815 			if_inc_counter(ni->ni_vap->iv_ifp,
4816 			    IFCOUNTER_OERRORS, 1);
4817 			ieee80211_free_node(ni);
4818 			continue;
4819 		}
4820 		sc->sc_tx_timer = 15;
4821 	}
4822 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4823 }
4824 
static void
iwm_stop(struct iwm_softc *sc)
{

	/* Mark the interface down before tearing down the hardware. */
	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	iwm_led_blink_stop(sc);
	/* Disarm the TX watchdog. */
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
4837 
4838 static void
4839 iwm_watchdog(void *arg)
4840 {
4841 	struct iwm_softc *sc = arg;
4842 
4843 	if (sc->sc_tx_timer > 0) {
4844 		if (--sc->sc_tx_timer == 0) {
4845 			device_printf(sc->sc_dev, "device timeout\n");
4846 #ifdef IWM_DEBUG
4847 			iwm_nic_error(sc);
4848 #endif
4849 			iwm_stop(sc);
4850 #if defined(__DragonFly__)
4851 			++sc->sc_ic.ic_oerrors;
4852 #else
4853 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4854 #endif
4855 			return;
4856 		}
4857 	}
4858 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4859 }
4860 
4861 static void
4862 iwm_parent(struct ieee80211com *ic)
4863 {
4864 	struct iwm_softc *sc = ic->ic_softc;
4865 	int startall = 0;
4866 
4867 	IWM_LOCK(sc);
4868 	if (ic->ic_nrunning > 0) {
4869 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4870 			iwm_init(sc);
4871 			startall = 1;
4872 		}
4873 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4874 		iwm_stop(sc);
4875 	IWM_UNLOCK(sc);
4876 	if (startall)
4877 		ieee80211_start_all(ic);
4878 }
4879 
4880 /*
4881  * The interrupt side of things
4882  */
4883 
4884 /*
4885  * error dumping routines are from iwlwifi/mvm/utils.c
4886  */
4887 
4888 /*
4889  * Note: This structure is read from the device with IO accesses,
4890  * and the reading already does the endian conversion. As it is
4891  * read with uint32_t-sized accesses, any members with a different size
4892  * need to be ordered correctly though!
4893  */
/*
 * Field order and sizes must match the firmware's log layout exactly;
 * do not reorder or resize members (read word-by-word by iwm_read_mem()).
 */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4941 
4942 /*
4943  * UMAC error struct - relevant starting from family 8000 chip.
4944  * Note: This structure is read from the device with IO accesses,
4945  * and the reading already does the endian conversion. As it is
4946  * read with u32-sized accesses, any members with a different size
4947  * need to be ordered correctly though!
4948  */
/*
 * Field order and sizes must match the firmware's UMAC log layout
 * exactly; read word-by-word by iwm_read_mem() in iwm_nic_umac_error().
 */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC uCode version major */
	uint32_t umac_minor;	/* UMAC uCode version minor */
	uint32_t frame_pointer;	/* core register 27*/
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
4966 
/*
 * Error-log layout constants carried over from the Linux iwlwifi
 * driver; used only in the "table.valid * ERROR_ELEM_SIZE" sanity
 * checks of the dump routines below.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4969 
4970 #ifdef IWM_DEBUG
/*
 * Firmware error_id -> human-readable name mapping.  The final
 * "ADVANCED_SYSASSERT" entry is the catch-all that iwm_desc_lookup()
 * returns when no numeric entry matches, so it must stay last.
 */
struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
4992 
4993 static const char *
4994 iwm_desc_lookup(uint32_t num)
4995 {
4996 	int i;
4997 
4998 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4999 		if (advanced_lookup[i].num == num)
5000 			return advanced_lookup[i].name;
5001 
5002 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5003 	return advanced_lookup[i].name;
5004 }
5005 
/*
 * Read the UMAC error event table out of device memory and dump it to
 * the console.  Only called (from iwm_nic_error()) when the firmware
 * advertised a UMAC table pointer, i.e. on the newer (8000-family)
 * chips per the struct comment above.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->umac_error_event_table;

	/* The table lives in upper device memory; reject bogus pointers. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	/* True for any nonzero table.valid (ERROR_ELEM_SIZE > START_OFFSET). */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
5052 
5053 /*
5054  * Support for dumping the error log seemed like a good idea ...
5055  * but it's mostly hex junk and the only sensible thing is the
5056  * hw/ucode revision (which we know anyway).  Since it's here,
5057  * I'll just leave it in, just in case e.g. the Intel guys want to
5058  * help us decipher some "ADVANCED_SYSASSERT" later.
5059  */
/*
 * Read the (LMAC) error event table out of device memory and dump it
 * to the console; chain to the UMAC dump if the firmware provided a
 * UMAC table pointer.  See the comment block above for why this is
 * kept around despite being mostly hex junk.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->error_event_table;
	/* The table lives in upper device memory; reject bogus pointers. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	/* True for any nonzero table.valid (ERROR_ELEM_SIZE > START_OFFSET). */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    table.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

	if (sc->umac_error_event_table)
		iwm_nic_umac_error(sc);
}
5132 #endif
5133 
/*
 * Walk one receive buffer mbuf and dispatch every firmware
 * response/notification packet it contains.  Received 802.11 frames
 * (IWM_REPLY_RX_MPDU_CMD) are handed up the stack; when the mbuf
 * itself is handed up, it is considered "stolen" from the rx ring and
 * must not be freed here again unless a copy was used instead.
 */
static void
iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_cmd_response *cresp;
	struct mbuf *m1;
	uint32_t offset = 0;
	uint32_t maxoff = IWM_RBUF_SIZE;
	uint32_t nextoff;
	boolean_t stolen = FALSE;

/* True while at least a command header (plus status word) fits at 'a'. */
#define HAVEROOM(a)	\
    ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)

	while (HAVEROOM(offset)) {
		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
		    offset);
		int qid, idx, code, len;

		qid = pkt->hdr.qid;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
			break;
		}

		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "rx packet qid=%d idx=%d type=%x\n",
		    qid & ~0x80, pkt->hdr.idx, code);

		len = iwm_rx_packet_len(pkt);
		len += sizeof(uint32_t); /* account for status word */
		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);

		/* Wake anyone synchronously waiting for this notification. */
		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt);
			break;

		case IWM_REPLY_RX_MPDU_CMD: {
			/*
			 * If this is the last frame in the RX buffer, we
			 * can directly feed the mbuf to the sharks here.
			 */
			struct iwm_rx_packet *nextpkt = mtodoff(m,
			    struct iwm_rx_packet *, nextoff);
			if (!HAVEROOM(nextoff) ||
			    (nextpkt->hdr.code == 0 &&
			     (nextpkt->hdr.qid & ~0x80) == 0 &&
			     nextpkt->hdr.idx == 0) ||
			    (nextpkt->len_n_flags ==
			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
				if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
					stolen = FALSE;
					/* Make sure we abort the loop */
					nextoff = maxoff;
				}
				break;
			}

			/*
			 * Use m_copym instead of m_split, because that
			 * makes it easier to keep a valid rx buffer in
			 * the ring, when iwm_mvm_rx_rx_mpdu() fails.
			 *
			 * We need to start m_copym() at offset 0, to get the
			 * M_PKTHDR flag preserved.
			 */
			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (m1) {
				if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
					stolen = TRUE;
				else
					m_freem(m1);
			}
			break;
		}

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION: {
			struct iwm_missed_beacons_notif *resp;
			int missed;

			/* XXX look at mac_id to determine interface ID */
			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

			resp = (void *)pkt->data;
			missed = le32toh(resp->consec_missed_beacons);

			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
			    "%s: MISSED_BEACON: mac_id=%d, "
			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
			    "num_rx=%d\n",
			    __func__,
			    le32toh(resp->mac_id),
			    le32toh(resp->consec_missed_beacons_since_last_rx),
			    le32toh(resp->consec_missed_beacons),
			    le32toh(resp->num_expected_beacons),
			    le32toh(resp->num_recvd_beacons));

			/* Be paranoid */
			if (vap == NULL)
				break;

			/* XXX no net80211 locking? */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (missed > vap->iv_bmissthreshold) {
					/* XXX bad locking; turn into task */
					IWM_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWM_LOCK(sc);
				}
			}

			break; }

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		case IWM_MVM_ALIVE:
			break;

		case IWM_CALIB_RES_NOTIF_PHY_DB:
			break;

		case IWM_STATISTICS_NOTIFICATION:
			iwm_mvm_handle_rx_statistics(sc, pkt);
			break;

		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			/*
			 * NOTE(review): copies sizeof(sc_cmd_resp) bytes
			 * regardless of the packet's actual length; assumes
			 * the rx buffer is at least that large — confirm.
			 */
			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			notif = (void *)pkt->data;

			/* Decode the two-letter country code from the MCC. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
			    "fw source %d sent CC '%s'\n",
			    notif->source_id, sc->sc_fw_mcc);
			break;
		}

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
			struct iwm_dts_measurement_notif_v1 *notif;

			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
				device_printf(sc->sc_dev,
				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
				break;
			}
			notif = (void *)pkt->data;
			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
			    notif->temp);
			break;
		}

		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_SCAN_OFFLOAD_ABORT_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
				 IWM_FW_PAGING_BLOCK_CMD):
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			/* Simple command responses: stash for the waiter. */
			cresp = (void *)pkt->data;
			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case IWM_PHY_DB_CMD:
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE:
			iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
				ieee80211_runtask(ic, &sc->sc_es_task);
			}
			break;

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			notif = (void *)pkt->data;
			break;
		}

		case IWM_SCAN_COMPLETE_UMAC:
			iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
				ieee80211_runtask(ic, &sc->sc_es_task);
			}
			break;

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
			    "complete, status=0x%x, %d channels scanned\n",
			    notif->status, notif->scanned_channels);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			resp = (void *)pkt->data;

			device_printf(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type),
			    resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
			    "TE notif status = 0x%x action = 0x%x\n",
			    notif->status, notif->action);
			break;
		}

		/*
		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
		 * messages. Just ignore them for now.
		 */
		case IWM_DEBUG_LOG_MSG:
			break;

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			rsp = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
			    "queue cfg token=0x%x sta_id=%d "
			    "tid=%d scd_queue=%d\n",
			    rsp->token, rsp->sta_id, rsp->tid,
			    rsp->scd_queue);
			break;
		}

		default:
			device_printf(sc->sc_dev,
			    "frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", qid & ~0x80, idx,
			    pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(qid & (1 << 7)))
			iwm_cmd_done(sc, pkt);

		offset = nextoff;
	}
	/* The mbuf went up the stack; release our reference to it. */
	if (stolen)
		m_freem(m);
#undef HAVEROOM
}
5458 
5459 /*
5460  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5461  * Basic structure from if_iwn
5462  */
/*
 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt:
 * consume every rx-ring entry the firmware has filled (up to its
 * closed_rb_num write pointer), then hand the freed slots back to the
 * hardware.  Basic structure from if_iwn.
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	/* Make the firmware's status writes visible to the CPU. */
	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;

	/*
	 * Process responses
	 */
	while (sc->rxq.cur != hw) {
		struct iwm_rx_ring *ring = &sc->rxq;
		struct iwm_rx_data *data = &ring->data[ring->cur];

		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);

		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
		iwm_handle_rxb(sc, data->m);

		ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
	}

	/*
	 * Tell the firmware that it can reuse the ring entries that
	 * we have just processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
}
5499 
/*
 * Main interrupt handler.  Reads the pending interrupt cause either
 * from the ICT table (when IWM_FLAG_USE_ICT is set) or directly from
 * the CSR registers, then services firmware errors, HW errors,
 * firmware-load completion, rfkill and RX in turn.  Interrupts are
 * masked on entry and restored on the way out.
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

#if defined(__DragonFly__)
	/* Guard against a stray interrupt after iwm_pci_detach(). */
	if (sc->sc_mem == NULL) {
		kprintf("iwm_intr: detached\n");
		return;
	}
#endif
	IWM_LOCK(sc);
	/* Mask all interrupts while we figure out what happened. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Clear the slot so the next pass sees it empty. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the interrupts we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Safely ignore these bits for debug checks below */
	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			kprintf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		sc->sc_fw_chunk_done = 1;
		/* Wake the firmware-load path sleeping on sc_fw. */
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
5659 
5660 /*
5661  * Autoconf glue-sniffing
5662  */
#define	PCI_VENDOR_INTEL		0x8086
#define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
#define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
#define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
#define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
#define	PCI_PRODUCT_INTEL_WL_3168	0x24fb
#define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
#define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
#define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
#define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
#define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
#define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
#define	PCI_PRODUCT_INTEL_WL_8265	0x24fd

/*
 * PCI device ID -> per-chip configuration table, consulted by both
 * iwm_probe() (for the probe description via cfg->name) and
 * iwm_dev_check() (to select sc->cfg).
 */
static const struct iwm_devices {
	uint16_t		device;
	const struct iwm_cfg	*cfg;
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_3168,   &iwm3168_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8265,   &iwm8265_cfg },
};
5694 
5695 static int
5696 iwm_probe(device_t dev)
5697 {
5698 	int i;
5699 
5700 	for (i = 0; i < nitems(iwm_devices); i++) {
5701 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5702 		    pci_get_device(dev) == iwm_devices[i].device) {
5703 			device_set_desc(dev, iwm_devices[i].cfg->name);
5704 			return (BUS_PROBE_DEFAULT);
5705 		}
5706 	}
5707 
5708 	return (ENXIO);
5709 }
5710 
5711 static int
5712 iwm_dev_check(device_t dev)
5713 {
5714 	struct iwm_softc *sc;
5715 	uint16_t devid;
5716 	int i;
5717 
5718 	sc = device_get_softc(dev);
5719 
5720 	devid = pci_get_device(dev);
5721 	for (i = 0; i < NELEM(iwm_devices); i++) {
5722 		if (iwm_devices[i].device == devid) {
5723 			sc->cfg = iwm_devices[i].cfg;
5724 			return (0);
5725 		}
5726 	}
5727 	device_printf(dev, "unknown adapter type\n");
5728 	return ENXIO;
5729 }
5730 
5731 /* PCI registers */
5732 #define PCI_CFG_RETRY_TIMEOUT	0x041
5733 
/*
 * PCI-level attach: disable the PCI retry timeout, enable
 * bus-mastering, map BAR(0) registers, allocate the (MSI if possible)
 * interrupt and install iwm_intr() as the handler.  Returns 0 on
 * success, ENXIO on any failure (caller's fail path is expected to
 * release whatever was allocated).
 */
static int
iwm_pci_attach(device_t dev)
{
	struct iwm_softc *sc;
	int count, error, rid;
	uint16_t reg;
#if defined(__DragonFly__)
	int irq_flags;
#endif

	sc = device_get_softc(dev);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	/* Enable bus-mastering and hardware bug workaround. */
	pci_enable_busmaster(dev);
	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
	/* if !MSI */
	if (reg & PCIM_STATUS_INTxSTATE) {
		reg &= ~PCIM_STATUS_INTxSTATE;
	}
	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));

	/* Map device registers (BAR 0). */
	rid = PCIR_BAR(0);
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(sc->sc_dev, "can't map mem space\n");
		return (ENXIO);
	}
	sc->sc_st = rman_get_bustag(sc->sc_mem);
	sc->sc_sh = rman_get_bushandle(sc->sc_mem);

	/* Install interrupt handler. */
	count = 1;
	rid = 0;
#if defined(__DragonFly__)
	pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
#else
	if (pci_alloc_msi(dev, &count) == 0)
		rid = 1;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
#endif
	if (sc->sc_irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
			return (ENXIO);
	}
	/*
	 * NOTE(review): 'error' is assigned but never checked; success is
	 * judged by sc->sc_ih != NULL instead (relies on the softc being
	 * zeroed at allocation) — confirm bus_setup_intr() leaves sc_ih
	 * untouched on failure on both platforms.
	 */
#if defined(__DragonFly__)
	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
			       iwm_intr, sc, &sc->sc_ih,
			       &wlan_global_serializer);
#else
	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwm_intr, sc, &sc->sc_ih);
#endif
	if (sc->sc_ih == NULL) {
		device_printf(dev, "can't establish interrupt");
#if defined(__DragonFly__)
                pci_release_msi(dev);
#endif
			return (ENXIO);
	}
	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);

	return (0);
}
5804 
/*
 * Release the PCI resources acquired by iwm_pci_attach(): interrupt
 * handler, IRQ resource, MSI allocation and the BAR0 memory mapping.
 * Safe to call with partially-initialized state (NULL checks below).
 */
static void
iwm_pci_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	if (sc->sc_irq != NULL) {
		/* Remove the handler before releasing the IRQ resource. */
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
#if defined(__DragonFly__)
		/* Clear the pointer so a repeated detach is a no-op. */
		sc->sc_irq = NULL;
#endif
        }
	if (sc->sc_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);
#if defined(__DragonFly__)
		sc->sc_mem = NULL;
#endif
	}
}
5827 
5828 
5829 
/*
 * Device attach.  Initializes locks and callouts, allocates all DMA
 * resources (firmware buffer, keep-warm page, ICT table, TX/RX rings),
 * identifies the hardware revision, and schedules iwm_preinit() via an
 * interrupt config hook to finish initialization once interrupts work.
 * On any failure, everything allocated so far is torn down through
 * iwm_detach_local() and ENXIO is returned.
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
#if defined(__DragonFly__)
	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
#else
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
#endif
	callout_init(&sc->sc_led_blink_to);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	sc->sf_state = IWM_SF_UNINIT;

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	sc->last_ebs_successful = TRUE;

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	sc->sc_wantresp = -1;

	/* Match device id */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

		if (iwm_prepare_card_hw(sc) != 0) {
			device_printf(dev, "could not initialize hardware\n");
			goto fail;
		}

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		/* Wait for the MAC clock before touching PRPH registers. */
		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Advertise full-offload scanning */
	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	error = iwm_read_firmware(sc);
	if (error) {
		goto fail;
	} else if (sc->sc_fw.fw_fp == NULL) {
		/*
		 * XXX Add a solution for properly deferring firmware load
		 *     during bootup.
		 */
		goto fail;
	} else {
		/* Defer the rest of attach until interrupts are enabled. */
		sc->sc_preinit_hook.ich_func = iwm_preinit;
		sc->sc_preinit_hook.ich_arg = sc;
		sc->sc_preinit_hook.ich_desc = "iwm";
		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
			device_printf(dev,
			    "config_intrhook_establish failed\n");
			goto fail;
		}
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
6050 
6051 static int
6052 iwm_is_valid_ether_addr(uint8_t *addr)
6053 {
6054 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6055 
6056 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6057 		return (FALSE);
6058 
6059 	return (TRUE);
6060 }
6061 
6062 static int
6063 iwm_wme_update(struct ieee80211com *ic)
6064 {
6065 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6066 	struct iwm_softc *sc = ic->ic_softc;
6067 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6068 	struct iwm_vap *ivp = IWM_VAP(vap);
6069 	struct iwm_node *in;
6070 	struct wmeParams tmp[WME_NUM_AC];
6071 	int aci, error;
6072 
6073 	if (vap == NULL)
6074 		return (0);
6075 
6076 	IEEE80211_LOCK(ic);
6077 	for (aci = 0; aci < WME_NUM_AC; aci++)
6078 		tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6079 	IEEE80211_UNLOCK(ic);
6080 
6081 	IWM_LOCK(sc);
6082 	for (aci = 0; aci < WME_NUM_AC; aci++) {
6083 		const struct wmeParams *ac = &tmp[aci];
6084 		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6085 		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6086 		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6087 		ivp->queue_params[aci].edca_txop =
6088 		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6089 	}
6090 	ivp->have_wme = TRUE;
6091 	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6092 		in = IWM_NODE(vap->iv_bss);
6093 		if (in->in_assoc) {
6094 			if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6095 				device_printf(sc->sc_dev,
6096 				    "%s: failed to update MAC\n", __func__);
6097 			}
6098 		}
6099 	}
6100 	IWM_UNLOCK(sc);
6101 
6102 	return (0);
6103 #undef IWM_EXP2
6104 }
6105 
/*
 * Deferred attach, run via the interrupt config hook established in
 * iwm_attach().  Starts the hardware once, runs the init ucode to pull
 * NVM data, then attaches to net80211 and installs the driver callbacks.
 * On failure the whole device is torn down with iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/* Run the init firmware just long enough to read the NVM. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	/* Install the driver's net80211 callbacks. */
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_wme_update;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6176 
6177 /*
6178  * Attach the interface to 802.11 radiotap.
6179  */
6180 static void
6181 iwm_radiotap_attach(struct iwm_softc *sc)
6182 {
6183         struct ieee80211com *ic = &sc->sc_ic;
6184 
6185 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6186 	    "->%s begin\n", __func__);
6187         ieee80211_radiotap_attach(ic,
6188             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6189                 IWM_TX_RADIOTAP_PRESENT,
6190             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6191                 IWM_RX_RADIOTAP_PRESENT);
6192 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6193 	    "->%s end\n", __func__);
6194 }
6195 
6196 static struct ieee80211vap *
6197 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6198     enum ieee80211_opmode opmode, int flags,
6199     const uint8_t bssid[IEEE80211_ADDR_LEN],
6200     const uint8_t mac[IEEE80211_ADDR_LEN])
6201 {
6202 	struct iwm_vap *ivp;
6203 	struct ieee80211vap *vap;
6204 
6205 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6206 		return NULL;
6207 	ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
6208 	vap = &ivp->iv_vap;
6209 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6210 	vap->iv_bmissthreshold = 10;            /* override default */
6211 	/* Override with driver methods. */
6212 	ivp->iv_newstate = vap->iv_newstate;
6213 	vap->iv_newstate = iwm_newstate;
6214 
6215 	ivp->id = IWM_DEFAULT_MACID;
6216 	ivp->color = IWM_DEFAULT_COLOR;
6217 
6218 	ivp->have_wme = FALSE;
6219 	ivp->ps_disabled = FALSE;
6220 
6221 	ieee80211_ratectl_init(vap);
6222 	/* Complete setup. */
6223 	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6224 	    mac);
6225 	ic->ic_opmode = opmode;
6226 
6227 	return vap;
6228 }
6229 
6230 static void
6231 iwm_vap_delete(struct ieee80211vap *vap)
6232 {
6233 	struct iwm_vap *ivp = IWM_VAP(vap);
6234 
6235 	ieee80211_ratectl_deinit(vap);
6236 	ieee80211_vap_detach(vap);
6237 	kfree(ivp, M_80211_VAP);
6238 }
6239 
6240 static void
6241 iwm_xmit_queue_drain(struct iwm_softc *sc)
6242 {
6243 	struct mbuf *m;
6244 	struct ieee80211_node *ni;
6245 
6246 	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6247 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6248 		ieee80211_free_node(ni);
6249 		m_freem(m);
6250 	}
6251 }
6252 
6253 static void
6254 iwm_scan_start(struct ieee80211com *ic)
6255 {
6256 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6257 	struct iwm_softc *sc = ic->ic_softc;
6258 	int error;
6259 
6260 	IWM_LOCK(sc);
6261 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6262 		/* This should not be possible */
6263 		device_printf(sc->sc_dev,
6264 		    "%s: Previous scan not completed yet\n", __func__);
6265 	}
6266 	if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6267 		error = iwm_mvm_umac_scan(sc);
6268 	else
6269 		error = iwm_mvm_lmac_scan(sc);
6270 	if (error != 0) {
6271 		device_printf(sc->sc_dev, "could not initiate scan\n");
6272 		IWM_UNLOCK(sc);
6273 		ieee80211_cancel_scan(vap);
6274 	} else {
6275 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6276 		iwm_led_blink_start(sc);
6277 		IWM_UNLOCK(sc);
6278 	}
6279 }
6280 
/*
 * net80211 scan-end callback: stop scan LED blinking, restore the LED
 * state for an associated vap, and tell the firmware to stop scanning.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
6309 
static void
iwm_update_mcast(struct ieee80211com *ic)
{
	/* Empty stub; installed as ic_update_mcast in iwm_preinit(). */
}
6314 
static void
iwm_set_channel(struct ieee80211com *ic)
{
	/* Empty stub; installed as ic_set_channel in iwm_preinit(). */
}
6319 
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
	/* Empty stub; scans are fully offloaded to the firmware. */
}
6324 
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
	/*
	 * Empty stub; scans are fully offloaded to the firmware.
	 * (Redundant bare "return;" dropped for consistency with the
	 * sibling stubs above.)
	 */
}
6330 
6331 void
6332 iwm_init_task(void *arg1)
6333 {
6334 	struct iwm_softc *sc = arg1;
6335 
6336 	IWM_LOCK(sc);
6337 	while (sc->sc_flags & IWM_FLAG_BUSY) {
6338 #if defined(__DragonFly__)
6339 		lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6340 #else
6341 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6342 #endif
6343 }
6344 	sc->sc_flags |= IWM_FLAG_BUSY;
6345 	iwm_stop(sc);
6346 	if (sc->sc_ic.ic_nrunning > 0)
6347 		iwm_init(sc);
6348 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6349 	wakeup(&sc->sc_flags);
6350 	IWM_UNLOCK(sc);
6351 }
6352 
6353 static int
6354 iwm_resume(device_t dev)
6355 {
6356 	struct iwm_softc *sc = device_get_softc(dev);
6357 	int do_reinit = 0;
6358 
6359 	/*
6360 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6361 	 * PCI Tx retries from interfering with C3 CPU state.
6362 	 */
6363 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6364 
6365 	if (!sc->sc_attached)
6366 		return 0;
6367 
6368 	iwm_init_task(device_get_softc(dev));
6369 
6370 	IWM_LOCK(sc);
6371 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6372 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6373 		do_reinit = 1;
6374 	}
6375 	IWM_UNLOCK(sc);
6376 
6377 	if (do_reinit)
6378 		ieee80211_resume_all(&sc->sc_ic);
6379 
6380 	return 0;
6381 }
6382 
6383 static int
6384 iwm_suspend(device_t dev)
6385 {
6386 	int do_stop = 0;
6387 	struct iwm_softc *sc = device_get_softc(dev);
6388 
6389 	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6390 
6391 	if (!sc->sc_attached)
6392 		return (0);
6393 
6394 	ieee80211_suspend_all(&sc->sc_ic);
6395 
6396 	if (do_stop) {
6397 		IWM_LOCK(sc);
6398 		iwm_stop(sc);
6399 		sc->sc_flags |= IWM_FLAG_SCANNING;
6400 		IWM_UNLOCK(sc);
6401 	}
6402 
6403 	return (0);
6404 }
6405 
/*
 * Common teardown used by both iwm_detach() and the attach failure
 * paths.  When do_net80211 is nonzero the net80211 state is also torn
 * down (only valid after ieee80211_ifattach() has run in iwm_preinit()).
 * Idempotent: returns immediately once sc_attached has been cleared.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;
	if (do_net80211) {
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
	}
	/* Stop timers and the hardware before freeing anything. */
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		IWM_LOCK(sc);
		iwm_xmit_queue_drain(sc);
		IWM_UNLOCK(sc);
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	IWM_LOCK_DESTROY(sc);

	return (0);
}
6463 
6464 static int
6465 iwm_detach(device_t dev)
6466 {
6467 	struct iwm_softc *sc = device_get_softc(dev);
6468 
6469 	return (iwm_detach_local(sc, 1));
6470 }
6471 
/* newbus device interface glue for the iwm PCI driver. */
static device_method_t iwm_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         iwm_probe),
        DEVMETHOD(device_attach,        iwm_attach),
        DEVMETHOD(device_detach,        iwm_detach),
        DEVMETHOD(device_suspend,       iwm_suspend),
        DEVMETHOD(device_resume,        iwm_resume),

        DEVMETHOD_END
};

static driver_t iwm_pci_driver = {
        "iwm",
        iwm_pci_methods,
        sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

/* Register on the PCI bus; needs firmware(9), pci and wlan modules. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6495