/*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 *				DragonFly work
 *
 * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
 *	 changes to remove per-device network interface (DragonFly has not
 *	 caught up to that yet on the WLAN side).
 *
 * Comprehensive list of adjustments for DragonFly not #ifdef'd:
 *	malloc -> kmalloc	(in particular, changing improper M_NOWAIT
 *				specifications to M_INTWAIT.  We still don't
 *				understand why FreeBSD uses M_NOWAIT for
 *				critical must-not-fail kmalloc()s).
 *	free -> kfree
 *	printf -> kprintf
 *	(bug fix) memset in iwm_reset_rx_ring.
 *	(debug)   added several kprintf()s on error
 *
 *	header file paths (DFly allows localized path specifications).
 *	minor header file differences.
 *
 * Comprehensive list of adjustments for DragonFly #ifdef'd:
 *	(safety)  added register read-back serialization in iwm_reset_rx_ring().
 *	packet counters
 *	msleep -> lksleep
 *	mtx -> lk  (mtx functions -> lockmgr functions)
 *	callout differences
 *	taskqueue differences
 *	MSI differences
 *	bus_setup_intr() differences
 *	minor PCI config register naming differences
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/endian.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#include <netproto/802_11/ieee80211_ratectl.h>
#include <netproto/802_11/ieee80211_radiotap.h>

#include "if_iwmreg.h"
#include "if_iwmvar.h"
#include "if_iwm_config.h"
#include "if_iwm_debug.h"
#include "if_iwm_notif_wait.h"
#include "if_iwm_util.h"
#include "if_iwm_binding.h"
#include "if_iwm_phy_db.h"
#include "if_iwm_mac_ctxt.h"
#include "if_iwm_phy_ctxt.h"
#include "if_iwm_time_event.h"
#include "if_iwm_power.h"
#include "if_iwm_scan.h"
#include "if_iwm_sf.h"
#include "if_iwm_sta.h"
#include "if_iwm_pcie_trans.h"
#include "if_iwm_led.h"
#include "if_iwm_fw.h"

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
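/*
 * Note: the rate field above is in 500 kbps units (the usual net80211
 * convention), so e.g. { 2, IWM_RATE_1M_PLCP } is 1 Mbps CCK and
 * { 108, IWM_RATE_54M_PLCP } is 54 Mbps OFDM; the plcp field is the
 * corresponding rate code passed to the firmware.
 */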
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

struct iwm_mvm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
                                           enum iwm_ucode_type,
                                           const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *);
#if !defined(__DragonFly__)
static void	iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
#endif
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
                                  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_mvm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
                                   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
					       struct iwm_nvm_data *,
					       const uint16_t *,
					       const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
				      const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
					     bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
						const struct iwm_fw_img *,
						int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
					   const struct iwm_fw_img *,
					   int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
					       const struct iwm_fw_img *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
					  const struct iwm_fw_img *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
                                              enum iwm_ucode_type);
static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int	iwm_mvm_config_ltr(struct iwm_softc *sc);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
					    struct iwm_rx_phy_info *);
static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
                                      struct iwm_rx_packet *);
static int	iwm_get_noise(struct iwm_softc *,
		    const struct iwm_mvm_statistics_rx_non_phy *);
static void	iwm_mvm_handle_rx_statistics(struct iwm_softc *,
		    struct iwm_rx_packet *);
static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
				    uint32_t, boolean_t);
static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
                                         struct iwm_rx_packet *,
				         struct iwm_node *);
static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
                                 uint16_t);
#endif
static uint8_t	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct mbuf *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
                       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static int	iwm_release(struct iwm_softc *, struct iwm_node *);
static struct ieee80211_node *
		iwm_node_alloc(struct ieee80211vap *,
		               const uint8_t[IEEE80211_ADDR_LEN]);
static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_media_change(struct ifnet *);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
		iwm_vap_create(struct ieee80211com *,
		               const char [IFNAMSIZ], int,
		               enum ieee80211_opmode, int,
		               const uint8_t [IEEE80211_ADDR_LEN],
		               const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_xmit_queue_drain(struct iwm_softc *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

#if defined(__DragonFly__)
static int	iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
#endif

static int	iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

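/*
 * Store one firmware section from the TLV stream into the per-image
 * section array.  The first 32 bits of the TLV payload hold the device
 * load offset; the remainder is the section data itself.
 */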
static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_img *fws;
	struct iwm_fw_desc *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.img[type];
	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
		return EINVAL;

	fwone = &fws->sec[fws->fw_count];
	/* the first 32 bits are the device load offset */
	memcpy(&fwone->offset, data, sizeof(uint32_t));

	/* rest is data */
	fwone->data = data + sizeof(uint32_t);
	fwone->len = dlen - sizeof(uint32_t);

	fws->fw_count++;

	return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS 40

struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

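/*
 * API/capability flags arrive in the firmware file as (index, 32-bit
 * word) pairs; each pair covers bits [32*index .. 32*index+31] of the
 * driver's enabled_api/enabled_capa bitmaps.  Indices beyond what the
 * driver knows about are skipped (not treated as an error) so that
 * newer firmware images still load.
 */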
static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
			struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_api *ucode_api = (const void *)data;
	uint32_t api_index = le32toh(ucode_api->api_index);
	uint32_t api_flags = le32toh(ucode_api->api_flags);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
		device_printf(sc->sc_dev,
		    "api flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_api, i + 32 * api_index);
	}

	return 0;
}

static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
			   struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
	uint32_t api_index = le32toh(ucode_capa->api_index);
	uint32_t api_flags = le32toh(ucode_capa->api_capa);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
		device_printf(sc->sc_dev,
		    "capa flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_capa, i + 32 * api_index);
	}

	return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	memset(fw->img, 0, sizeof(fw->img));
}

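/*
 * Read the firmware image via firmware(9) and parse it.  The file is an
 * iwm_tlv_ucode_header followed by a stream of TLV records, each with a
 * 32-bit little-endian type and length and a payload padded to a 4-byte
 * boundary.
 */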
static int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	const struct iwm_ucode_tlv *tlv;
	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	uint32_t tlv_len;
	uint32_t usniffer_img;
	const uint8_t *tlv_data;
	uint32_t paging_mem_size;
	int num_of_cpus;
	int error = 0;
	size_t len;

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	fwp = firmware_get(sc->cfg->fw_name);
	if (fwp == NULL) {
		device_printf(sc->sc_dev,
		    "could not read firmware %s\n", sc->cfg->fw_name);
		/* bug fix: error was left at 0 here, reporting success */
		error = ENOENT;
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	capa->flags = 0;
	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}

	ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);
		tlv_data = tlv->data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}
		len -= roundup2(tlv_len, 4);
		data += sizeof(*tlv) + roundup2(tlv_len, 4);

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->max_probe_length =
			    le32_to_cpup((const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (capa->max_probe_length >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
			if (num_of_cpus == 2) {
				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_INIT].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
					TRUE;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%u) != sizeof(iwm_tlv_calib_data) (%zu)\n",
				    __func__, tlv_len,
				    sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				goto parse_out;
			}
			sc->sc_fw.phy_config =
			    le32_to_cpup((const uint32_t *)tlv_data);
			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_TX_CHAIN) >>
						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_RX_CHAIN) >>
						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			if (tlv_len != sizeof(struct iwm_ucode_api)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case 48: /* undocumented TLV */
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "%s: Paging: paging enabled (size = %u bytes)\n",
			    __func__, paging_mem_size);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				device_printf(sc->sc_dev,
					"%s: Paging: driver supports up to %u bytes for paging image\n",
					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
				error = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				device_printf(sc->sc_dev,
				    "%s: Paging: image isn't multiple %u\n",
				    __func__, IWM_FW_PAGING_SIZE);
				error = EINVAL;
				goto out;
			}

			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
			sc->sc_fw.img[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			capa->n_scan_channels =
			    le32_to_cpup((const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

 out:
	if (error) {
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	}

	return error;
}

/*
 * DMA resource routines
 */

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

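/*
 * An RX ring is IWM_RX_RING_COUNT descriptors, each a single 32-bit
 * word holding the DMA address of the corresponding receive buffer
 * (filled in by iwm_rx_addbuf()), plus a separate status area that the
 * hardware updates with its current ring position.
 */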
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

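/*
 * Each TX ring gets IWM_TX_RING_COUNT TFDs (transmit frame descriptors).
 * Rings up to and including the command queue additionally get a
 * contiguous block of iwm_device_cmd structures, one per slot, whose
 * physical addresses are precomputed below so TX submission does not
 * have to map them on the fly.
 */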
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd); there is no need
	 * to allocate command space for the other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   maxsize, nsegments, maxsize,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

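/*
 * The ICT (interrupt cause table) is a DMA table that the device writes
 * interrupt cause entries into; reading causes from host memory is
 * cheaper than reading the interrupt status register across the bus.
 * Resetting it zeroes the table and re-points the hardware at its
 * physical address.
 */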
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(1000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending the HW RF kill interrupt all
	 * the time, unless it is ACKed, even when the interrupt should
	 * be masked. Re-ACK all the interrupts here.
	 */
	iwm_disable_interrupts(sc);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}

static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_mvm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

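/*
 * Enable a TX queue.  The command queue is configured directly through
 * scheduler (SCD) periphery registers; all other queues are set up by
 * sending an IWM_SCD_QUEUE_CFG host command to the firmware.
 */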
int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* deactivate before configuration */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}

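/*
 * Post-"alive" bring-up: once the firmware has reported alive, clear the
 * scheduler context/translation memory, point the scheduler at the DRAM
 * ring area, enable the command queue and the TX DMA channels, and
 * cross-check the SCD base address reported by the firmware against the
 * one read from the periphery registers.
 */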
1641 static int
1642 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1643 {
1644 	int error, chnl;
1645 
1646 	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1647 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1648 
1649 	if (!iwm_nic_lock(sc))
1650 		return EBUSY;
1651 
1652 	iwm_ict_reset(sc);
1653 
1654 	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1655 	if (scd_base_addr != 0 &&
1656 	    scd_base_addr != sc->scd_base_addr) {
1657 		device_printf(sc->sc_dev,
1658 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, scd_base_addr, sc->scd_base_addr);
1660 	}
1661 
1662 	iwm_nic_unlock(sc);
1663 
1664 	/* reset context data, TX status and translation data */
1665 	error = iwm_write_mem(sc,
1666 	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1667 	    NULL, clear_dwords);
1668 	if (error)
1669 		return EBUSY;
1670 
1671 	if (!iwm_nic_lock(sc))
1672 		return EBUSY;
1673 
1674 	/* Set physical address of TX scheduler rings (1KB aligned). */
1675 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1676 
1677 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1678 
1679 	iwm_nic_unlock(sc);
1680 
1681 	/* enable command channel */
1682 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1683 	if (error)
1684 		return error;
1685 
1686 	if (!iwm_nic_lock(sc))
1687 		return EBUSY;
1688 
1689 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1690 
1691 	/* Enable DMA channels. */
1692 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1693 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1694 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1695 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1696 	}
1697 
1698 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1699 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1700 
1701 	iwm_nic_unlock(sc);
1702 
1703 	/* Enable L1-Active */
1704 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1705 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1706 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1707 	}
1708 
1709 	return error;
1710 }
1711 
1712 /*
1713  * NVM read access and content parsing.  We do not support
1714  * external NVM or writing NVM.
1715  * iwlwifi/mvm/nvm.c
1716  */
1717 
1718 /* Default NVM size to read */
1719 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1720 
1721 #define IWM_NVM_WRITE_OPCODE 1
1722 #define IWM_NVM_READ_OPCODE 0
1723 
1724 /* load nvm chunk response */
1725 enum {
1726 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1727 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1728 };
1729 
1730 static int
1731 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1732 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1733 {
1734 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1735 		.offset = htole16(offset),
1736 		.length = htole16(length),
1737 		.type = htole16(section),
1738 		.op_code = IWM_NVM_READ_OPCODE,
1739 	};
1740 	struct iwm_nvm_access_resp *nvm_resp;
1741 	struct iwm_rx_packet *pkt;
1742 	struct iwm_host_cmd cmd = {
1743 		.id = IWM_NVM_ACCESS_CMD,
1744 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1745 		.data = { &nvm_access_cmd, },
1746 	};
1747 	int ret, bytes_read, offset_read;
1748 	uint8_t *resp_data;
1749 
1750 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1751 
1752 	ret = iwm_send_cmd(sc, &cmd);
1753 	if (ret) {
1754 		device_printf(sc->sc_dev,
1755 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1756 		return ret;
1757 	}
1758 
1759 	pkt = cmd.resp_pkt;
1760 
1761 	/* Extract NVM response */
1762 	nvm_resp = (void *)pkt->data;
1763 	ret = le16toh(nvm_resp->status);
1764 	bytes_read = le16toh(nvm_resp->length);
1765 	offset_read = le16toh(nvm_resp->offset);
1766 	resp_data = nvm_resp->data;
1767 	if (ret) {
1768 		if ((offset != 0) &&
1769 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
			 * read a chunk from an address that is a multiple of
			 * 2K and got an error because that address is empty.
			 * Meaning of (offset != 0): the driver has already
			 * read valid data from another chunk, so this case
			 * is not an error.
			 */
1778 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed at offset 0x%x since that section size is a multiple of 2K\n",
1780 				    offset);
1781 			*len = 0;
1782 			ret = 0;
1783 		} else {
1784 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1785 				    "NVM access command failed with status %d\n", ret);
1786 			ret = EIO;
1787 		}
1788 		goto exit;
1789 	}
1790 
1791 	if (offset_read != offset) {
1792 		device_printf(sc->sc_dev,
1793 		    "NVM ACCESS response with invalid offset %d\n",
1794 		    offset_read);
1795 		ret = EINVAL;
1796 		goto exit;
1797 	}
1798 
1799 	if (bytes_read > length) {
1800 		device_printf(sc->sc_dev,
1801 		    "NVM ACCESS response with too much data "
1802 		    "(%d bytes requested, %d bytes received)\n",
1803 		    length, bytes_read);
1804 		ret = EINVAL;
1805 		goto exit;
1806 	}
1807 
	/* Copy the NVM response data into the caller's buffer. */
1809 	memcpy(data + offset, resp_data, bytes_read);
1810 	*len = bytes_read;
1811 
1812  exit:
1813 	iwm_free_resp(sc, &cmd);
1814 	return ret;
1815 }
1816 
1817 /*
1818  * Reads an NVM section completely.
 * NICs prior to the 7000 family don't have a real NVM, but just read
 * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
 * by the uCode, in this case we must check ourselves that we don't
 * overflow and try to read more than the EEPROM size.
 * For 7000 family NICs, we supply the maximal size we can read, and
 * the uCode fills the response with as much data as it can,
 * without overflowing, so no check is needed.
1826  */
1827 static int
1828 iwm_nvm_read_section(struct iwm_softc *sc,
1829 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1830 {
1831 	uint16_t seglen, length, offset = 0;
1832 	int ret;
1833 
1834 	/* Set nvm section read length */
1835 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1836 
1837 	seglen = length;
1838 
1839 	/* Read the NVM until exhausted (reading less than requested) */
1840 	while (seglen == length) {
		/* Check that no size assumption fails and causes an overflow. */
1842 		if ((size_read + offset + length) >
1843 		    sc->cfg->eeprom_size) {
1844 			device_printf(sc->sc_dev,
1845 			    "EEPROM size is too small for NVM\n");
1846 			return ENOBUFS;
1847 		}
1848 
1849 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1850 		if (ret) {
1851 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1852 				    "Cannot read NVM from section %d offset %d, length %d\n",
1853 				    section, offset, length);
1854 			return ret;
1855 		}
1856 		offset += seglen;
1857 	}
1858 
1859 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1860 		    "NVM section %d read completed\n", section);
1861 	*len = offset;
1862 	return 0;
1863 }
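
/*
 * Compiled-out sketch of the chunking loop above: reads advance in
 * IWM_NVM_DEFAULT_CHUNK_SIZE steps and stop at the first short chunk.
 * read_chunk is a stand-in for iwm_nvm_read_chunk(), invented for the
 * example.
 */
#if 0
static uint16_t
iwm_nvm_read_all_example(uint16_t (*read_chunk)(uint16_t off, uint16_t len))
{
	uint16_t offset = 0;
	uint16_t seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;

	while (seglen == IWM_NVM_DEFAULT_CHUNK_SIZE) {
		seglen = read_chunk(offset, IWM_NVM_DEFAULT_CHUNK_SIZE);
		offset += seglen;
	}
	return offset;		/* total bytes read from the section */
}
#endif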
1864 
1865 /* NVM offsets (in words) definitions */
1866 enum iwm_nvm_offsets {
1867 	/* NVM HW-Section offset (in words) definitions */
1868 	IWM_HW_ADDR = 0x15,
1869 
1870 /* NVM SW-Section offset (in words) definitions */
1871 	IWM_NVM_SW_SECTION = 0x1C0,
1872 	IWM_NVM_VERSION = 0,
1873 	IWM_RADIO_CFG = 1,
1874 	IWM_SKU = 2,
1875 	IWM_N_HW_ADDRS = 3,
1876 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1877 
1878 /* NVM calibration section offset (in words) definitions */
1879 	IWM_NVM_CALIB_SECTION = 0x2B8,
1880 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1881 };
1882 
1883 enum iwm_8000_nvm_offsets {
1884 	/* NVM HW-Section offset (in words) definitions */
1885 	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1886 	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1887 	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1888 	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1889 	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1890 
1891 	/* NVM SW-Section offset (in words) definitions */
1892 	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1893 	IWM_NVM_VERSION_8000 = 0,
1894 	IWM_RADIO_CFG_8000 = 0,
1895 	IWM_SKU_8000 = 2,
1896 	IWM_N_HW_ADDRS_8000 = 3,
1897 
1898 	/* NVM REGULATORY -Section offset (in words) definitions */
1899 	IWM_NVM_CHANNELS_8000 = 0,
1900 	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1901 	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1902 	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1903 
1904 	/* NVM calibration section offset (in words) definitions */
1905 	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1906 	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1907 };
1908 
1909 /* SKU Capabilities (actual values from NVM definition) */
1910 enum nvm_sku_bits {
1911 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1912 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1913 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1914 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1915 };
1916 
1917 /* radio config bits (actual values from NVM definition) */
1918 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1919 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1920 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1921 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1922 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1923 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1924 
1925 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1926 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1927 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1928 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1929 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1930 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
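
/*
 * Compiled-out example of decoding a 7000-family radio_cfg word with the
 * masks above.  The value 0x3321 is made up for illustration, not taken
 * from real NVM.
 */
#if 0
static void
iwm_radio_cfg_decode_example(void)
{
	uint16_t radio_cfg = 0x3321;

	int dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);		/* 0x1 */
	int step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);		/* 0x0 */
	int type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);		/* 0x2 */
	int pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);		/* 0x0 */
	int tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);	/* 0x3 */
	int rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);	/* 0x3 */
}
#endif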
1931 
1932 /**
1933  * enum iwm_nvm_channel_flags - channel flags in NVM
1934  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1935  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1936  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1937  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1938  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1939  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1940  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1941  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1942  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1943  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1944  */
1945 enum iwm_nvm_channel_flags {
1946 	IWM_NVM_CHANNEL_VALID = (1 << 0),
1947 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1948 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1949 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1950 	IWM_NVM_CHANNEL_DFS = (1 << 7),
1951 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1952 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1953 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1954 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1955 };
1956 
1957 /*
1958  * Translate EEPROM flags to net80211.
1959  */
1960 static uint32_t
1961 iwm_eeprom_channel_flags(uint16_t ch_flags)
1962 {
1963 	uint32_t nflags;
1964 
1965 	nflags = 0;
1966 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1967 		nflags |= IEEE80211_CHAN_PASSIVE;
1968 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1969 		nflags |= IEEE80211_CHAN_NOADHOC;
1970 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1971 		nflags |= IEEE80211_CHAN_DFS;
1972 		/* Just in case. */
1973 		nflags |= IEEE80211_CHAN_NOADHOC;
1974 	}
1975 
1976 	return (nflags);
1977 }
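
/*
 * Compiled-out usage sketch for the translation above: a channel that is
 * valid and IBSS-capable but not active-scan capable comes back as a
 * passive net80211 channel with ad-hoc still allowed.
 */
#if 0
static void
iwm_eeprom_channel_flags_example(void)
{
	uint16_t ch_flags = IWM_NVM_CHANNEL_VALID | IWM_NVM_CHANNEL_IBSS;

	/* No ACTIVE bit -> IEEE80211_CHAN_PASSIVE; IBSS set -> no NOADHOC. */
	uint32_t nflags = iwm_eeprom_channel_flags(ch_flags);
}
#endif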
1978 
1979 static void
1980 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1981     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1982     const uint8_t bands[])
1983 {
1984 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1985 	uint32_t nflags;
1986 	uint16_t ch_flags;
1987 	uint8_t ieee;
1988 	int error;
1989 
1990 	for (; ch_idx < ch_num; ch_idx++) {
1991 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1992 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1993 			ieee = iwm_nvm_channels[ch_idx];
1994 		else
1995 			ieee = iwm_nvm_channels_8000[ch_idx];
1996 
1997 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1998 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1999 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
2000 			    ieee, ch_flags,
2001 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2002 			    "5.2" : "2.4");
2003 			continue;
2004 		}
2005 
2006 		nflags = iwm_eeprom_channel_flags(ch_flags);
2007 		error = ieee80211_add_channel(chans, maxchans, nchans,
2008 		    ieee, 0, 0, nflags, bands);
2009 		if (error != 0)
2010 			break;
2011 
2012 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2013 		    "Ch. %d Flags %x [%sGHz] - Added\n",
2014 		    ieee, ch_flags,
2015 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2016 		    "5.2" : "2.4");
2017 	}
2018 }
2019 
2020 static void
2021 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2022     struct ieee80211_channel chans[])
2023 {
2024 	struct iwm_softc *sc = ic->ic_softc;
2025 	struct iwm_nvm_data *data = sc->nvm_data;
2026 	uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
2027 	size_t ch_num;
2028 
2029 	memset(bands, 0, sizeof(bands));
2030 	/* 1-13: 11b/g channels. */
2031 	setbit(bands, IEEE80211_MODE_11B);
2032 	setbit(bands, IEEE80211_MODE_11G);
2033 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2034 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2035 
2036 	/* 14: 11b channel only. */
2037 	clrbit(bands, IEEE80211_MODE_11G);
2038 	iwm_add_channel_band(sc, chans, maxchans, nchans,
2039 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2040 
2041 	if (data->sku_cap_band_52GHz_enable) {
2042 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2043 			ch_num = nitems(iwm_nvm_channels);
2044 		else
2045 			ch_num = nitems(iwm_nvm_channels_8000);
2046 		memset(bands, 0, sizeof(bands));
2047 		setbit(bands, IEEE80211_MODE_11A);
2048 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2049 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2050 	}
2051 }
2052 
2053 static void
2054 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2055 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2056 {
2057 	const uint8_t *hw_addr;
2058 
2059 	if (mac_override) {
2060 		static const uint8_t reserved_mac[] = {
2061 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2062 		};
2063 
2064 		hw_addr = (const uint8_t *)(mac_override +
2065 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2066 
		/*
		 * Store the MAC address from the MAO section.
		 * No byte swapping is required in the MAO section.
		 */
2071 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2072 
		/*
		 * Force the use of the OTP MAC address in case of a reserved
		 * MAC address in the NVM, or if the given address is invalid.
		 */
2077 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2078 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2079 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2080 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2081 			return;
2082 
2083 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2084 		    "%s: mac address from nvm override section invalid\n",
2085 		    __func__);
2086 	}
2087 
2088 	if (nvm_hw) {
2089 		/* read the mac address from WFMP registers */
2090 		uint32_t mac_addr0 =
2091 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2092 		uint32_t mac_addr1 =
2093 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2094 
2095 		hw_addr = (const uint8_t *)&mac_addr0;
2096 		data->hw_addr[0] = hw_addr[3];
2097 		data->hw_addr[1] = hw_addr[2];
2098 		data->hw_addr[2] = hw_addr[1];
2099 		data->hw_addr[3] = hw_addr[0];
2100 
2101 		hw_addr = (const uint8_t *)&mac_addr1;
2102 		data->hw_addr[4] = hw_addr[1];
2103 		data->hw_addr[5] = hw_addr[0];
2104 
2105 		return;
2106 	}
2107 
2108 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2109 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2110 }
2111 
2112 static int
2113 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2114 	    const uint16_t *phy_sku)
2115 {
2116 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2117 		return le16_to_cpup(nvm_sw + IWM_SKU);
2118 
2119 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2120 }
2121 
2122 static int
2123 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2124 {
2125 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2126 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2127 	else
2128 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2129 						IWM_NVM_VERSION_8000));
2130 }
2131 
2132 static int
2133 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2134 		  const uint16_t *phy_sku)
2135 {
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);

	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2140 }
2141 
2142 static int
2143 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2144 {
2145 	int n_hw_addr;
2146 
2147 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2148 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2149 
2150 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2151 
	return n_hw_addr & IWM_N_HW_ADDR_MASK;
2153 }
2154 
2155 static void
2156 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2157 		  uint32_t radio_cfg)
2158 {
2159 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2160 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2161 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2162 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2163 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2164 		return;
2165 	}
2166 
2167 	/* set the radio configuration for family 8000 */
2168 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2169 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2170 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2171 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2172 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2173 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2174 }
2175 
2176 static int
2177 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2178 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2179 {
2180 #ifdef notyet /* for FAMILY 9000 */
2181 	if (cfg->mac_addr_from_csr) {
2182 		iwm_set_hw_address_from_csr(sc, data);
	} else
2184 #endif
2185 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2186 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2187 
2188 		/* The byte order is little endian 16 bit, meaning 214365 */
2189 		data->hw_addr[0] = hw_addr[1];
2190 		data->hw_addr[1] = hw_addr[0];
2191 		data->hw_addr[2] = hw_addr[3];
2192 		data->hw_addr[3] = hw_addr[2];
2193 		data->hw_addr[4] = hw_addr[5];
2194 		data->hw_addr[5] = hw_addr[4];
2195 	} else {
2196 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2197 	}
2198 
2199 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2200 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2201 		return EINVAL;
2202 	}
2203 
2204 	return 0;
2205 }
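
/*
 * Compiled-out sketch of the "214365" ordering handled above: on pre-8000
 * parts the NVM stores the MAC as little-endian 16-bit words, so
 * consecutive byte pairs come out swapped.  The sample address is made up
 * for illustration.
 */
#if 0
static void
iwm_hw_addr_order_example(void)
{
	/* Bytes as they appear in the NVM words... */
	const uint8_t nvm[6] = { 0x34, 0x12, 0x78, 0x56, 0xbc, 0x9a };
	uint8_t mac[6];

	/* ...yield the MAC address 12:34:56:78:9a:bc after pair-swapping. */
	mac[0] = nvm[1]; mac[1] = nvm[0];
	mac[2] = nvm[3]; mac[3] = nvm[2];
	mac[4] = nvm[5]; mac[5] = nvm[4];
}
#endif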
2206 
2207 static struct iwm_nvm_data *
2208 iwm_parse_nvm_data(struct iwm_softc *sc,
2209 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2210 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2211 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2212 {
2213 	struct iwm_nvm_data *data;
2214 	uint32_t sku, radio_cfg;
2215 	uint16_t lar_config;
2216 
2217 	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2218 		data = kmalloc(sizeof(*data) +
2219 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2220 		    M_DEVBUF, M_WAITOK | M_ZERO);
2221 	} else {
2222 		data = kmalloc(sizeof(*data) +
2223 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2224 		    M_DEVBUF, M_WAITOK | M_ZERO);
2225 	}
2226 	if (!data)
2227 		return NULL;
2228 
2229 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2230 
2231 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2232 	iwm_set_radio_cfg(sc, data, radio_cfg);
2233 
2234 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2235 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2236 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2237 	data->sku_cap_11n_enable = 0;
2238 
2239 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2240 
2241 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2242 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
2243 				       IWM_NVM_LAR_OFFSET_8000_OLD :
2244 				       IWM_NVM_LAR_OFFSET_8000;
2245 
2246 		lar_config = le16_to_cpup(regulatory + lar_offset);
2247 		data->lar_enabled = !!(lar_config &
2248 				       IWM_NVM_LAR_ENABLED_8000);
2249 	}
2250 
2251 	/* If no valid mac address was found - bail out */
2252 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2253 		kfree(data, M_DEVBUF);
2254 		return NULL;
2255 	}
2256 
2257 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2258 		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2259 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2260 	} else {
2261 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2262 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2263 	}
2264 
2265 	return data;
2266 }
2267 
2268 static void
2269 iwm_free_nvm_data(struct iwm_nvm_data *data)
2270 {
2271 	if (data != NULL)
2272 		kfree(data, M_DEVBUF);
2273 }
2274 
2275 static struct iwm_nvm_data *
2276 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2277 {
2278 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2279 
2280 	/* Checking for required sections */
2281 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2282 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2283 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2284 			device_printf(sc->sc_dev,
2285 			    "Can't parse empty OTP/NVM sections\n");
2286 			return NULL;
2287 		}
2288 	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2289 		/* SW and REGULATORY sections are mandatory */
2290 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2291 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2292 			device_printf(sc->sc_dev,
2293 			    "Can't parse empty OTP/NVM sections\n");
2294 			return NULL;
2295 		}
2296 		/* MAC_OVERRIDE or at least HW section must exist */
2297 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2298 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2299 			device_printf(sc->sc_dev,
2300 			    "Can't parse mac_address, empty sections\n");
2301 			return NULL;
2302 		}
2303 
2304 		/* PHY_SKU section is mandatory in B0 */
2305 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2306 			device_printf(sc->sc_dev,
2307 			    "Can't parse phy_sku in B0, empty sections\n");
2308 			return NULL;
2309 		}
2310 	} else {
2311 		panic("unknown device family %d\n", sc->cfg->device_family);
2312 	}
2313 
	hw = (const uint16_t *)sections[sc->cfg->nvm_hw_section_num].data;
2315 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2316 	calib = (const uint16_t *)
2317 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2318 	regulatory = (const uint16_t *)
2319 	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2320 	mac_override = (const uint16_t *)
2321 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2322 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2323 
2324 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2325 	    phy_sku, regulatory);
2326 }
2327 
2328 static int
2329 iwm_nvm_init(struct iwm_softc *sc)
2330 {
2331 	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2332 	int i, ret, section;
2333 	uint32_t size_read = 0;
2334 	uint8_t *nvm_buffer, *temp;
2335 	uint16_t len;
2336 
2337 	memset(nvm_sections, 0, sizeof(nvm_sections));
2338 
2339 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2340 		return EINVAL;
2341 
	/* Load NVM values from the NIC. */
2344 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2345 
2346 	nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF,
2347 	    M_INTWAIT | M_ZERO);
2348 	if (!nvm_buffer)
2349 		return ENOMEM;
2350 	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
		/* Read this section into the scratch buffer. */
2352 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2353 					   &len, size_read);
2354 		if (ret)
2355 			continue;
2356 		size_read += len;
2357 		temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
2358 		if (!temp) {
2359 			ret = ENOMEM;
2360 			break;
2361 		}
2362 		memcpy(temp, nvm_buffer, len);
2363 
2364 		nvm_sections[section].data = temp;
2365 		nvm_sections[section].length = len;
2366 	}
2367 	if (!size_read)
2368 		device_printf(sc->sc_dev, "OTP is blank\n");
2369 	kfree(nvm_buffer, M_DEVBUF);
2370 
2371 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2372 	if (!sc->nvm_data)
2373 		return EINVAL;
2374 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2375 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2376 
2377 	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2378 		if (nvm_sections[i].data != NULL)
2379 			kfree(nvm_sections[i].data, M_DEVBUF);
2380 	}
2381 
2382 	return 0;
2383 }
2384 
2385 static int
2386 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2387 	const struct iwm_fw_desc *section)
2388 {
2389 	struct iwm_dma_info *dma = &sc->fw_dma;
2390 	uint8_t *v_addr;
2391 	bus_addr_t p_addr;
2392 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2393 	int ret = 0;
2394 
2395 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2396 		    "%s: [%d] uCode section being loaded...\n",
2397 		    __func__, section_num);
2398 
2399 	v_addr = dma->vaddr;
2400 	p_addr = dma->paddr;
2401 
2402 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2403 		uint32_t copy_size, dst_addr;
2404 		int extended_addr = FALSE;
2405 
2406 		copy_size = MIN(chunk_sz, section->len - offset);
2407 		dst_addr = section->offset + offset;
2408 
2409 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2410 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2411 			extended_addr = TRUE;
2412 
2413 		if (extended_addr)
2414 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2415 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2416 
2417 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2418 		    copy_size);
2419 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2420 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2421 						   copy_size);
2422 
2423 		if (extended_addr)
2424 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2425 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2426 
2427 		if (ret) {
2428 			device_printf(sc->sc_dev,
2429 			    "%s: Could not load the [%d] uCode section\n",
2430 			    __func__, section_num);
2431 			break;
2432 		}
2433 	}
2434 
2435 	return ret;
2436 }
2437 
2438 /*
2439  * ucode
2440  */
2441 static int
2442 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2443 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2444 {
2445 	int ret;
2446 
2447 	sc->sc_fw_chunk_done = 0;
2448 
2449 	if (!iwm_nic_lock(sc))
2450 		return EBUSY;
2451 
2452 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2453 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2454 
2455 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2456 	    dst_addr);
2457 
2458 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2459 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2460 
2461 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2462 	    (iwm_get_dma_hi_addr(phy_addr)
2463 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2464 
2465 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2466 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2467 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2468 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2469 
2470 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2471 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2472 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2473 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2474 
2475 	iwm_nic_unlock(sc);
2476 
2477 	/* wait up to 5s for this segment to load */
2478 	ret = 0;
2479 	while (!sc->sc_fw_chunk_done) {
2480 #if defined(__DragonFly__)
2481 		ret = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", 5 * hz);
2482 #else
2483 		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", 5 * hz);
2484 #endif
2485 		if (ret)
2486 			break;
2487 	}
2488 
2489 	if (ret != 0) {
2490 		device_printf(sc->sc_dev,
2491 		    "fw chunk addr 0x%x len %d failed to load\n",
2492 		    dst_addr, byte_cnt);
2493 		return ETIMEDOUT;
2494 	}
2495 
2496 	return 0;
2497 }
2498 
2499 static int
2500 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2501 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2502 {
2503 	int shift_param;
2504 	int i, ret = 0, sec_num = 0x1;
2505 	uint32_t val, last_read_idx = 0;
2506 
2507 	if (cpu == 1) {
2508 		shift_param = 0;
2509 		*first_ucode_section = 0;
2510 	} else {
2511 		shift_param = 16;
2512 		(*first_ucode_section)++;
2513 	}
2514 
2515 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2516 		last_read_idx = i;
2517 
		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
		 * CPU1 sections from the CPU2 sections.
		 * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
		 * non-paged sections from the CPU2 paging sections.
		 */
2524 		if (!image->sec[i].data ||
2525 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2526 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2527 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since data is not valid or section is empty, sec = %d\n",
2529 				    i);
2530 			break;
2531 		}
2532 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2533 		if (ret)
2534 			return ret;
2535 
2536 		/* Notify the ucode of the loaded section number and status */
2537 		if (iwm_nic_lock(sc)) {
2538 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2539 			val = val | (sec_num << shift_param);
2540 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2541 			sec_num = (sec_num << 1) | 0x1;
2542 			iwm_nic_unlock(sc);
2543 		}
2544 	}
2545 
2546 	*first_ucode_section = last_read_idx;
2547 
2548 	iwm_enable_interrupts(sc);
2549 
2550 	if (iwm_nic_lock(sc)) {
2551 		if (cpu == 1)
2552 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2553 		else
2554 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2555 		iwm_nic_unlock(sc);
2556 	}
2557 
2558 	return 0;
2559 }
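
/*
 * Compiled-out sketch of the sec_num bookkeeping above: starting at 0x1
 * and evolving as (sec_num << 1) | 0x1 after each section, the values
 * OR'd into IWM_FH_UCODE_LOAD_STATUS form a growing mask of one-bits, so
 * after n sections the CPU's half-word holds (1 << n) - 1.
 */
#if 0
static uint32_t
iwm_sec_num_mask_example(int nsections)
{
	uint32_t val = 0, sec_num = 0x1;
	int i;

	for (i = 0; i < nsections; i++) {
		val |= sec_num;			/* as OR'd into the register */
		sec_num = (sec_num << 1) | 0x1;
	}
	return val;		/* e.g. 3 sections -> 0x7 */
}
#endif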
2560 
2561 static int
2562 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2563 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2564 {
2565 	int shift_param;
2566 	int i, ret = 0;
2567 	uint32_t last_read_idx = 0;
2568 
2569 	if (cpu == 1) {
2570 		shift_param = 0;
2571 		*first_ucode_section = 0;
2572 	} else {
2573 		shift_param = 16;
2574 		(*first_ucode_section)++;
2575 	}
2576 
2577 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2578 		last_read_idx = i;
2579 
		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
		 * CPU1 sections from the CPU2 sections.
		 * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
		 * non-paged sections from the CPU2 paging sections.
		 */
2586 		if (!image->sec[i].data ||
2587 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2588 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2589 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since data is not valid or section is empty, sec = %d\n",
				    i);
2592 			break;
2593 		}
2594 
2595 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2596 		if (ret)
2597 			return ret;
2598 	}
2599 
2600 	*first_ucode_section = last_read_idx;
2601 
	return 0;
}
2605 
2606 static int
2607 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2608 {
2609 	int ret = 0;
2610 	int first_ucode_section;
2611 
2612 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2613 		     image->is_dual_cpus ? "Dual" : "Single");
2614 
	/* Load the binary non-secured sections of CPU1 to the FW. */
2616 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2617 	if (ret)
2618 		return ret;
2619 
2620 	if (image->is_dual_cpus) {
2621 		/* set CPU2 header address */
2622 		if (iwm_nic_lock(sc)) {
2623 			iwm_write_prph(sc,
2624 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2625 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2626 			iwm_nic_unlock(sc);
2627 		}
2628 
2629 		/* load to FW the binary sections of CPU2 */
2630 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2631 						 &first_ucode_section);
2632 		if (ret)
2633 			return ret;
2634 	}
2635 
2636 	iwm_enable_interrupts(sc);
2637 
2638 	/* release CPU reset */
2639 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2640 
2641 	return 0;
2642 }
2643 
2644 int
2645 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2646 	const struct iwm_fw_img *image)
2647 {
2648 	int ret = 0;
2649 	int first_ucode_section;
2650 
2651 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2652 		    image->is_dual_cpus ? "Dual" : "Single");
2653 
2654 	/* configure the ucode to be ready to get the secured image */
2655 	/* release CPU reset */
2656 	if (iwm_nic_lock(sc)) {
2657 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2658 		    IWM_RELEASE_CPU_RESET_BIT);
2659 		iwm_nic_unlock(sc);
2660 	}
2661 
	/* Load the binary secured sections of CPU1 to the FW. */
2663 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2664 	    &first_ucode_section);
2665 	if (ret)
2666 		return ret;
2667 
2668 	/* load to FW the binary sections of CPU2 */
2669 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2670 	    &first_ucode_section);
2671 }
2672 
2673 /* XXX Get rid of this definition */
2674 static inline void
2675 iwm_enable_fw_load_int(struct iwm_softc *sc)
2676 {
2677 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2678 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2679 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2680 }
2681 
2682 /* XXX Add proper rfkill support code */
2683 static int
2684 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2685 {
2686 	int ret;
2687 
2688 	/* This may fail if AMT took ownership of the device */
2689 	if (iwm_prepare_card_hw(sc)) {
2690 		device_printf(sc->sc_dev,
2691 		    "%s: Exit HW not ready\n", __func__);
2692 		ret = EIO;
2693 		goto out;
2694 	}
2695 
2696 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2697 
2698 	iwm_disable_interrupts(sc);
2699 
2700 	/* make sure rfkill handshake bits are cleared */
2701 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2702 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2703 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2704 
2705 	/* clear (again), then enable host interrupts */
2706 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2707 
2708 	ret = iwm_nic_init(sc);
2709 	if (ret) {
2710 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2711 		goto out;
2712 	}
2713 
2714 	/*
2715 	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all interrupts besides the
	 * FH_TX interrupt, which is needed to load the firmware). If the
2718 	 * RF-Kill switch is toggled, we will find out after having loaded
2719 	 * the firmware and return the proper value to the caller.
2720 	 */
2721 	iwm_enable_fw_load_int(sc);
2722 
2723 	/* really make sure rfkill handshake bits are cleared */
2724 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2725 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2726 
2727 	/* Load the given image to the HW */
2728 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2729 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2730 	else
2731 		ret = iwm_pcie_load_given_ucode(sc, fw);
2732 
2733 	/* XXX re-check RF-Kill state */
2734 
2735 out:
2736 	return ret;
2737 }
2738 
2739 static int
2740 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2741 {
2742 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2743 		.valid = htole32(valid_tx_ant),
2744 	};
2745 
2746 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2747 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2748 }
2749 
2750 static int
2751 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2752 {
2753 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2754 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2755 
2756 	/* Set parameters */
2757 	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2758 	phy_cfg_cmd.calib_control.event_trigger =
2759 	    sc->sc_default_calib[ucode_type].event_trigger;
2760 	phy_cfg_cmd.calib_control.flow_trigger =
2761 	    sc->sc_default_calib[ucode_type].flow_trigger;
2762 
2763 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2764 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2765 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2766 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2767 }
2768 
2769 static int
2770 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2771 {
2772 	struct iwm_mvm_alive_data *alive_data = data;
2773 	struct iwm_mvm_alive_resp_ver1 *palive1;
2774 	struct iwm_mvm_alive_resp_ver2 *palive2;
2775 	struct iwm_mvm_alive_resp *palive;
2776 
2777 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
2778 		palive1 = (void *)pkt->data;
2779 
2780 		sc->support_umac_log = FALSE;
		sc->error_event_table =
		    le32toh(palive1->error_event_table_ptr);
		sc->log_event_table =
		    le32toh(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

		alive_data->valid = le16toh(palive1->status) ==
		    IWM_ALIVE_STATUS_OK;
		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
		    le16toh(palive1->status), palive1->ver_type,
		    palive1->ver_subtype, palive1->flags);
2793 	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
2794 		palive2 = (void *)pkt->data;
2795 		sc->error_event_table =
2796 			le32toh(palive2->error_event_table_ptr);
2797 		sc->log_event_table =
2798 			le32toh(palive2->log_event_table_ptr);
2799 		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
2800 		sc->umac_error_event_table =
			le32toh(palive2->error_info_addr);
2802 
2803 		alive_data->valid = le16toh(palive2->status) ==
2804 				    IWM_ALIVE_STATUS_OK;
2805 		if (sc->umac_error_event_table)
2806 			sc->support_umac_log = TRUE;
2807 
2808 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2809 			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2810 			    le16toh(palive2->status), palive2->ver_type,
2811 			    palive2->ver_subtype, palive2->flags);
2812 
2813 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2814 			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2815 			    palive2->umac_major, palive2->umac_minor);
2816 	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2817 		palive = (void *)pkt->data;
2818 
2819 		sc->error_event_table =
2820 			le32toh(palive->error_event_table_ptr);
2821 		sc->log_event_table =
2822 			le32toh(palive->log_event_table_ptr);
2823 		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
2824 		sc->umac_error_event_table =
2825 			le32toh(palive->error_info_addr);
2826 
2827 		alive_data->valid = le16toh(palive->status) ==
2828 				    IWM_ALIVE_STATUS_OK;
2829 		if (sc->umac_error_event_table)
2830 			sc->support_umac_log = TRUE;
2831 
2832 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2833 			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2834 			    le16toh(palive->status), palive->ver_type,
2835 			    palive->ver_subtype, palive->flags);
2836 
2837 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2838 			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2839 			    le32toh(palive->umac_major),
2840 			    le32toh(palive->umac_minor));
2841 	}
2842 
2843 	return TRUE;
2844 }
2845 
2846 static int
2847 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2848 	struct iwm_rx_packet *pkt, void *data)
2849 {
2850 	struct iwm_phy_db *phy_db = data;
2851 
2852 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
		if (pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2854 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2855 			    __func__, pkt->hdr.code);
2856 		}
2857 		return TRUE;
2858 	}
2859 
2860 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2861 		device_printf(sc->sc_dev,
2862 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2863 	}
2864 
2865 	return FALSE;
2866 }
2867 
2868 static int
2869 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2870 	enum iwm_ucode_type ucode_type)
2871 {
2872 	struct iwm_notification_wait alive_wait;
2873 	struct iwm_mvm_alive_data alive_data;
2874 	const struct iwm_fw_img *fw;
2875 	enum iwm_ucode_type old_type = sc->cur_ucode;
2876 	int error;
2877 	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2878 
2879 	fw = &sc->sc_fw.img[ucode_type];
2880 	sc->cur_ucode = ucode_type;
2881 	sc->ucode_loaded = FALSE;
2882 
2883 	memset(&alive_data, 0, sizeof(alive_data));
2884 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2885 				   alive_cmd, NELEM(alive_cmd),
2886 				   iwm_alive_fn, &alive_data);
2887 
2888 	error = iwm_start_fw(sc, fw);
2889 	if (error) {
2890 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2891 		sc->cur_ucode = old_type;
2892 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2893 		return error;
2894 	}
2895 
2896 	/*
2897 	 * Some things may run in the background now, but we
2898 	 * just wait for the ALIVE notification here.
2899 	 */
2900 	IWM_UNLOCK(sc);
2901 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2902 				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
2903 	IWM_LOCK(sc);
2904 	if (error) {
2905 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2906 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2907 			if (iwm_nic_lock(sc)) {
2908 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2909 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2910 				iwm_nic_unlock(sc);
2911 			}
2912 			device_printf(sc->sc_dev,
2913 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2914 			    a, b);
2915 		}
2916 		sc->cur_ucode = old_type;
2917 		return error;
2918 	}
2919 
2920 	if (!alive_data.valid) {
2921 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2922 		    __func__);
2923 		sc->cur_ucode = old_type;
2924 		return EIO;
2925 	}
2926 
2927 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2928 
	/*
	 * Configure and operate the fw paging mechanism.
	 * The driver configures the paging flow only once; the CPU2 paging
	 * image is included in the IWM_UCODE_INIT image.
	 */
2934 	if (fw->paging_mem_size) {
2935 		error = iwm_save_fw_paging(sc, fw);
2936 		if (error) {
2937 			device_printf(sc->sc_dev,
2938 			    "%s: failed to save the FW paging image\n",
2939 			    __func__);
2940 			return error;
2941 		}
2942 
2943 		error = iwm_send_paging_cmd(sc, fw);
2944 		if (error) {
2945 			device_printf(sc->sc_dev,
2946 			    "%s: failed to send the paging cmd\n", __func__);
2947 			iwm_free_fw_paging(sc);
2948 			return error;
2949 		}
2950 	}
2951 
2952 	if (!error)
2953 		sc->ucode_loaded = TRUE;
2954 	return error;
2955 }
2956 
2957 /*
2958  * mvm misc bits
2959  */
2960 
2961 static int
2962 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2963 {
2964 	struct iwm_notification_wait calib_wait;
2965 	static const uint16_t init_complete[] = {
2966 		IWM_INIT_COMPLETE_NOTIF,
2967 		IWM_CALIB_RES_NOTIF_PHY_DB
2968 	};
2969 	int ret;
2970 
2971 	/* do not operate with rfkill switch turned on */
2972 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2973 		device_printf(sc->sc_dev,
2974 		    "radio is disabled by hardware switch\n");
2975 		return EPERM;
2976 	}
2977 
2978 	iwm_init_notification_wait(sc->sc_notif_wait,
2979 				   &calib_wait,
2980 				   init_complete,
2981 				   NELEM(init_complete),
2982 				   iwm_wait_phy_db_entry,
2983 				   sc->sc_phy_db);
2984 
2985 	/* Will also start the device */
2986 	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2987 	if (ret) {
2988 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2989 		    ret);
2990 		goto error;
2991 	}
2992 
2993 	if (justnvm) {
2994 		/* Read nvm */
2995 		ret = iwm_nvm_init(sc);
2996 		if (ret) {
2997 			device_printf(sc->sc_dev, "failed to read nvm\n");
2998 			goto error;
2999 		}
3000 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
3001 		goto error;
3002 	}
3003 
3004 	ret = iwm_send_bt_init_conf(sc);
3005 	if (ret) {
3006 		device_printf(sc->sc_dev,
3007 		    "failed to send bt coex configuration: %d\n", ret);
3008 		goto error;
3009 	}
3010 
3011 	/* Send TX valid antennas before triggering calibrations */
3012 	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
3013 	if (ret) {
3014 		device_printf(sc->sc_dev,
3015 		    "failed to send antennas before calibration: %d\n", ret);
3016 		goto error;
3017 	}
3018 
	/*
	 * Send the phy configuration command to the init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
3023 	ret = iwm_send_phy_cfg_cmd(sc);
3024 	if (ret) {
3025 		device_printf(sc->sc_dev,
3026 		    "%s: Failed to run INIT calibrations: %d\n",
3027 		    __func__, ret);
3028 		goto error;
3029 	}
3030 
3031 	/*
3032 	 * Nothing to do but wait for the init complete notification
3033 	 * from the firmware.
3034 	 */
3035 	IWM_UNLOCK(sc);
3036 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3037 	    IWM_MVM_UCODE_CALIB_TIMEOUT);
3038 	IWM_LOCK(sc);
3039 
3041 	goto out;
3042 
3043 error:
3044 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3045 out:
3046 	return ret;
3047 }
3048 
3049 static int
3050 iwm_mvm_config_ltr(struct iwm_softc *sc)
3051 {
3052 	struct iwm_ltr_config_cmd cmd = {
3053 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
3054 	};
3055 
3056 	if (!sc->sc_ltr_enabled)
3057 		return 0;
3058 
3059 	return iwm_mvm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3060 }
3061 
3062 /*
3063  * receive side
3064  */
3065 
3066 /* (re)stock rx ring, called at init-time and at runtime */
3067 static int
3068 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3069 {
3070 	struct iwm_rx_ring *ring = &sc->rxq;
3071 	struct iwm_rx_data *data = &ring->data[idx];
3072 	struct mbuf *m;
3073 	bus_dmamap_t dmamap;
3074 	bus_dma_segment_t seg;
3075 	int nsegs, error;
3076 
3077 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3078 	if (m == NULL)
3079 		return ENOBUFS;
3080 
3081 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3082 #if defined(__DragonFly__)
3083 	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
3084 	    m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
3085 #else
3086 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3087 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3088 #endif
3089 	if (error != 0) {
3090 		device_printf(sc->sc_dev,
3091 		    "%s: can't map mbuf, error %d\n", __func__, error);
3092 		m_freem(m);
3093 		return error;
3094 	}
3095 
3096 	if (data->m != NULL)
3097 		bus_dmamap_unload(ring->data_dmat, data->map);
3098 
3099 	/* Swap ring->spare_map with data->map */
3100 	dmamap = data->map;
3101 	data->map = ring->spare_map;
3102 	ring->spare_map = dmamap;
3103 
3104 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3105 	data->m = m;
3106 
3107 	/* Update RX descriptor. */
3108 	KKASSERT((seg.ds_addr & 255) == 0);
3109 	ring->desc[idx] = htole32(seg.ds_addr >> 8);
3110 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3111 	    BUS_DMASYNC_PREWRITE);
3112 
3113 	return 0;
3114 }
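
/*
 * Compiled-out sketch of the RX descriptor encoding above: buffers must
 * be 256-byte aligned because the ring stores only paddr >> 8.  The
 * address below is a made-up example.
 */
#if 0
static uint32_t
iwm_rx_desc_example(void)
{
	bus_addr_t paddr = 0x12345600;	/* low 8 bits must be zero */

	return htole32(paddr >> 8);	/* stored as 0x123456 (little-endian) */
}
#endif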
3115 
3116 /*
3117  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3118  * values are reported by the fw as positive values - need to negate
 * to obtain their dBm.  Account for missing antennas by replacing 0
 * values by -256dBm: practically 0 power and a non-feasible 8-bit value.
3121  */
3122 static int
3123 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3124 {
3125 	int energy_a, energy_b, energy_c, max_energy;
3126 	uint32_t val;
3127 
3128 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3129 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3130 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3131 	energy_a = energy_a ? -energy_a : -256;
3132 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3133 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3134 	energy_b = energy_b ? -energy_b : -256;
3135 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3136 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3137 	energy_c = energy_c ? -energy_c : -256;
3138 	max_energy = MAX(energy_a, energy_b);
3139 	max_energy = MAX(max_energy, energy_c);
3140 
3141 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3142 	    "energy In A %d B %d C %d , and max %d\n",
3143 	    energy_a, energy_b, energy_c, max_energy);
3144 
3145 	return max_energy;
3146 }
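
/*
 * Compiled-out worked example for the conversion above: per-antenna
 * energies of 40, 0 and 25 become -40, -256 (missing antenna) and -25
 * dBm, so the reported maximum is -25.  The values are made up for
 * illustration.
 */
#if 0
static void
iwm_signal_strength_example(void)
{
	int energy_a = 40;	/* -> -40 dBm */
	int energy_b = 0;	/* missing antenna -> -256 dBm */
	int energy_c = 25;	/* -> -25 dBm */

	energy_a = energy_a ? -energy_a : -256;
	energy_b = energy_b ? -energy_b : -256;
	energy_c = energy_c ? -energy_c : -256;
	/* MAX(-40, MAX(-256, -25)) == -25 */
}
#endif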
3147 
3148 static void
3149 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3150 {
3151 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3152 
3153 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3154 
3155 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3156 }
3157 
3158 /*
3159  * Retrieve the average noise (in dBm) among receivers.
3160  */
3161 static int
3162 iwm_get_noise(struct iwm_softc *sc,
3163 	const struct iwm_mvm_statistics_rx_non_phy *stats)
3164 {
3165 	int i, total, nbant, noise;
3166 
3167 	total = nbant = noise = 0;
3168 	for (i = 0; i < 3; i++) {
3169 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3170 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3171 		    __func__, i, noise);
3172 
3173 		if (noise) {
3174 			total += noise;
3175 			nbant++;
3176 		}
3177 	}
3178 
3179 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3180 	    __func__, nbant, total);
3181 #if 0
3182 	/* There should be at least one antenna but check anyway. */
3183 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3184 #else
3185 	/* For now, just hard-code it to -96 to be safe */
3186 	return (-96);
3187 #endif
3188 }
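
/*
 * Compiled-out arithmetic sketch for the disabled formula above: with
 * beacon silence RSSI samples of 20, 22 and 0, only the two non-zero
 * antennas count, so (20 + 22) / 2 - 107 = -86 dBm would be returned
 * instead of the hard-coded -96.  The samples are made up.
 */
#if 0
static int
iwm_get_noise_example(void)
{
	int samples[3] = { 20, 22, 0 };
	int i, total = 0, nbant = 0;

	for (i = 0; i < 3; i++) {
		if (samples[i]) {
			total += samples[i];
			nbant++;
		}
	}
	return (nbant == 0) ? -127 : (total / nbant) - 107;
}
#endif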
3189 
3190 static void
3191 iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3192 {
3193 	struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3194 
3195 	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3196 	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3197 }
3198 
3199 /*
3200  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3201  *
3202  * Handles the actual data of the Rx packet from the fw
3203  */
3204 static boolean_t
3205 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3206 	boolean_t stolen)
3207 {
3208 	struct ieee80211com *ic = &sc->sc_ic;
3209 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3210 	struct ieee80211_frame *wh;
3211 	struct ieee80211_node *ni;
3212 	struct ieee80211_rx_stats rxs;
3213 	struct iwm_rx_phy_info *phy_info;
3214 	struct iwm_rx_mpdu_res_start *rx_res;
3215 	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3216 	uint32_t len;
3217 	uint32_t rx_pkt_status;
3218 	int rssi;
3219 
3220 	phy_info = &sc->sc_last_phy_info;
3221 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3222 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3223 	len = le16toh(rx_res->byte_count);
3224 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3225 
3226 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3227 		device_printf(sc->sc_dev,
3228 		    "dsp size out of range [0,20]: %d\n",
3229 		    phy_info->cfg_phy_cnt);
3230 		return FALSE;
3231 	}
3232 
3233 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3234 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3235 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3236 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3237 		return FALSE; /* drop */
3238 	}
3239 
3240 	rssi = iwm_mvm_get_signal_strength(sc, phy_info);
	/* Note: RSSI is absolute (i.e. a negative dBm value) */
3242 	if (rssi < IWM_MIN_DBM)
3243 		rssi = IWM_MIN_DBM;
3244 	else if (rssi > IWM_MAX_DBM)
3245 		rssi = IWM_MAX_DBM;
3246 
3247 	/* Map it to relative value */
3248 	rssi = rssi - sc->sc_noise;
3249 
3250 	/* replenish ring for the buffer we're going to feed to the sharks */
3251 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3252 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3253 		    __func__);
3254 		return FALSE;
3255 	}
3256 
3257 	m->m_data = pkt->data + sizeof(*rx_res);
3258 	m->m_pkthdr.len = m->m_len = len;
3259 
3260 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3261 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3262 
3263 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3264 
3265 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3266 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3267 	    __func__,
3268 	    le16toh(phy_info->channel),
3269 	    le16toh(phy_info->phy_flags));
3270 
3271 	/*
3272 	 * Populate an RX state struct with the provided information.
3273 	 */
3274 	bzero(&rxs, sizeof(rxs));
3275 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3276 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3277 	rxs.c_ieee = le16toh(phy_info->channel);
	if (le16toh(phy_info->phy_flags) & IWM_RX_RES_PHY_FLAGS_BAND_24) {
3279 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3280 	} else {
3281 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3282 	}
	/* rssi is in 1/2 dB units */
3284 	rxs.rssi = rssi * 2;
3285 	rxs.nf = sc->sc_noise;
3286 
3287 	if (ieee80211_radiotap_active_vap(vap)) {
3288 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3289 
3290 		tap->wr_flags = 0;
3291 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3292 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3293 		tap->wr_chan_freq = htole16(rxs.c_freq);
3294 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3295 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3296 		tap->wr_dbm_antsignal = (int8_t)rssi;
3297 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3298 		tap->wr_tsft = phy_info->system_timestamp;
3299 		switch (phy_info->rate) {
3300 		/* CCK rates. */
3301 		case  10: tap->wr_rate =   2; break;
3302 		case  20: tap->wr_rate =   4; break;
3303 		case  55: tap->wr_rate =  11; break;
3304 		case 110: tap->wr_rate =  22; break;
3305 		/* OFDM rates. */
3306 		case 0xd: tap->wr_rate =  12; break;
3307 		case 0xf: tap->wr_rate =  18; break;
3308 		case 0x5: tap->wr_rate =  24; break;
3309 		case 0x7: tap->wr_rate =  36; break;
3310 		case 0x9: tap->wr_rate =  48; break;
3311 		case 0xb: tap->wr_rate =  72; break;
3312 		case 0x1: tap->wr_rate =  96; break;
3313 		case 0x3: tap->wr_rate = 108; break;
3314 		/* Unknown rate: should not happen. */
3315 		default:  tap->wr_rate =   0;
3316 		}
3317 	}
3318 
3319 	IWM_UNLOCK(sc);
3320 	if (ni != NULL) {
3321 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3322 		ieee80211_input_mimo(ni, m, &rxs);
3323 		ieee80211_free_node(ni);
3324 	} else {
3325 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3326 		ieee80211_input_mimo_all(ic, m, &rxs);
3327 	}
3328 	IWM_LOCK(sc);
3329 
3330 	return TRUE;
3331 }
3332 
3333 static int
3334 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3335 	struct iwm_node *in)
3336 {
3337 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3338 	struct ieee80211_node *ni = &in->in_ni;
3339 	struct ieee80211vap *vap = ni->ni_vap;
3340 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3341 	int failack = tx_resp->failure_frame;
3342 	int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3343 	boolean_t rate_matched;
3344 	uint8_t tx_resp_rate;
3345 	int ret;
3346 
3347 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3348 
3349 	/* Update rate control statistics. */
3350 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3351 	    __func__,
3352 	    (int) le16toh(tx_resp->status.status),
3353 	    (int) le16toh(tx_resp->status.sequence),
3354 	    tx_resp->frame_count,
3355 	    tx_resp->bt_kill_count,
3356 	    tx_resp->failure_rts,
3357 	    tx_resp->failure_frame,
3358 	    le32toh(tx_resp->initial_rate),
3359 	    (int) le16toh(tx_resp->wireless_media_time));
3360 
3361 	tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3362 
	/* For rate control, ignore frames sent at a different initial rate */
3364 	rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3365 
3366 	if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3367 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3368 		    "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3369 		    "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3370 	}
3371 
3372 	if (status != IWM_TX_STATUS_SUCCESS &&
3373 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3374 		if (rate_matched) {
3375 			ieee80211_ratectl_tx_complete(vap, ni,
3376 			    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3377 		}
3378 		ret = 1;
3379 	} else {
3380 		if (rate_matched) {
3381 			ieee80211_ratectl_tx_complete(vap, ni,
3382 			    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3383 		}
3384 		ret = 0;
3385 	}
3386 
3387 	if (rate_matched) {
3388 		int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3389 		new_rate = vap->iv_bss->ni_txrate;
3390 		if (new_rate != 0 && new_rate != cur_rate) {
3391 			struct iwm_node *in = IWM_NODE(vap->iv_bss);
3392 			iwm_setrates(sc, in, rix);
3393 			iwm_mvm_send_lq_cmd(sc, &in->in_lq, FALSE);
3394 		}
3395 	}
3396 
3397 	return ret;
3398 }
3399 
3400 static void
3401 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3402 {
3403 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3404 	int idx = cmd_hdr->idx;
3405 	int qid = cmd_hdr->qid;
3406 	struct iwm_tx_ring *ring = &sc->txq[qid];
3407 	struct iwm_tx_data *txd = &ring->data[idx];
3408 	struct iwm_node *in = txd->in;
3409 	struct mbuf *m = txd->m;
3410 	int status;
3411 
3412 	KASSERT(txd->done == 0, ("txd not done"));
3413 	KASSERT(txd->in != NULL, ("txd without node"));
3414 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3415 
3416 	sc->sc_tx_timer = 0;
3417 
3418 	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3419 
3420 	/* Unmap and free mbuf. */
3421 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3422 	bus_dmamap_unload(ring->data_dmat, txd->map);
3423 
3424 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3425 	    "free txd %p, in %p\n", txd, txd->in);
3426 	txd->done = 1;
3427 	txd->m = NULL;
3428 	txd->in = NULL;
3429 
3430 	ieee80211_tx_complete(&in->in_ni, m, status);
3431 
3432 	if (--ring->queued < IWM_TX_RING_LOMARK) {
3433 		sc->qfullmsk &= ~(1 << ring->qid);
3434 		if (sc->qfullmsk == 0) {
3435 			iwm_start(sc);
3436 		}
3437 	}
3438 }
3439 
3440 /*
3441  * transmit side
3442  */
3443 
3444 /*
 * Process a "command done" firmware notification.  This is where we wake up
 * processes waiting for a synchronous command completion.
 * Adapted from if_iwn.
3448  */
3449 static void
3450 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3451 {
3452 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3453 	struct iwm_tx_data *data;
3454 
3455 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3456 		return;	/* Not a command ack. */
3457 	}
3458 
3459 	data = &ring->data[pkt->hdr.idx];
3460 
3461 	/* If the command was mapped in an mbuf, free it. */
3462 	if (data->m != NULL) {
3463 		bus_dmamap_sync(ring->data_dmat, data->map,
3464 		    BUS_DMASYNC_POSTWRITE);
3465 		bus_dmamap_unload(ring->data_dmat, data->map);
3466 		m_freem(data->m);
3467 		data->m = NULL;
3468 	}
3469 	wakeup(&ring->desc[pkt->hdr.idx]);
3470 
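	/*
	 * Sanity check: the index of the just-completed command plus
	 * the number of commands still outstanding should match the
	 * ring's write pointer.
	 */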
3471 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3472 		device_printf(sc->sc_dev,
3473 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3474 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3475 		/* XXX call iwm_force_nmi() */
3476 	}
3477 
3478 	KKASSERT(ring->queued > 0);
3479 	ring->queued--;
3480 	if (ring->queued == 0)
3481 		iwm_pcie_clear_cmd_in_flight(sc);
3482 }
3483 
3484 #if 0
3485 /*
3486  * necessary only for block ack mode
3487  */
3488 void
3489 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3490 	uint16_t len)
3491 {
3492 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3493 	uint16_t w_val;
3494 
3495 	scd_bc_tbl = sc->sched_dma.vaddr;
3496 
3497 	len += 8; /* magic numbers came naturally from paris */
3498 	len = roundup(len, 4) / 4;
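	/* e.g. an original len of 100 becomes (100 + 8) = 108 bytes = 27 dwords */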
3499 
3500 	w_val = htole16(sta_id << 12 | len);
3501 
3502 	/* Update TX scheduler. */
3503 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3504 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3505 	    BUS_DMASYNC_PREWRITE);
3506 
	/*
	 * The hardware appears to read the byte-count table with
	 * wraparound, so the first IWM_TFD_QUEUE_SIZE_BC_DUP entries
	 * must be duplicated past the end of the queue.
	 */
3508 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3509 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3510 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3511 		    BUS_DMASYNC_PREWRITE);
3512 	}
3513 }
3514 #endif
3515 
3516 /*
3517  * Fill in the rate related information for a transmit command.
3518  */
3519 static uint8_t
3520 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3521 	struct mbuf *m, struct iwm_tx_cmd *tx)
3522 {
3523 	struct ieee80211com *ic = &sc->sc_ic;
3524 	struct ieee80211_node *ni = &in->in_ni;
3525 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
3526 	const struct ieee80211_txparam *tp = ni->ni_txparms;
3527 	const struct iwm_rate *rinfo;
3528 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3529 	int ridx, rate_flags;
3530 
3531 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3532 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3533 
3534 	if (type == IEEE80211_FC0_TYPE_MGT) {
3535 		ridx = iwm_rate2ridx(sc, tp->mgmtrate);
3536 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3537 		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		ridx = iwm_rate2ridx(sc, tp->mcastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
		ridx = iwm_rate2ridx(sc, tp->ucastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
	} else if (m->m_flags & M_EAPOL) {
		ridx = iwm_rate2ridx(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: EAPOL (%d)\n", __func__, tp->mgmtrate);
3550 	} else if (type == IEEE80211_FC0_TYPE_DATA) {
3551 		/* This is the index into the programmed table */
3552 		tx->initial_rate_index = 0;
3553 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3554 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA (%d)\n",
3555 		    __func__, ni->ni_txrate);
3556 		return ni->ni_txrate;
3557 	} else {
3558 		ridx = iwm_rate2ridx(sc, tp->mgmtrate);
3559 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3560 		    "%s: DEFAULT (%d)\n", __func__, tp->mgmtrate);
3561 	}
3562 
3563 	/*
3564 	 * Sanity check ridx, and provide fallback. If the rate lookup
3565 	 * ever fails, iwm_rate2ridx() will already print an error message.
3566 	 */
3567 	if (ridx < 0 || ridx > IWM_RIDX_MAX) {
3568 		if (ic->ic_curmode == IEEE80211_MODE_11A) {
3569 			/*
3570 			 * XXX this assumes the mode is either 11a or not 11a;
3571 			 * definitely won't work for 11n.
3572 			 */
3573 			ridx = IWM_RIDX_OFDM;
3574 		} else {
3575 			ridx = IWM_RIDX_CCK;
3576 		}
3577 	}
3578 
3579 	rinfo = &iwm_rates[ridx];
3580 
3581 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3582 	    "%s: frame type=%d, ridx=%d, rate=%d, CCK=%d\n",
3583 	    __func__, type, ridx, rinfo->rate, !! (IWM_RIDX_IS_CCK(ridx)));
3584 
3585 	/* XXX TODO: hard-coded TX antenna? */
3586 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3587 	if (IWM_RIDX_IS_CCK(ridx))
3588 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3589 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3590 
3591 	return rinfo->rate;
3592 }
3593 
3594 #define TB0_SIZE 16
3595 static int
3596 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3597 {
3598 	struct ieee80211com *ic = &sc->sc_ic;
3599 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3600 	struct iwm_node *in = IWM_NODE(ni);
3601 	struct iwm_tx_ring *ring;
3602 	struct iwm_tx_data *data;
3603 	struct iwm_tfd *desc;
3604 	struct iwm_device_cmd *cmd;
3605 	struct iwm_tx_cmd *tx;
3606 	struct ieee80211_frame *wh;
3607 	struct ieee80211_key *k = NULL;
3608 #if !defined(__DragonFly__)
3609 	struct mbuf *m1;
3610 #endif
3611 	uint32_t flags;
3612 	u_int hdrlen;
3613 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3614 	int nsegs;
3615 	uint8_t rate, tid, type;
3616 	int i, totlen, error, pad;
3617 
3618 	wh = mtod(m, struct ieee80211_frame *);
3619 	hdrlen = ieee80211_anyhdrsize(wh);
3620 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3621 	tid = 0;
3622 	ring = &sc->txq[ac];
3623 	desc = &ring->desc[ring->cur];
3624 	memset(desc, 0, sizeof(*desc));
3625 	data = &ring->data[ring->cur];
3626 
3627 	/* Fill out iwm_tx_cmd to send to the firmware */
3628 	cmd = &ring->cmd[ring->cur];
3629 	cmd->hdr.code = IWM_TX_CMD;
3630 	cmd->hdr.flags = 0;
3631 	cmd->hdr.qid = ring->qid;
3632 	cmd->hdr.idx = ring->cur;
3633 
3634 	tx = (void *)cmd->data;
3635 	memset(tx, 0, sizeof(*tx));
3636 
3637 	rate = iwm_tx_fill_cmd(sc, in, m, tx);
3638 
3639 	/* Encrypt the frame if need be. */
3640 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3641 		/* Retrieve key for TX && do software encryption. */
3642 		k = ieee80211_crypto_encap(ni, m);
3643 		if (k == NULL) {
3644 			m_freem(m);
3645 			return (ENOBUFS);
3646 		}
3647 		/* 802.11 header may have moved. */
3648 		wh = mtod(m, struct ieee80211_frame *);
3649 	}
3650 
3651 	if (ieee80211_radiotap_active_vap(vap)) {
3652 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3653 
3654 		tap->wt_flags = 0;
3655 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3656 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3657 		tap->wt_rate = rate;
3658 		if (k != NULL)
3659 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3660 		ieee80211_radiotap_tx(vap, m);
3661 	}
3662 
3664 	totlen = m->m_pkthdr.len;
3665 
3666 	flags = 0;
3667 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3668 		flags |= IWM_TX_CMD_FLG_ACK;
3669 	}
3670 
3671 	if (type == IEEE80211_FC0_TYPE_DATA
3672 	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3673 	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3674 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3675 	}
3676 
3677 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3678 	    type != IEEE80211_FC0_TYPE_DATA)
3679 		tx->sta_id = sc->sc_aux_sta.sta_id;
3680 	else
3681 		tx->sta_id = IWM_STATION_ID;
3682 
3683 	if (type == IEEE80211_FC0_TYPE_MGT) {
3684 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3685 
3686 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3687 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3688 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3689 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3690 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3691 		} else {
3692 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3693 		}
3694 	} else {
3695 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3696 	}
3697 
3698 	if (hdrlen & 3) {
3699 		/* First segment length must be a multiple of 4. */
3700 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3701 		pad = 4 - (hdrlen & 3);
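		/* e.g. a 26-byte QoS header yields pad = 2, for 28 bytes */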
3702 	} else
3703 		pad = 0;
3704 
3705 	tx->driver_txop = 0;
3706 	tx->next_frame_len = 0;
3707 
3708 	tx->len = htole16(totlen);
3709 	tx->tid_tspec = tid;
3710 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3711 
3712 	/* Set physical address of "scratch area". */
3713 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3714 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3715 
3716 	/* Copy 802.11 header in TX command. */
3717 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3718 
3719 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3720 
3721 	tx->sec_ctl = 0;
3722 	tx->tx_flags |= htole32(flags);
3723 
3724 	/* Trim 802.11 header. */
3725 	m_adj(m, hdrlen);
3726 #if defined(__DragonFly__)
3727 	error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
3728 					    segs, IWM_MAX_SCATTER - 2,
3729 					    &nsegs, BUS_DMA_NOWAIT);
3730 #else
3731 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3732 	    segs, &nsegs, BUS_DMA_NOWAIT);
3733 #endif
3734 	if (error != 0) {
3735 #if defined(__DragonFly__)
3736 		device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3737 		    error);
3738 		m_freem(m);
3739 		return error;
3740 #else
3741 		if (error != EFBIG) {
3742 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3743 			    error);
3744 			m_freem(m);
3745 			return error;
3746 		}
3747 		/* Too many DMA segments, linearize mbuf. */
3748 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3749 		if (m1 == NULL) {
3750 			device_printf(sc->sc_dev,
3751 			    "%s: could not defrag mbuf\n", __func__);
3752 			m_freem(m);
3753 			return (ENOBUFS);
3754 		}
3755 		m = m1;
3756 
3757 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3758 		    segs, &nsegs, BUS_DMA_NOWAIT);
3759 		if (error != 0) {
3760 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3761 			    error);
3762 			m_freem(m);
3763 			return error;
3764 		}
3765 #endif
3766 	}
3767 	data->m = m;
3768 	data->in = in;
3769 	data->done = 0;
3770 
3771 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3772 	    "sending txd %p, in %p\n", data, data->in);
3773 	KASSERT(data->in != NULL, ("node is NULL"));
3774 
3775 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3776 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3777 	    ring->qid, ring->cur, totlen, nsegs,
3778 	    le32toh(tx->tx_flags),
3779 	    le32toh(tx->rate_n_flags),
3780 	    tx->initial_rate_index
3781 	    );
3782 
3783 	/* Fill TX descriptor. */
3784 	desc->num_tbs = 2 + nsegs;
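	/*
	 * TB0 maps the first 16 bytes of the TX command, TB1 the rest
	 * of the command plus the (padded) 802.11 header; the remaining
	 * TBs map the payload segments.  Each hi_n_len packs the upper
	 * 4 bits of the 36-bit DMA address in its low nibble and the
	 * buffer length in the upper 12 bits, hence the << 4 shifts.
	 */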
3785 
3786 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3787 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3788 	    (TB0_SIZE << 4);
3789 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3790 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3791 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3792 	      + hdrlen + pad - TB0_SIZE) << 4);
3793 
3794 	/* Other DMA segments are for data payload. */
3795 	for (i = 0; i < nsegs; i++) {
3796 		seg = &segs[i];
3797 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len =
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
3801 	}
3802 
3803 	bus_dmamap_sync(ring->data_dmat, data->map,
3804 	    BUS_DMASYNC_PREWRITE);
3805 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3806 	    BUS_DMASYNC_PREWRITE);
3807 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3808 	    BUS_DMASYNC_PREWRITE);
3809 
3810 #if 0
3811 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3812 #endif
3813 
3814 	/* Kick TX ring. */
3815 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3816 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3817 
3818 	/* Mark TX ring as full if we reach a certain threshold. */
3819 	if (++ring->queued > IWM_TX_RING_HIMARK) {
3820 		sc->qfullmsk |= 1 << ring->qid;
3821 	}
3822 
3823 	return 0;
3824 }
3825 
3826 static int
3827 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3828     const struct ieee80211_bpf_params *params)
3829 {
3830 	struct ieee80211com *ic = ni->ni_ic;
3831 	struct iwm_softc *sc = ic->ic_softc;
3832 	int error = 0;
3833 
3834 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3835 	    "->%s begin\n", __func__);
3836 
3837 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3838 		m_freem(m);
3839 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3840 		    "<-%s not RUNNING\n", __func__);
3841 		return (ENETDOWN);
3842         }
3843 
3844 	IWM_LOCK(sc);
	/* XXX fix this: the bpf params are currently ignored */
	error = iwm_tx(sc, m, ni, 0);
	sc->sc_tx_timer = 5;
	IWM_UNLOCK(sc);

	return (error);
3855 }
3856 
3857 /*
3858  * mvm/tx.c
3859  */
3860 
3861 /*
3862  * Note that there are transports that buffer frames before they reach
3863  * the firmware. This means that after flush_tx_path is called, the
3864  * queue might not be empty. The race-free way to handle this is to:
3865  * 1) set the station as draining
3866  * 2) flush the Tx path
3867  * 3) wait for the transport queues to be empty
3868  */
3869 int
3870 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3871 {
3872 	int ret;
3873 	struct iwm_tx_path_flush_cmd flush_cmd = {
3874 		.queues_ctl = htole32(tfd_msk),
3875 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3876 	};
3877 
3878 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3879 	    sizeof(flush_cmd), &flush_cmd);
3880 	if (ret)
3881                 device_printf(sc->sc_dev,
3882 		    "Flushing tx queue failed: %d\n", ret);
3883 	return ret;
3884 }
3885 
3886 static int
3887 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3888 {
3889 	struct iwm_time_quota_cmd cmd;
3890 	int i, idx, ret, num_active_macs, quota, quota_rem;
3891 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3892 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3893 	uint16_t id;
3894 
3895 	memset(&cmd, 0, sizeof(cmd));
3896 
3897 	/* currently, PHY ID == binding ID */
3898 	if (ivp) {
3899 		id = ivp->phy_ctxt->id;
3900 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3901 		colors[id] = ivp->phy_ctxt->color;
3902 
		n_ifs[id] = 1;
3905 	}
3906 
3907 	/*
3908 	 * The FW's scheduling session consists of
3909 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
	 * equally between all the bindings that require quota.
3911 	 */
3912 	num_active_macs = 0;
3913 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3914 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3915 		num_active_macs += n_ifs[i];
3916 	}
3917 
3918 	quota = 0;
3919 	quota_rem = 0;
3920 	if (num_active_macs) {
3921 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3922 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3923 	}
3924 
3925 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3926 		if (colors[i] < 0)
3927 			continue;
3928 
3929 		cmd.quotas[idx].id_and_color =
3930 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3931 
3932 		if (n_ifs[i] <= 0) {
3933 			cmd.quotas[idx].quota = htole32(0);
3934 			cmd.quotas[idx].max_duration = htole32(0);
3935 		} else {
3936 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3937 			cmd.quotas[idx].max_duration = htole32(0);
3938 		}
3939 		idx++;
3940 	}
3941 
3942 	/* Give the remainder of the session to the first binding */
3943 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3944 
3945 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3946 	    sizeof(cmd), &cmd);
3947 	if (ret)
3948 		device_printf(sc->sc_dev,
3949 		    "%s: Failed to send quota: %d\n", __func__, ret);
3950 	return ret;
3951 }
3952 
3953 /*
3954  * ieee80211 routines
3955  */
3956 
3957 /*
3958  * Change to AUTH state in 80211 state machine.  Roughly matches what
3959  * Linux does in bss_info_changed().
3960  */
3961 static int
3962 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3963 {
3964 	struct ieee80211_node *ni;
3965 	struct iwm_node *in;
3966 	struct iwm_vap *iv = IWM_VAP(vap);
3967 	uint32_t duration;
3968 	int error;
3969 
3970 	/*
	 * XXX I have a feeling that the vap node is being
3972 	 * freed from underneath us. Grr.
3973 	 */
3974 	ni = ieee80211_ref_node(vap->iv_bss);
3975 	in = IWM_NODE(ni);
3976 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3977 	    "%s: called; vap=%p, bss ni=%p\n",
3978 	    __func__,
3979 	    vap,
3980 	    ni);
3981 
3982 	in->in_assoc = 0;
3983 
3984 	/*
3985 	 * Firmware bug - it'll crash if the beacon interval is less
	 * than 16.  There is no way to work around this, so refuse the
	 * station state change; this will cause net80211 to abandon
3988 	 * attempts to connect to this AP, and eventually wpa_s will
3989 	 * blacklist the AP...
3990 	 */
3991 	if (ni->ni_intval < 16) {
3992 		device_printf(sc->sc_dev,
3993 		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
3994 		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
3995 		error = EINVAL;
3996 		goto out;
3997 	}
3998 
3999 	error = iwm_allow_mcast(vap, sc);
4000 	if (error) {
4001 		device_printf(sc->sc_dev,
4002 		    "%s: failed to set multicast\n", __func__);
4003 		goto out;
4004 	}
4005 
4006 	/*
4007 	 * This is where it deviates from what Linux does.
4008 	 *
4009 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4010 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4011 	 * and always does a mac_ctx_changed().
4012 	 *
	 * The OpenBSD port doesn't attempt to do that - it resets things
4014 	 * at odd states and does the add here.
4015 	 *
4016 	 * So, until the state handling is fixed (ie, we never reset
4017 	 * the NIC except for a firmware failure, which should drag
4018 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4019 	 * contexts that are required), let's do a dirty hack here.
4020 	 */
4021 	if (iv->is_uploaded) {
4022 		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4023 			device_printf(sc->sc_dev,
4024 			    "%s: failed to update MAC\n", __func__);
4025 			goto out;
4026 		}
4027 	} else {
4028 		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4029 			device_printf(sc->sc_dev,
4030 			    "%s: failed to add MAC\n", __func__);
4031 			goto out;
4032 		}
4033 	}
4034 
4035 	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4036 	    in->in_ni.ni_chan, 1, 1)) != 0) {
4037 		device_printf(sc->sc_dev,
4038 		    "%s: failed update phy ctxt\n", __func__);
4039 		goto out;
4040 	}
4041 	iv->phy_ctxt = &sc->sc_phyctxt[0];
4042 
4043 	if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: binding update cmd failed\n", __func__);
4046 		goto out;
4047 	}
4048 	/*
4049 	 * Authentication becomes unreliable when powersaving is left enabled
4050 	 * here. Powersaving will be activated again when association has
4051 	 * finished or is aborted.
4052 	 */
4053 	iv->ps_disabled = TRUE;
4054 	error = iwm_mvm_power_update_mac(sc);
4055 	iv->ps_disabled = FALSE;
4056 	if (error != 0) {
4057 		device_printf(sc->sc_dev,
4058 		    "%s: failed to update power management\n",
4059 		    __func__);
4060 		goto out;
4061 	}
4062 	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4063 		device_printf(sc->sc_dev,
4064 		    "%s: failed to add sta\n", __func__);
4065 		goto out;
4066 	}
4067 
4068 	/*
4069 	 * Prevent the FW from wandering off channel during association
4070 	 * by "protecting" the session with a time event.
4071 	 */
4072 	/* XXX duration is in units of TU, not MS */
4073 	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4074 	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
4075 	DELAY(100);
4076 
4077 	error = 0;
4078 out:
4079 	ieee80211_free_node(ni);
4080 	return (error);
4081 }
4082 
4083 static int
4084 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4085 {
4086 	uint32_t tfd_msk;
4087 
4088 	/*
4089 	 * Ok, so *technically* the proper set of calls for going
4090 	 * from RUN back to SCAN is:
4091 	 *
4092 	 * iwm_mvm_power_mac_disable(sc, in);
4093 	 * iwm_mvm_mac_ctxt_changed(sc, vap);
4094 	 * iwm_mvm_rm_sta(sc, in);
4095 	 * iwm_mvm_update_quotas(sc, NULL);
4096 	 * iwm_mvm_mac_ctxt_changed(sc, in);
4097 	 * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4098 	 * iwm_mvm_mac_ctxt_remove(sc, in);
4099 	 *
	 * However, that freezes the device no matter which permutations
4101 	 * and modifications are attempted.  Obviously, this driver is missing
4102 	 * something since it works in the Linux driver, but figuring out what
4103 	 * is missing is a little more complicated.  Now, since we're going
4104 	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up yours, device!
4106 	 */
4107 	/*
4108 	 * Just using 0xf for the queues mask is fine as long as we only
4109 	 * get here from RUN state.
4110 	 */
4111 	tfd_msk = 0xf;
4112 	iwm_xmit_queue_drain(sc);
4113 	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4114 	/*
4115 	 * We seem to get away with just synchronously sending the
4116 	 * IWM_TXPATH_FLUSH command.
4117 	 */
4118 //	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4119 	iwm_stop_device(sc);
4120 	iwm_init_hw(sc);
4121 	if (in)
4122 		in->in_assoc = 0;
4123 	return 0;
4124 
4125 #if 0
4126 	int error;
4127 
4128 	iwm_mvm_power_mac_disable(sc, in);
4129 
4130 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4131 		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4132 		return error;
4133 	}
4134 
4135 	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4136 		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4137 		return error;
4138 	}
4139 	error = iwm_mvm_rm_sta(sc, in);
4140 	in->in_assoc = 0;
4141 	iwm_mvm_update_quotas(sc, NULL);
4142 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4143 		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4144 		return error;
4145 	}
4146 	iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4147 
4148 	iwm_mvm_mac_ctxt_remove(sc, in);
4149 
4150 	return error;
4151 #endif
4152 }
4153 
4154 static struct ieee80211_node *
4155 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4156 {
4157 	return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4158 	    M_INTWAIT | M_ZERO);
4159 }
4160 
4161 static uint8_t
4162 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4163 {
4164 	uint8_t plcp = rate_n_flags & 0xff;
4165 	int i;
4166 
4167 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4168 		if (iwm_rates[i].plcp == plcp)
4169 			return iwm_rates[i].rate;
4170 	}
4171 	return 0;
4172 }
4173 
4174 uint8_t
4175 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4176 {
4177 	int i;
4178 	uint8_t rval;
4179 
4180 	for (i = 0; i < rs->rs_nrates; i++) {
4181 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4182 		if (rval == iwm_rates[ridx].rate)
4183 			return rs->rs_rates[i];
4184 	}
4185 
4186 	return 0;
4187 }
4188 
4189 static int
4190 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4191 {
4192 	int i;
4193 
4194 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4195 		if (iwm_rates[i].rate == rate)
4196 			return i;
4197 	}
4198 
4199 	device_printf(sc->sc_dev,
4200 	    "%s: WARNING: device rate for %u not found!\n",
4201 	    __func__, rate);
4202 
4203 	return -1;
4204 }
4205 
4206 static void
4207 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4208 {
4209 	struct ieee80211_node *ni = &in->in_ni;
4210 	struct iwm_lq_cmd *lq = &in->in_lq;
4211 	struct ieee80211_rateset *rs = &ni->ni_rates;
4212 	int nrates = rs->rs_nrates;
4213 	int i, ridx, tab = 0;
4214 	int txant = 0;
4215 
4216 	KKASSERT(rix >= 0 && rix < nrates);
4217 
4218 	if (nrates > nitems(lq->rs_table)) {
4219 		device_printf(sc->sc_dev,
4220 		    "%s: node supports %d rates, driver handles "
4221 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4222 		return;
4223 	}
4224 	if (nrates == 0) {
4225 		device_printf(sc->sc_dev,
4226 		    "%s: node supports 0 rates, odd!\n", __func__);
4227 		return;
4228 	}
4229 	nrates = imin(rix + 1, nrates);
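	/* Only rates up to and including the chosen rix are programmed. */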
4230 
4231 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4232 	    "%s: nrates=%d\n", __func__, nrates);
4233 
4234 	/* then construct a lq_cmd based on those */
4235 	memset(lq, 0, sizeof(*lq));
4236 	lq->sta_id = IWM_STATION_ID;
4237 
4238 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4239 	if (ni->ni_flags & IEEE80211_NODE_HT)
4240 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4241 
4242 	/*
	 * Are these used? (We don't do SISO or MIMO.)
	 * They need to be set to non-zero, though, or we get an error.
4245 	 */
4246 	lq->single_stream_ant_msk = 1;
4247 	lq->dual_stream_ant_msk = 1;
4248 
4249 	/*
4250 	 * Build the actual rate selection table.
4251 	 * The lowest bits are the rates.  Additionally,
4252 	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna.
	 * Note that we add the rates with the highest rate first
4255 	 * (opposite of ni_rates).
4256 	 */
4257 	for (i = 0; i < nrates; i++) {
4258 		int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4259 		int nextant;
4260 
4261 		/* Map 802.11 rate to HW rate index. */
4262 		ridx = iwm_rate2ridx(sc, rate);
4263 		if (ridx == -1)
4264 			continue;
4265 
4266 		if (txant == 0)
4267 			txant = iwm_mvm_get_valid_tx_ant(sc);
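		/*
		 * ffs() is 1-based, so this picks the lowest antenna
		 * still set in the mask and rotates through the valid
		 * antennas, e.g. txant = 0x3 selects 0x1 first, then 0x2.
		 */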
4268 		nextant = 1<<(ffs(txant)-1);
4269 		txant &= ~nextant;
4270 
4271 		tab = iwm_rates[ridx].plcp;
4272 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4273 		if (IWM_RIDX_IS_CCK(ridx))
4274 			tab |= IWM_RATE_MCS_CCK_MSK;
4275 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4276 		    "station rate i=%d, rate=%d, hw=%x\n",
4277 		    i, iwm_rates[ridx].rate, tab);
4278 		lq->rs_table[i] = htole32(tab);
4279 	}
4280 	/* then fill the rest with the lowest possible rate */
4281 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4282 		KASSERT(tab != 0, ("invalid tab"));
4283 		lq->rs_table[i] = htole32(tab);
4284 	}
4285 }
4286 
4287 static int
4288 iwm_media_change(struct ifnet *ifp)
4289 {
4290 	struct ieee80211vap *vap = ifp->if_softc;
4291 	struct ieee80211com *ic = vap->iv_ic;
4292 	struct iwm_softc *sc = ic->ic_softc;
4293 	int error;
4294 
4295 	error = ieee80211_media_change(ifp);
4296 	if (error != ENETRESET)
4297 		return error;
4298 
4299 	IWM_LOCK(sc);
4300 	if (ic->ic_nrunning > 0) {
4301 		iwm_stop(sc);
4302 		iwm_init(sc);
4303 	}
4304 	IWM_UNLOCK(sc);
4305 	return error;
4306 }
4307 
4308 
4309 static int
4310 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4311 {
4312 	struct iwm_vap *ivp = IWM_VAP(vap);
4313 	struct ieee80211com *ic = vap->iv_ic;
4314 	struct iwm_softc *sc = ic->ic_softc;
4315 	struct iwm_node *in;
4316 	int error;
4317 
4318 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4319 	    "switching state %s -> %s\n",
4320 	    ieee80211_state_name[vap->iv_state],
4321 	    ieee80211_state_name[nstate]);
4322 	IEEE80211_UNLOCK(ic);
4323 	IWM_LOCK(sc);
4324 
4325 	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4326 		iwm_led_blink_stop(sc);
4327 
4328 	/* disable beacon filtering if we're hopping out of RUN */
4329 	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4330 		iwm_mvm_disable_beacon_filter(sc);
4331 
4332 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4333 			in->in_assoc = 0;
4334 
4335 		if (nstate == IEEE80211_S_INIT) {
4336 			IWM_UNLOCK(sc);
4337 			IEEE80211_LOCK(ic);
4338 			error = ivp->iv_newstate(vap, nstate, arg);
4339 			IEEE80211_UNLOCK(ic);
4340 			IWM_LOCK(sc);
4341 			iwm_release(sc, NULL);
4342 			IWM_UNLOCK(sc);
4343 			IEEE80211_LOCK(ic);
4344 			return error;
4345 		}
4346 
4347 		/*
4348 		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
4349 		 * above then the card will be completely reinitialized,
4350 		 * so the driver must do everything necessary to bring the card
4351 		 * from INIT to SCAN.
4352 		 *
4353 		 * Additionally, upon receiving deauth frame from AP,
4354 		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4355 		 * state. This will also fail with this driver, so bring the FSM
4356 		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4357 		 *
4358 		 * XXX TODO: fix this for FreeBSD!
4359 		 */
4360 		if (nstate == IEEE80211_S_SCAN ||
4361 		    nstate == IEEE80211_S_AUTH ||
4362 		    nstate == IEEE80211_S_ASSOC) {
4363 			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4364 			    "Force transition to INIT; MGT=%d\n", arg);
4365 			IWM_UNLOCK(sc);
4366 			IEEE80211_LOCK(ic);
4367 			/* Always pass arg as -1 since we can't Tx right now. */
4368 			/*
4369 			 * XXX arg is just ignored anyway when transitioning
4370 			 *     to IEEE80211_S_INIT.
4371 			 */
4372 			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4373 			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4374 			    "Going INIT->SCAN\n");
4375 			nstate = IEEE80211_S_SCAN;
4376 			IEEE80211_UNLOCK(ic);
4377 			IWM_LOCK(sc);
4378 		}
4379 	}
4380 
4381 	switch (nstate) {
4382 	case IEEE80211_S_INIT:
4383 	case IEEE80211_S_SCAN:
4384 		if (vap->iv_state == IEEE80211_S_AUTH ||
4385 		    vap->iv_state == IEEE80211_S_ASSOC) {
4386 			int myerr;
4387 			IWM_UNLOCK(sc);
4388 			IEEE80211_LOCK(ic);
4389 			myerr = ivp->iv_newstate(vap, nstate, arg);
4390 			IEEE80211_UNLOCK(ic);
4391 			IWM_LOCK(sc);
4392 			error = iwm_mvm_rm_sta(sc, vap, FALSE);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to remove station: %d\n",
				    __func__, error);
			}
			error = iwm_mvm_mac_ctxt_changed(sc, vap);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to change mac context: %d\n",
				    __func__, error);
			}
			error = iwm_mvm_binding_remove_vif(sc, ivp);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to remove channel ctx: %d\n",
				    __func__, error);
			}
4410 			ivp->phy_ctxt = NULL;
4411 			error = iwm_mvm_power_update_mac(sc);
4412 			if (error != 0) {
4413 				device_printf(sc->sc_dev,
4414 				    "%s: failed to update power management\n",
4415 				    __func__);
4416 			}
4417 			IWM_UNLOCK(sc);
4418 			IEEE80211_LOCK(ic);
4419 			return myerr;
4420 		}
4421 		break;
4422 
4423 	case IEEE80211_S_AUTH:
4424 		if ((error = iwm_auth(vap, sc)) != 0) {
4425 			device_printf(sc->sc_dev,
4426 			    "%s: could not move to auth state: %d\n",
4427 			    __func__, error);
4428 		}
4429 		break;
4430 
4431 	case IEEE80211_S_ASSOC:
4432 		/*
4433 		 * EBS may be disabled due to previous failures reported by FW.
4434 		 * Reset EBS status here assuming environment has been changed.
4435 		 */
4436 		sc->last_ebs_successful = TRUE;
4437 		break;
4438 
4439 	case IEEE80211_S_RUN:
4440 		in = IWM_NODE(vap->iv_bss);
		/*
		 * Update the association state, now that we have it all
		 * (e.g. the associd comes in at this point).
		 */
4443 		error = iwm_mvm_update_sta(sc, in);
4444 		if (error != 0) {
4445 			device_printf(sc->sc_dev,
4446 			    "%s: failed to update STA\n", __func__);
4447 			IWM_UNLOCK(sc);
4448 			IEEE80211_LOCK(ic);
4449 			return error;
4450 		}
4451 		in->in_assoc = 1;
4452 		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4453 		if (error != 0) {
4454 			device_printf(sc->sc_dev,
4455 			    "%s: failed to update MAC: %d\n", __func__, error);
4456 		}
4457 
4458 		iwm_mvm_sf_update(sc, vap, FALSE);
4459 		iwm_mvm_enable_beacon_filter(sc, ivp);
4460 		iwm_mvm_power_update_mac(sc);
4461 		iwm_mvm_update_quotas(sc, ivp);
4462 		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4463 		iwm_setrates(sc, in, rix);
4464 
4465 		if ((error = iwm_mvm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4466 			device_printf(sc->sc_dev,
4467 			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4468 		}
4469 
4470 		iwm_mvm_led_enable(sc);
4471 		break;
4472 
4473 	default:
4474 		break;
4475 	}
4476 	IWM_UNLOCK(sc);
4477 	IEEE80211_LOCK(ic);
4478 
4479 	return (ivp->iv_newstate(vap, nstate, arg));
4480 }
4481 
4482 void
4483 iwm_endscan_cb(void *arg, int pending)
4484 {
4485 	struct iwm_softc *sc = arg;
4486 	struct ieee80211com *ic = &sc->sc_ic;
4487 
4488 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4489 	    "%s: scan ended\n",
4490 	    __func__);
4491 
4492 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4493 }
4494 
4495 static int
4496 iwm_send_bt_init_conf(struct iwm_softc *sc)
4497 {
4498 	struct iwm_bt_coex_cmd bt_cmd;
4499 
4500 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4501 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4502 
4503 	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4504 	    &bt_cmd);
4505 }
4506 
4507 static boolean_t
4508 iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4509 {
4510 	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4511 	boolean_t tlv_lar = fw_has_capa(&sc->sc_fw.ucode_capa,
4512 					IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4513 
4514 	if (iwm_lar_disable)
4515 		return FALSE;
4516 
4517 	/*
4518 	 * Enable LAR only if it is supported by the FW (TLV) &&
4519 	 * enabled in the NVM
4520 	 */
4521 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4522 		return nvm_lar && tlv_lar;
4523 	else
4524 		return tlv_lar;
4525 }
4526 
4527 static boolean_t
4528 iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4529 {
4530 	return fw_has_api(&sc->sc_fw.ucode_capa,
4531 			  IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4532 	       fw_has_capa(&sc->sc_fw.ucode_capa,
4533 			   IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4534 }
4535 
4536 static int
4537 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4538 {
4539 	struct iwm_mcc_update_cmd mcc_cmd;
4540 	struct iwm_host_cmd hcmd = {
4541 		.id = IWM_MCC_UPDATE_CMD,
4542 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4543 		.data = { &mcc_cmd },
4544 	};
4545 	int ret;
4546 #ifdef IWM_DEBUG
4547 	struct iwm_rx_packet *pkt;
4548 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4549 	struct iwm_mcc_update_resp *mcc_resp;
4550 	int n_channels;
4551 	uint16_t mcc;
4552 #endif
4553 	int resp_v2 = fw_has_capa(&sc->sc_fw.ucode_capa,
4554 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4555 
4556 	if (!iwm_mvm_is_lar_supported(sc)) {
4557 		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4558 		    __func__);
4559 		return 0;
4560 	}
4561 
4562 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4563 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4564 	if (iwm_mvm_is_wifi_mcc_supported(sc))
4565 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4566 	else
4567 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4568 
4569 	if (resp_v2)
4570 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4571 	else
4572 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4573 
4574 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4575 	    "send MCC update to FW with '%c%c' src = %d\n",
4576 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4577 
4578 	ret = iwm_send_cmd(sc, &hcmd);
4579 	if (ret)
4580 		return ret;
4581 
4582 #ifdef IWM_DEBUG
4583 	pkt = hcmd.resp_pkt;
4584 
4585 	/* Extract MCC response */
4586 	if (resp_v2) {
4587 		mcc_resp = (void *)pkt->data;
4588 		mcc = mcc_resp->mcc;
4589 		n_channels =  le32toh(mcc_resp->n_channels);
4590 	} else {
4591 		mcc_resp_v1 = (void *)pkt->data;
4592 		mcc = mcc_resp_v1->mcc;
4593 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4594 	}
4595 
4596 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4597 	if (mcc == 0)
4598 		mcc = 0x3030;  /* "00" - world */
4599 
4600 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4601 	    "regulatory domain '%c%c' (%d channels available)\n",
4602 	    mcc >> 8, mcc & 0xff, n_channels);
4603 #endif
4604 	iwm_free_resp(sc, &hcmd);
4605 
4606 	return 0;
4607 }
4608 
4609 static void
4610 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4611 {
4612 	struct iwm_host_cmd cmd = {
4613 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4614 		.len = { sizeof(uint32_t), },
4615 		.data = { &backoff, },
4616 	};
4617 
4618 	if (iwm_send_cmd(sc, &cmd) != 0) {
4619 		device_printf(sc->sc_dev,
4620 		    "failed to change thermal tx backoff\n");
4621 	}
4622 }
4623 
4624 static int
4625 iwm_init_hw(struct iwm_softc *sc)
4626 {
4627 	struct ieee80211com *ic = &sc->sc_ic;
4628 	int error, i, ac;
4629 
4630 	sc->sf_state = IWM_SF_UNINIT;
4631 
4632 	if ((error = iwm_start_hw(sc)) != 0) {
4633 		kprintf("iwm_start_hw: failed %d\n", error);
4634 		return error;
4635 	}
4636 
4637 	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4638 		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
4639 		return error;
4640 	}
4641 
4642 	/*
	 * We should stop and restart the HW, since that INIT
	 * image was just loaded.
4645 	 */
4646 	iwm_stop_device(sc);
4647 	sc->sc_ps_disabled = FALSE;
4648 	if ((error = iwm_start_hw(sc)) != 0) {
4649 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4650 		return error;
4651 	}
4652 
	/* Restart, this time with the regular firmware. */
4654 	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4655 	if (error) {
4656 		device_printf(sc->sc_dev, "could not load firmware\n");
4657 		goto error;
4658 	}
4659 
4660 	error = iwm_mvm_sf_update(sc, NULL, FALSE);
4661 	if (error)
4662 		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4663 
4664 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4665 		device_printf(sc->sc_dev, "bt init conf failed\n");
4666 		goto error;
4667 	}
4668 
4669 	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4670 	if (error != 0) {
4671 		device_printf(sc->sc_dev, "antenna config failed\n");
4672 		goto error;
4673 	}
4674 
4675 	/* Send phy db control command and then phy db calibration */
4676 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4677 		goto error;
4678 
4679 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4680 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4681 		goto error;
4682 	}
4683 
4684 	/* Add auxiliary station for scanning */
4685 	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4686 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4687 		goto error;
4688 	}
4689 
4690 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4691 		/*
4692 		 * The channel used here isn't relevant as it's
4693 		 * going to be overwritten in the other flows.
4694 		 * For now use the first channel we have.
4695 		 */
4696 		if ((error = iwm_mvm_phy_ctxt_add(sc,
4697 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4698 			goto error;
4699 	}
4700 
4701 	/* Initialize tx backoffs to the minimum. */
4702 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4703 		iwm_mvm_tt_tx_backoff(sc, 0);
4704 
4705 	if (iwm_mvm_config_ltr(sc) != 0)
4706 		device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
4707 
4708 	error = iwm_mvm_power_update_device(sc);
4709 	if (error)
4710 		goto error;
4711 
4712 	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4713 		goto error;
4714 
4715 	if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4716 		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4717 			goto error;
4718 	}
4719 
4720 	/* Enable Tx queues. */
4721 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4722 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4723 		    iwm_mvm_ac_to_tx_fifo[ac]);
4724 		if (error)
4725 			goto error;
4726 	}
4727 
4728 	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4729 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4730 		goto error;
4731 	}
4732 
4733 	return 0;
4734 
4735  error:
4736 	iwm_stop_device(sc);
4737 	return error;
4738 }
4739 
4740 /* Allow multicast from our BSSID. */
4741 static int
4742 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4743 {
4744 	struct ieee80211_node *ni = vap->iv_bss;
4745 	struct iwm_mcast_filter_cmd *cmd;
4746 	size_t size;
4747 	int error;
4748 
4749 	size = roundup(sizeof(*cmd), 4);
4750 	cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4751 	if (cmd == NULL)
4752 		return ENOMEM;
4753 	cmd->filter_own = 1;
4754 	cmd->port_id = 0;
4755 	cmd->count = 0;
4756 	cmd->pass_all = 1;
4757 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4758 
4759 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4760 	    IWM_CMD_SYNC, size, cmd);
4761 	kfree(cmd, M_DEVBUF);
4762 
4763 	return (error);
4764 }
4765 
4766 /*
4767  * ifnet interfaces
4768  */
4769 
4770 static void
4771 iwm_init(struct iwm_softc *sc)
4772 {
4773 	int error;
4774 
4775 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4776 		return;
4777 	}
4778 	sc->sc_generation++;
4779 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4780 
4781 	if ((error = iwm_init_hw(sc)) != 0) {
4782 		kprintf("iwm_init_hw failed %d\n", error);
4783 		iwm_stop(sc);
4784 		return;
4785 	}
4786 
4787 	/*
4788 	 * Ok, firmware loaded and we are jogging
4789 	 */
4790 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4791 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4792 }
4793 
4794 static int
4795 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4796 {
4797 	struct iwm_softc *sc;
4798 	int error;
4799 
4800 	sc = ic->ic_softc;
4801 
4802 	IWM_LOCK(sc);
4803 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4804 		IWM_UNLOCK(sc);
4805 		return (ENXIO);
4806 	}
4807 	error = mbufq_enqueue(&sc->sc_snd, m);
4808 	if (error) {
4809 		IWM_UNLOCK(sc);
4810 		return (error);
4811 	}
4812 	iwm_start(sc);
4813 	IWM_UNLOCK(sc);
4814 	return (0);
4815 }
4816 
4817 /*
4818  * Dequeue packets from sendq and call send.
4819  */
4820 static void
4821 iwm_start(struct iwm_softc *sc)
4822 {
4823 	struct ieee80211_node *ni;
4824 	struct mbuf *m;
4825 	int ac = 0;
4826 
4827 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4828 	while (sc->qfullmsk == 0 &&
4829 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4830 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4831 		if (iwm_tx(sc, m, ni, ac) != 0) {
4832 			if_inc_counter(ni->ni_vap->iv_ifp,
4833 			    IFCOUNTER_OERRORS, 1);
4834 			ieee80211_free_node(ni);
4835 			continue;
4836 		}
4837 		sc->sc_tx_timer = 15;
4838 	}
4839 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4840 }
4841 
4842 static void
4843 iwm_stop(struct iwm_softc *sc)
4844 {
4845 
4846 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4847 	sc->sc_flags |= IWM_FLAG_STOPPED;
4848 	sc->sc_generation++;
4849 	iwm_led_blink_stop(sc);
4850 	sc->sc_tx_timer = 0;
4851 	iwm_stop_device(sc);
4852 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4853 }
4854 
4855 static void
4856 iwm_watchdog(void *arg)
4857 {
4858 	struct iwm_softc *sc = arg;
4859 
4860 	if (sc->sc_tx_timer > 0) {
4861 		if (--sc->sc_tx_timer == 0) {
4862 			device_printf(sc->sc_dev, "device timeout\n");
4863 #ifdef IWM_DEBUG
4864 			iwm_nic_error(sc);
4865 #endif
4866 			iwm_stop(sc);
4867 #if defined(__DragonFly__)
4868 			++sc->sc_ic.ic_oerrors;
4869 #else
4870 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4871 #endif
4872 			return;
4873 		}
4874 	}
4875 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4876 }
4877 
4878 static void
4879 iwm_parent(struct ieee80211com *ic)
4880 {
4881 	struct iwm_softc *sc = ic->ic_softc;
4882 	int startall = 0;
4883 
4884 	IWM_LOCK(sc);
4885 	if (ic->ic_nrunning > 0) {
4886 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4887 			iwm_init(sc);
4888 			startall = 1;
4889 		}
4890 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4891 		iwm_stop(sc);
4892 	IWM_UNLOCK(sc);
4893 	if (startall)
4894 		ieee80211_start_all(ic);
4895 }
4896 
4897 /*
4898  * The interrupt side of things
4899  */
4900 
4901 /*
4902  * error dumping routines are from iwlwifi/mvm/utils.c
4903  */
4904 
4905 /*
4906  * Note: This structure is read from the device with IO accesses,
4907  * and the reading already does the endian conversion. As it is
4908  * read with uint32_t-sized accesses, any members with a different size
4909  * need to be ordered correctly though!
4910  */
4911 struct iwm_error_event_table {
4912 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4913 	uint32_t error_id;		/* type of error */
4914 	uint32_t trm_hw_status0;	/* TRM HW status */
4915 	uint32_t trm_hw_status1;	/* TRM HW status */
4916 	uint32_t blink2;		/* branch link */
4917 	uint32_t ilink1;		/* interrupt link */
4918 	uint32_t ilink2;		/* interrupt link */
4919 	uint32_t data1;		/* error-specific data */
4920 	uint32_t data2;		/* error-specific data */
4921 	uint32_t data3;		/* error-specific data */
4922 	uint32_t bcon_time;		/* beacon timer */
4923 	uint32_t tsf_low;		/* network timestamp function timer */
4924 	uint32_t tsf_hi;		/* network timestamp function timer */
4925 	uint32_t gp1;		/* GP1 timer register */
4926 	uint32_t gp2;		/* GP2 timer register */
4927 	uint32_t fw_rev_type;	/* firmware revision type */
4928 	uint32_t major;		/* uCode version major */
4929 	uint32_t minor;		/* uCode version minor */
4930 	uint32_t hw_ver;		/* HW Silicon version */
4931 	uint32_t brd_ver;		/* HW board version */
4932 	uint32_t log_pc;		/* log program counter */
4933 	uint32_t frame_ptr;		/* frame pointer */
4934 	uint32_t stack_ptr;		/* stack pointer */
4935 	uint32_t hcmd;		/* last host command header */
4936 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
4937 				 * rxtx_flag */
4938 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
4939 				 * host_flag */
4940 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
4941 				 * enc_flag */
4942 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
4943 				 * time_flag */
4944 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
4945 				 * wico interrupt */
4946 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
4947 	uint32_t wait_event;		/* wait event() caller address */
4948 	uint32_t l2p_control;	/* L2pControlField */
4949 	uint32_t l2p_duration;	/* L2pDurationField */
4950 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
4951 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
4952 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
4953 				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* date and time of the
				 * compilation */
4956 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
4957 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4958 
4959 /*
4960  * UMAC error struct - relevant starting from family 8000 chip.
4961  * Note: This structure is read from the device with IO accesses,
4962  * and the reading already does the endian conversion. As it is
4963  * read with u32-sized accesses, any members with a different size
4964  * need to be ordered correctly though!
4965  */
4966 struct iwm_umac_error_event_table {
4967 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4968 	uint32_t error_id;	/* type of error */
4969 	uint32_t blink1;	/* branch link */
4970 	uint32_t blink2;	/* branch link */
4971 	uint32_t ilink1;	/* interrupt link */
4972 	uint32_t ilink2;	/* interrupt link */
4973 	uint32_t data1;		/* error-specific data */
4974 	uint32_t data2;		/* error-specific data */
4975 	uint32_t data3;		/* error-specific data */
4976 	uint32_t umac_major;
4977 	uint32_t umac_minor;
4978 	uint32_t frame_pointer;	/* core register 27*/
4979 	uint32_t stack_pointer;	/* core register 28 */
4980 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
4981 	uint32_t nic_isr_pref;	/* ISR status register */
4982 } __packed;
4983 
4984 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4985 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4986 
4987 #ifdef IWM_DEBUG
4988 struct {
4989 	const char *name;
4990 	uint8_t num;
4991 } advanced_lookup[] = {
4992 	{ "NMI_INTERRUPT_WDG", 0x34 },
4993 	{ "SYSASSERT", 0x35 },
4994 	{ "UCODE_VERSION_MISMATCH", 0x37 },
4995 	{ "BAD_COMMAND", 0x38 },
4996 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4997 	{ "FATAL_ERROR", 0x3D },
4998 	{ "NMI_TRM_HW_ERR", 0x46 },
4999 	{ "NMI_INTERRUPT_TRM", 0x4C },
5000 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5001 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5002 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5003 	{ "NMI_INTERRUPT_HOST", 0x66 },
5004 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5005 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5006 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5007 	{ "ADVANCED_SYSASSERT", 0 },
5008 };
5009 
5010 static const char *
5011 iwm_desc_lookup(uint32_t num)
5012 {
5013 	int i;
5014 
5015 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5016 		if (advanced_lookup[i].num == num)
5017 			return advanced_lookup[i].name;
5018 
5019 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5020 	return advanced_lookup[i].name;
5021 }
5022 
5023 static void
5024 iwm_nic_umac_error(struct iwm_softc *sc)
5025 {
5026 	struct iwm_umac_error_event_table table;
5027 	uint32_t base;
5028 
5029 	base = sc->umac_error_event_table;
5030 
5031 	if (base < 0x800000) {
5032 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5033 		    base);
5034 		return;
5035 	}
5036 
5037 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5038 		device_printf(sc->sc_dev, "reading errlog failed\n");
5039 		return;
5040 	}
5041 
5042 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5043 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5044 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5045 		    sc->sc_flags, table.valid);
5046 	}
5047 
5048 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5049 		iwm_desc_lookup(table.error_id));
5050 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5051 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5052 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5053 	    table.ilink1);
5054 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5055 	    table.ilink2);
5056 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5057 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5058 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5059 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5060 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5061 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5062 	    table.frame_pointer);
5063 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5064 	    table.stack_pointer);
5065 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5066 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5067 	    table.nic_isr_pref);
5068 }
5069 
5070 /*
5071  * Support for dumping the error log seemed like a good idea ...
5072  * but it's mostly hex junk and the only sensible thing is the
5073  * hw/ucode revision (which we know anyway).  Since it's here,
5074  * I'll just leave it in, just in case e.g. the Intel guys want to
5075  * help us decipher some "ADVANCED_SYSASSERT" later.
5076  */
5077 static void
5078 iwm_nic_error(struct iwm_softc *sc)
5079 {
5080 	struct iwm_error_event_table table;
5081 	uint32_t base;
5082 
5083 	device_printf(sc->sc_dev, "dumping device error log\n");
5084 	base = sc->error_event_table;
5085 	if (base < 0x800000) {
5086 		device_printf(sc->sc_dev,
5087 		    "Invalid error log pointer 0x%08x\n", base);
5088 		return;
5089 	}
5090 
5091 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5092 		device_printf(sc->sc_dev, "reading errlog failed\n");
5093 		return;
5094 	}
5095 
5096 	if (!table.valid) {
5097 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5098 		return;
5099 	}
5100 
5101 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5102 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5103 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5104 		    sc->sc_flags, table.valid);
5105 	}
5106 
5107 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5108 	    iwm_desc_lookup(table.error_id));
5109 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5110 	    table.trm_hw_status0);
5111 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5112 	    table.trm_hw_status1);
5113 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5114 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5115 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5116 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5117 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5118 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5119 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5120 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5121 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5122 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5123 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5124 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5125 	    table.fw_rev_type);
5126 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5127 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5128 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5129 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5130 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5131 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5132 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5133 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5134 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5135 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5136 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5137 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5138 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5139 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5140 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5141 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5142 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5143 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5144 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5145 
5146 	if (sc->umac_error_event_table)
5147 		iwm_nic_umac_error(sc);
5148 }
5149 #endif
5150 
5151 static void
5152 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5153 {
5154 	struct ieee80211com *ic = &sc->sc_ic;
5155 	struct iwm_cmd_response *cresp;
5156 	struct mbuf *m1;
5157 	uint32_t offset = 0;
5158 	uint32_t maxoff = IWM_RBUF_SIZE;
5159 	uint32_t nextoff;
5160 	boolean_t stolen = FALSE;
5161 
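/*
 * HAVEROOM(a) is true when at least one more packet header (the 4-byte
 * len_n_flags word plus a struct iwm_cmd_header) still fits between
 * offset 'a' and the end of the receive buffer.
 */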
5162 #define HAVEROOM(a)	\
5163     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5164 
5165 	while (HAVEROOM(offset)) {
5166 		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5167 		    offset);
5168 		int qid, idx, code, len;
5169 
5170 		qid = pkt->hdr.qid;
5171 		idx = pkt->hdr.idx;
5172 
5173 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5174 
5175 		/*
5176 		 * We randomly get these from the firmware, with no idea why;
5177 		 * they at least seem harmless, so just ignore them for now.
5178 		 */
5179 		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5180 		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5181 			break;
5182 		}
5183 
5184 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5185 		    "rx packet qid=%d idx=%d type=%x\n",
5186 		    qid & ~0x80, pkt->hdr.idx, code);
5187 
5188 		len = iwm_rx_packet_len(pkt);
5189 		len += sizeof(uint32_t); /* account for status word */
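		/*
		 * Frames within an RX buffer are aligned to
		 * IWM_FH_RSCSR_FRAME_ALIGN (0x40) bytes, so nextoff advances
		 * in multiples of 64 (e.g. a len of 53 rounds up to 64).
		 */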
5190 		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5191 
5192 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5193 
5194 		switch (code) {
5195 		case IWM_REPLY_RX_PHY_CMD:
5196 			iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5197 			break;
5198 
5199 		case IWM_REPLY_RX_MPDU_CMD: {
5200 			/*
5201 			 * If this is the last frame in the RX buffer, we
5202 			 * can directly feed the mbuf to the sharks here.
5203 			 */
5204 			struct iwm_rx_packet *nextpkt = mtodoff(m,
5205 			    struct iwm_rx_packet *, nextoff);
5206 			if (!HAVEROOM(nextoff) ||
5207 			    (nextpkt->hdr.code == 0 &&
5208 			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5209 			     nextpkt->hdr.idx == 0) ||
5210 			    (nextpkt->len_n_flags ==
5211 			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5212 				if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5213 					stolen = FALSE;
5214 					/* Make sure we abort the loop */
5215 					nextoff = maxoff;
5216 				}
5217 				break;
5218 			}
5219 
5220 			/*
5221 			 * Use m_copym instead of m_split, because that
5222 			 * makes it easier to keep a valid rx buffer in
5223 			 * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5224 			 *
5225 			 * We need to start m_copym() at offset 0, to get the
5226 			 * M_PKTHDR flag preserved.
5227 			 */
5228 			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5229 			if (m1) {
5230 				if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5231 					stolen = TRUE;
5232 				else
5233 					m_freem(m1);
5234 			}
5235 			break;
5236 		}
5237 
5238 		case IWM_TX_CMD:
5239 			iwm_mvm_rx_tx_cmd(sc, pkt);
5240 			break;
5241 
5242 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5243 			struct iwm_missed_beacons_notif *resp;
5244 			int missed;
5245 
5246 			/* XXX look at mac_id to determine interface ID */
5247 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5248 
5249 			resp = (void *)pkt->data;
5250 			missed = le32toh(resp->consec_missed_beacons);
5251 
5252 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5253 			    "%s: MISSED_BEACON: mac_id=%d, "
5254 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5255 			    "num_rx=%d\n",
5256 			    __func__,
5257 			    le32toh(resp->mac_id),
5258 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5259 			    le32toh(resp->consec_missed_beacons),
5260 			    le32toh(resp->num_expected_beacons),
5261 			    le32toh(resp->num_recvd_beacons));
5262 
5263 			/* Be paranoid */
5264 			if (vap == NULL)
5265 				break;
5266 
5267 			/* XXX no net80211 locking? */
5268 			if (vap->iv_state == IEEE80211_S_RUN &&
5269 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5270 				if (missed > vap->iv_bmissthreshold) {
5271 					/* XXX bad locking; turn into task */
5272 					IWM_UNLOCK(sc);
5273 					ieee80211_beacon_miss(ic);
5274 					IWM_LOCK(sc);
5275 				}
5276 			}
5277 
5278 			break; }
5279 
5280 		case IWM_MFUART_LOAD_NOTIFICATION:
5281 			break;
5282 
5283 		case IWM_MVM_ALIVE:
5284 			break;
5285 
5286 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5287 			break;
5288 
5289 		case IWM_STATISTICS_NOTIFICATION:
5290 			iwm_mvm_handle_rx_statistics(sc, pkt);
5291 			break;
5292 
5293 		case IWM_NVM_ACCESS_CMD:
5294 		case IWM_MCC_UPDATE_CMD:
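			/*
			 * sc_wantresp identifies the TX queue and slot the
			 * pending synchronous command was sent from, encoded
			 * as (((qid & ~0x80) << 16) | idx); stash the
			 * response for that command here.
			 */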
5295 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5296 				memcpy(sc->sc_cmd_resp,
5297 				    pkt, sizeof(sc->sc_cmd_resp));
5298 			}
5299 			break;
5300 
5301 		case IWM_MCC_CHUB_UPDATE_CMD: {
5302 			struct iwm_mcc_chub_notif *notif;
5303 			notif = (void *)pkt->data;
5304 
5305 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5306 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5307 			sc->sc_fw_mcc[2] = '\0';
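			/* e.g. an mcc of 0x5553 decodes to the string "US" */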
5308 			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5309 			    "fw source %d sent CC '%s'\n",
5310 			    notif->source_id, sc->sc_fw_mcc);
5311 			break;
5312 		}
5313 
5314 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5315 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5316 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5317 			struct iwm_dts_measurement_notif_v1 *notif;
5318 
5319 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5320 				device_printf(sc->sc_dev,
5321 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5322 				break;
5323 			}
5324 			notif = (void *)pkt->data;
5325 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5326 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5327 			    notif->temp);
5328 			break;
5329 		}
5330 
5331 		case IWM_PHY_CONFIGURATION_CMD:
5332 		case IWM_TX_ANT_CONFIGURATION_CMD:
5333 		case IWM_ADD_STA:
5334 		case IWM_MAC_CONTEXT_CMD:
5335 		case IWM_REPLY_SF_CFG_CMD:
5336 		case IWM_POWER_TABLE_CMD:
5337 		case IWM_LTR_CONFIG:
5338 		case IWM_PHY_CONTEXT_CMD:
5339 		case IWM_BINDING_CONTEXT_CMD:
5340 		case IWM_TIME_EVENT_CMD:
5341 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5342 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5343 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5344 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5345 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5346 		case IWM_REPLY_BEACON_FILTERING_CMD:
5347 		case IWM_MAC_PM_POWER_TABLE:
5348 		case IWM_TIME_QUOTA_CMD:
5349 		case IWM_REMOVE_STA:
5350 		case IWM_TXPATH_FLUSH:
5351 		case IWM_LQ_CMD:
5352 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5353 				 IWM_FW_PAGING_BLOCK_CMD):
5354 		case IWM_BT_CONFIG:
5355 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5356 			cresp = (void *)pkt->data;
5357 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5358 				memcpy(sc->sc_cmd_resp,
5359 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5360 			}
5361 			break;
5362 
5363 		/* ignore */
5364 		case IWM_PHY_DB_CMD:
5365 			break;
5366 
5367 		case IWM_INIT_COMPLETE_NOTIF:
5368 			break;
5369 
5370 		case IWM_SCAN_OFFLOAD_COMPLETE:
5371 			iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5372 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5373 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5374 				ieee80211_runtask(ic, &sc->sc_es_task);
5375 			}
5376 			break;
5377 
5378 		case IWM_SCAN_ITERATION_COMPLETE: {
5379 			struct iwm_lmac_scan_complete_notif *notif;
5380 			notif = (void *)pkt->data;
5381 			break;
5382 		}
5383 
5384 		case IWM_SCAN_COMPLETE_UMAC:
5385 			iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5386 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5387 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5388 				ieee80211_runtask(ic, &sc->sc_es_task);
5389 			}
5390 			break;
5391 
5392 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5393 			struct iwm_umac_scan_iter_complete_notif *notif;
5394 			notif = (void *)pkt->data;
5395 
5396 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5397 			    "complete, status=0x%x, %d channels scanned\n",
5398 			    notif->status, notif->scanned_channels);
5399 			break;
5400 		}
5401 
5402 		case IWM_REPLY_ERROR: {
5403 			struct iwm_error_resp *resp;
5404 			resp = (void *)pkt->data;
5405 
5406 			device_printf(sc->sc_dev,
5407 			    "firmware error 0x%x, cmd 0x%x\n",
5408 			    le32toh(resp->error_type),
5409 			    resp->cmd_id);
5410 			break;
5411 		}
5412 
5413 		case IWM_TIME_EVENT_NOTIFICATION: {
5414 			struct iwm_time_event_notif *notif;
5415 			notif = (void *)pkt->data;
5416 
5417 			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5418 			    "TE notif status = 0x%x action = 0x%x\n",
5419 			    notif->status, notif->action);
5420 			break;
5421 		}
5422 
5423 		/*
5424 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5425 		 * messages. Just ignore them for now.
5426 		 */
5427 		case IWM_DEBUG_LOG_MSG:
5428 			break;
5429 
5430 		case IWM_MCAST_FILTER_CMD:
5431 			break;
5432 
5433 		case IWM_SCD_QUEUE_CFG: {
5434 			struct iwm_scd_txq_cfg_rsp *rsp;
5435 			rsp = (void *)pkt->data;
5436 
5437 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5438 			    "queue cfg token=0x%x sta_id=%d "
5439 			    "tid=%d scd_queue=%d\n",
5440 			    rsp->token, rsp->sta_id, rsp->tid,
5441 			    rsp->scd_queue);
5442 			break;
5443 		}
5444 
5445 		default:
5446 			device_printf(sc->sc_dev,
5447 			    "frame %d/%d %x UNHANDLED (this should "
5448 			    "not happen)\n", qid & ~0x80, idx,
5449 			    pkt->len_n_flags);
5450 			break;
5451 		}
5452 
5453 		/*
5454 		 * Why test bit 0x80?  The Linux driver:
5455 		 *
5456 		 * There is one exception:  uCode sets bit 15 when it
5457 		 * originates the response/notification, i.e. when the
5458 		 * response/notification is not a direct response to a
5459 		 * command sent by the driver.  For example, uCode issues
5460 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5461 		 * it is not a direct response to any driver command.
5462 		 *
5463 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5464 		 * uses a slightly different format for pkt->hdr, and "qid"
5465 		 * is actually the upper byte of a two-byte field.
5466 		 */
5467 		if (!(qid & (1 << 7)))
5468 			iwm_cmd_done(sc, pkt);
5469 
5470 		offset = nextoff;
5471 	}
5472 	if (stolen)
5473 		m_freem(m);
5474 #undef HAVEROOM
5475 }
5476 
5477 /*
5478  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5479  * Basic structure from if_iwn
5480  */
5481 static void
5482 iwm_notif_intr(struct iwm_softc *sc)
5483 {
5484 	uint16_t hw;
5485 
5486 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5487 	    BUS_DMASYNC_POSTREAD);
5488 
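	/*
	 * closed_rb_num is the index of the most recently closed receive
	 * buffer; only its low 12 bits are valid.
	 */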
5489 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5490 
5491 	/*
5492 	 * Process responses
5493 	 */
5494 	while (sc->rxq.cur != hw) {
5495 		struct iwm_rx_ring *ring = &sc->rxq;
5496 		struct iwm_rx_data *data = &ring->data[ring->cur];
5497 
5498 		bus_dmamap_sync(ring->data_dmat, data->map,
5499 		    BUS_DMASYNC_POSTREAD);
5500 
5501 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5502 		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5503 		iwm_handle_rxb(sc, data->m);
5504 
5505 		ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5506 	}
5507 
5508 	/*
5509 	 * Tell the firmware that it can reuse the ring entries that
5510 	 * we have just processed.
5511 	 * The hardware apparently gets upset unless we align the write
5512 	 * pointer down to a multiple of 8 (e.g. 37 rounds down to 32).
5513 	 */
5514 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5515 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5516 }
5517 
5518 static void
5519 iwm_intr(void *arg)
5520 {
5521 	struct iwm_softc *sc = arg;
5522 	int handled = 0;
5523 	int r1, r2, rv = 0;
5524 	int isperiodic = 0;
5525 
5526 #if defined(__DragonFly__)
5527 	if (sc->sc_mem == NULL) {
5528 		kprintf("iwm_intr: detached\n");
5529 		return;
5530 	}
5531 #endif
5532 	IWM_LOCK(sc);
5533 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5534 
5535 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5536 		uint32_t *ict = sc->ict_dma.vaddr;
5537 		int tmp;
5538 
5539 		tmp = le32toh(ict[sc->ict_cur]);
5540 		if (!tmp)
5541 			goto out_ena;
5542 
5543 		/*
5544 		 * OK, there was something; keep plowing until we have it all.
5545 		 */
5546 		r1 = r2 = 0;
5547 		while (tmp) {
5548 			r1 |= tmp;
5549 			ict[sc->ict_cur] = 0;
5550 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5551 			tmp = le32toh(ict[sc->ict_cur]);
5552 		}
5553 
5554 		/* All-ones is not a valid status; the hardware is probably gone. */
5555 		if (r1 == 0xffffffff)
5556 			r1 = 0;
5557 
5558 		/*
		 * Per the Linux driver: a hardware bug can clear the RX bit
		 * (bit 15 here, which the expansion below shifts to bit 31)
		 * when interrupt coalescing is used, but bits 18 and 19 stay
		 * set, so use them to reconstruct the RX bit.
		 */
5559 		if (r1 & 0xc0000)
5560 			r1 |= 0x8000;
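		/*
		 * Only bits 0-7 and 24-31 of IWM_CSR_INT are used, so an ICT
		 * entry packs them into two bytes; expand that back into the
		 * CSR_INT layout.
		 */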
5561 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5562 	} else {
5563 		r1 = IWM_READ(sc, IWM_CSR_INT);
5564 		/* "hardware gone" (where, fishing?) */
5565 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5566 			goto out;
5567 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5568 	}
5569 	if (r1 == 0 && r2 == 0) {
5570 		goto out_ena;
5571 	}
5572 
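	/*
	 * Ack the interrupts we are about to handle; IWM_CSR_INT is
	 * write-1-to-clear, and setting the non-enabled bits clears any
	 * stale causes as well.
	 */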
5573 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5574 
5575 	/* Safely ignore these bits for debug checks below */
5576 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5577 
5578 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5579 		int i;
5580 		struct ieee80211com *ic = &sc->sc_ic;
5581 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5582 
5583 #ifdef IWM_DEBUG
5584 		iwm_nic_error(sc);
5585 #endif
5586 		/* Dump driver status (TX and RX rings) while we're here. */
5587 		device_printf(sc->sc_dev, "driver status:\n");
5588 		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5589 			struct iwm_tx_ring *ring = &sc->txq[i];
5590 			device_printf(sc->sc_dev,
5591 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5592 			    "queued=%-3d\n",
5593 			    i, ring->qid, ring->cur, ring->queued);
5594 		}
5595 		device_printf(sc->sc_dev,
5596 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5597 		device_printf(sc->sc_dev,
5598 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5599 
5600 		/* Don't stop the device; just do a VAP restart */
5601 		IWM_UNLOCK(sc);
5602 
5603 		if (vap == NULL) {
5604 			kprintf("%s: null vap\n", __func__);
5605 			return;
5606 		}
5607 
5608 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5609 		    "restarting\n", __func__, vap->iv_state);
5610 
5611 		ieee80211_restart_all(ic);
5612 		return;
5613 	}
5614 
5615 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5616 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5617 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5618 		iwm_stop(sc);
5619 		rv = 1;
5620 		goto out;
5621 	}
5622 
5623 	/* firmware chunk loaded */
5624 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5625 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5626 		handled |= IWM_CSR_INT_BIT_FH_TX;
5627 		sc->sc_fw_chunk_done = 1;
5628 		wakeup(&sc->sc_fw);
5629 	}
5630 
5631 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5632 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5633 		if (iwm_check_rfkill(sc)) {
5634 			device_printf(sc->sc_dev,
5635 			    "%s: rfkill switch, disabling interface\n",
5636 			    __func__);
5637 			iwm_stop(sc);
5638 		}
5639 	}
5640 
5641 	/*
5642 	 * The Linux driver uses periodic interrupts to avoid races.
5643 	 * We cargo-cult like it's going out of fashion.
5644 	 */
5645 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5646 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5647 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5648 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5649 			IWM_WRITE_1(sc,
5650 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5651 		isperiodic = 1;
5652 	}
5653 
5654 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5655 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5656 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5657 
5658 		iwm_notif_intr(sc);
5659 
5660 		/* enable periodic interrupt, see above */
5661 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5662 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5663 			    IWM_CSR_INT_PERIODIC_ENA);
5664 	}
5665 
5666 	if (__predict_false(r1 & ~handled))
5667 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5668 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5669 	rv = 1;
5670 
5671  out_ena:
5672 	iwm_restore_interrupts(sc);
5673  out:
5674 	IWM_UNLOCK(sc);
5675 	return;
5676 }
5677 
5678 /*
5679  * Autoconf glue-sniffing
5680  */
5681 #define	PCI_VENDOR_INTEL		0x8086
5682 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5683 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5684 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5685 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5686 #define	PCI_PRODUCT_INTEL_WL_3168	0x24fb
5687 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5688 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5689 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5690 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5691 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5692 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5693 #define	PCI_PRODUCT_INTEL_WL_8265	0x24fd
5694 
5695 static const struct iwm_devices {
5696 	uint16_t		device;
5697 	const struct iwm_cfg	*cfg;
5698 } iwm_devices[] = {
5699 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5700 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5701 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5702 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5703 	{ PCI_PRODUCT_INTEL_WL_3168,   &iwm3168_cfg },
5704 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5705 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5706 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5707 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5708 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5709 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5710 	{ PCI_PRODUCT_INTEL_WL_8265,   &iwm8265_cfg },
5711 };
5712 
5713 static int
5714 iwm_probe(device_t dev)
5715 {
5716 	int i;
5717 
5718 	for (i = 0; i < nitems(iwm_devices); i++) {
5719 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5720 		    pci_get_device(dev) == iwm_devices[i].device) {
5721 			device_set_desc(dev, iwm_devices[i].cfg->name);
5722 			return (BUS_PROBE_DEFAULT);
5723 		}
5724 	}
5725 
5726 	return (ENXIO);
5727 }
5728 
5729 static int
5730 iwm_dev_check(device_t dev)
5731 {
5732 	struct iwm_softc *sc;
5733 	uint16_t devid;
5734 	int i;
5735 
5736 	sc = device_get_softc(dev);
5737 
5738 	devid = pci_get_device(dev);
5739 	for (i = 0; i < NELEM(iwm_devices); i++) {
5740 		if (iwm_devices[i].device == devid) {
5741 			sc->cfg = iwm_devices[i].cfg;
5742 			return (0);
5743 		}
5744 	}
5745 	device_printf(dev, "unknown adapter type\n");
5746 	return ENXIO;
5747 }
5748 
5749 /* PCI registers */
5750 #define PCI_CFG_RETRY_TIMEOUT	0x041
5751 
5752 static int
5753 iwm_pci_attach(device_t dev)
5754 {
5755 	struct iwm_softc *sc;
5756 	int count, error, rid;
5757 	uint16_t reg;
5758 #if defined(__DragonFly__)
5759 	int irq_flags;
5760 #endif
5761 
5762 	sc = device_get_softc(dev);
5763 
5764 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5765 	 * PCI Tx retries from interfering with C3 CPU state */
5766 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5767 
5768 	/* Enable bus-mastering and hardware bug workaround. */
5769 	pci_enable_busmaster(dev);
5770 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5771 	/* if !MSI */
5772 	if (reg & PCIM_STATUS_INTxSTATE) {
5773 		reg &= ~PCIM_STATUS_INTxSTATE;
5774 	}
5775 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5776 
5777 	rid = PCIR_BAR(0);
5778 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5779 	    RF_ACTIVE);
5780 	if (sc->sc_mem == NULL) {
5781 		device_printf(sc->sc_dev, "can't map mem space\n");
5782 		return (ENXIO);
5783 	}
5784 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5785 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5786 
5787 	/* Install interrupt handler. */
5788 	count = 1;
5789 	rid = 0;
5790 #if defined(__DragonFly__)
5791 	pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
5792 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
5793 #else
5794 	if (pci_alloc_msi(dev, &count) == 0)
5795 		rid = 1;
5796 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5797 	    (rid != 0 ? 0 : RF_SHAREABLE));
5798 #endif
5799 	if (sc->sc_irq == NULL) {
5800 		device_printf(dev, "can't map interrupt\n");
5801 		return (ENXIO);
5802 	}
5803 #if defined(__DragonFly__)
5804 	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
5805 			       iwm_intr, sc, &sc->sc_ih,
5806 			       &wlan_global_serializer);
5807 #else
5808 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5809 	    NULL, iwm_intr, sc, &sc->sc_ih);
5810 #endif
5811 	if (sc->sc_ih == NULL) {
5812 		device_printf(dev, "can't establish interrupt\n");
5813 #if defined(__DragonFly__)
5814 		pci_release_msi(dev);
5815 #endif
5816 		return (ENXIO);
5817 	}
5818 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5819 
5820 	return (0);
5821 }
5822 
5823 static void
5824 iwm_pci_detach(device_t dev)
5825 {
5826 	struct iwm_softc *sc = device_get_softc(dev);
5827 
5828 	if (sc->sc_irq != NULL) {
5829 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5830 		bus_release_resource(dev, SYS_RES_IRQ,
5831 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5832 		pci_release_msi(dev);
5833 #if defined(__DragonFly__)
5834 		sc->sc_irq = NULL;
5835 #endif
5836 	}
5837 	if (sc->sc_mem != NULL) {
5838 		bus_release_resource(dev, SYS_RES_MEMORY,
5839 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5840 #if defined(__DragonFly__)
5841 		sc->sc_mem = NULL;
5842 #endif
5843 	}
5844 }
5845 
5848 static int
5849 iwm_attach(device_t dev)
5850 {
5851 	struct iwm_softc *sc = device_get_softc(dev);
5852 	struct ieee80211com *ic = &sc->sc_ic;
5853 	int error;
5854 	int txq_i, i;
5855 
5856 	sc->sc_dev = dev;
5857 	sc->sc_attached = 1;
5858 	IWM_LOCK_INIT(sc);
5859 	mbufq_init(&sc->sc_snd, ifqmaxlen);
5860 #if defined(__DragonFly__)
5861 	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
5862 #else
5863 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5864 #endif
5865 	callout_init(&sc->sc_led_blink_to);
5866 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5867 
5868 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
5869 	if (sc->sc_notif_wait == NULL) {
5870 		device_printf(dev, "failed to init notification wait struct\n");
5871 		goto fail;
5872 	}
5873 
5874 	sc->sf_state = IWM_SF_UNINIT;
5875 
5876 	/* Init phy db */
5877 	sc->sc_phy_db = iwm_phy_db_init(sc);
5878 	if (!sc->sc_phy_db) {
5879 		device_printf(dev, "Cannot init phy_db\n");
5880 		goto fail;
5881 	}
5882 
5883 	/* Treat EBS as successful unless the firmware states otherwise. */
5884 	sc->last_ebs_successful = TRUE;
5885 
5886 	/* PCI attach */
5887 	error = iwm_pci_attach(dev);
5888 	if (error != 0)
5889 		goto fail;
5890 
5891 	sc->sc_wantresp = -1;
5892 
5893 	/* Match device id */
5894 	error = iwm_dev_check(dev);
5895 	if (error != 0)
5896 		goto fail;
5897 
5898 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5899 	/*
5900 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
5901 	 * changed, and now the revision step also includes bit 0-1 (no more
5902 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
5903 	 * in the old format.
5904 	 */
5905 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
5906 		int ret;
5907 		uint32_t hw_step;
5908 
5909 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5910 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5911 
5912 		if (iwm_prepare_card_hw(sc) != 0) {
5913 			device_printf(dev, "could not initialize hardware\n");
5914 			goto fail;
5915 		}
5916 
5917 		/*
5918 		 * To recognize C-step silicon, the driver must read the
5919 		 * chip version ID located at the AUX bus MISC address.
5920 		 */
5921 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5922 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5923 		DELAY(2);
5924 
5925 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5926 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5927 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5928 				   25000);
5929 		if (!ret) {
5930 			device_printf(sc->sc_dev,
5931 			    "Failed to wake up the nic\n");
5932 			goto fail;
5933 		}
5934 
5935 		if (iwm_nic_lock(sc)) {
5936 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5937 			hw_step |= IWM_ENABLE_WFPM;
5938 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5939 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5940 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5941 			if (hw_step == 0x3)
5942 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5943 						(IWM_SILICON_C_STEP << 2);
5944 			iwm_nic_unlock(sc);
5945 		} else {
5946 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
5947 			goto fail;
5948 		}
5949 	}
5950 
5951 	/* special-case 7265D, it has the same PCI IDs. */
5952 	if (sc->cfg == &iwm7265_cfg &&
5953 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
5954 		sc->cfg = &iwm7265d_cfg;
5955 	}
5956 
5957 	/* Allocate DMA memory for firmware transfers. */
5958 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
5959 		device_printf(dev, "could not allocate memory for firmware\n");
5960 		goto fail;
5961 	}
5962 
5963 	/* Allocate "Keep Warm" page. */
5964 	if ((error = iwm_alloc_kw(sc)) != 0) {
5965 		device_printf(dev, "could not allocate keep warm page\n");
5966 		goto fail;
5967 	}
5968 
5969 	/* We use ICT interrupts */
5970 	if ((error = iwm_alloc_ict(sc)) != 0) {
5971 		device_printf(dev, "could not allocate ICT table\n");
5972 		goto fail;
5973 	}
5974 
5975 	/* Allocate TX scheduler "rings". */
5976 	if ((error = iwm_alloc_sched(sc)) != 0) {
5977 		device_printf(dev, "could not allocate TX scheduler rings\n");
5978 		goto fail;
5979 	}
5980 
5981 	/* Allocate TX rings */
5982 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
5983 		if ((error = iwm_alloc_tx_ring(sc,
5984 		    &sc->txq[txq_i], txq_i)) != 0) {
5985 			device_printf(dev,
5986 			    "could not allocate TX ring %d\n",
5987 			    txq_i);
5988 			goto fail;
5989 		}
5990 	}
5991 
5992 	/* Allocate RX ring. */
5993 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
5994 		device_printf(dev, "could not allocate RX ring\n");
5995 		goto fail;
5996 	}
5997 
5998 	/* Clear pending interrupts. */
5999 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6000 
6001 	ic->ic_softc = sc;
6002 	ic->ic_name = device_get_nameunit(sc->sc_dev);
6003 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
6004 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
6005 
6006 	/* Set device capabilities. */
6007 	ic->ic_caps =
6008 	    IEEE80211_C_STA |
6009 	    IEEE80211_C_WPA |		/* WPA/RSN */
6010 	    IEEE80211_C_WME |
6011 	    IEEE80211_C_PMGT |
6012 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
6013 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
6014 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
6015 	    ;
6016 	/* Advertise full-offload scanning */
6017 	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6018 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6019 		sc->sc_phyctxt[i].id = i;
6020 		sc->sc_phyctxt[i].color = 0;
6021 		sc->sc_phyctxt[i].ref = 0;
6022 		sc->sc_phyctxt[i].channel = NULL;
6023 	}
6024 
6025 	/* Default noise floor */
6026 	sc->sc_noise = -96;
6027 
6028 	/* Max RSSI */
6029 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6030 
6031 #ifdef IWM_DEBUG
6032 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6033 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6034 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6035 #endif
6036 
6037 	error = iwm_read_firmware(sc);
6038 	if (error) {
6039 		goto fail;
6040 	} else if (sc->sc_fw.fw_fp == NULL) {
6041 		/*
6042 		 * XXX Add a solution for properly deferring firmware load
6043 		 *     during bootup.
6044 		 */
6045 		goto fail;
6046 	} else {
6047 		sc->sc_preinit_hook.ich_func = iwm_preinit;
6048 		sc->sc_preinit_hook.ich_arg = sc;
6049 		sc->sc_preinit_hook.ich_desc = "iwm";
6050 		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6051 			device_printf(dev,
6052 			    "config_intrhook_establish failed\n");
6053 			goto fail;
6054 		}
6055 	}
6056 
6057 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6058 	    "<-%s\n", __func__);
6059 
6060 	return 0;
6061 
6062 	/* Free allocated memory if something failed during attachment. */
6063 fail:
6064 	iwm_detach_local(sc, 0);
6065 
6066 	return ENXIO;
6067 }
6068 
6069 static int
6070 iwm_is_valid_ether_addr(uint8_t *addr)
6071 {
6072 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6073 
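	/* Reject group addresses (bit 0 of the first octet) and all-zeros. */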
6074 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6075 		return (FALSE);
6076 
6077 	return (TRUE);
6078 }
6079 
6080 static int
6081 iwm_wme_update(struct ieee80211com *ic)
6082 {
6083 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
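/* e.g. an ECWmin of 4 yields a CWmin of 2^4 - 1 = 15 */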
6084 	struct iwm_softc *sc = ic->ic_softc;
6085 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6086 	struct iwm_vap *ivp = IWM_VAP(vap);
6087 	struct iwm_node *in;
6088 	struct wmeParams tmp[WME_NUM_AC];
6089 	int aci, error;
6090 
6091 	if (vap == NULL)
6092 		return (0);
6093 
6094 	IEEE80211_LOCK(ic);
6095 	for (aci = 0; aci < WME_NUM_AC; aci++)
6096 		tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6097 	IEEE80211_UNLOCK(ic);
6098 
6099 	IWM_LOCK(sc);
6100 	for (aci = 0; aci < WME_NUM_AC; aci++) {
6101 		const struct wmeParams *ac = &tmp[aci];
6102 		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6103 		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6104 		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6105 		ivp->queue_params[aci].edca_txop =
6106 		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6107 	}
6108 	ivp->have_wme = TRUE;
6109 	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6110 		in = IWM_NODE(vap->iv_bss);
6111 		if (in->in_assoc) {
6112 			if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6113 				device_printf(sc->sc_dev,
6114 				    "%s: failed to update MAC\n", __func__);
6115 			}
6116 		}
6117 	}
6118 	IWM_UNLOCK(sc);
6119 
6120 	return (0);
6121 #undef IWM_EXP2
6122 }
6123 
6124 static void
6125 iwm_preinit(void *arg)
6126 {
6127 	struct iwm_softc *sc = arg;
6128 	device_t dev = sc->sc_dev;
6129 	struct ieee80211com *ic = &sc->sc_ic;
6130 	int error;
6131 
6132 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6133 	    "->%s\n", __func__);
6134 
6135 	IWM_LOCK(sc);
6136 	if ((error = iwm_start_hw(sc)) != 0) {
6137 		device_printf(dev, "could not initialize hardware\n");
6138 		IWM_UNLOCK(sc);
6139 		goto fail;
6140 	}
6141 
6142 	error = iwm_run_init_mvm_ucode(sc, 1);
6143 	iwm_stop_device(sc);
6144 	if (error) {
6145 		IWM_UNLOCK(sc);
6146 		goto fail;
6147 	}
6148 	device_printf(dev,
6149 	    "hw rev 0x%x, fw ver %s, address %s\n",
6150 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6151 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6152 
6153 	/* not all hardware can do 5GHz band */
6154 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6155 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6156 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6157 	IWM_UNLOCK(sc);
6158 
6159 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6160 	    ic->ic_channels);
6161 
6162 	/*
6163 	 * At this point we've committed; if setup fails from here on,
6164 	 * we also have to tear down the net80211 state.
6165 	 */
6166 	ieee80211_ifattach(ic);
6167 	ic->ic_vap_create = iwm_vap_create;
6168 	ic->ic_vap_delete = iwm_vap_delete;
6169 	ic->ic_raw_xmit = iwm_raw_xmit;
6170 	ic->ic_node_alloc = iwm_node_alloc;
6171 	ic->ic_scan_start = iwm_scan_start;
6172 	ic->ic_scan_end = iwm_scan_end;
6173 	ic->ic_update_mcast = iwm_update_mcast;
6174 	ic->ic_getradiocaps = iwm_init_channel_map;
6175 	ic->ic_set_channel = iwm_set_channel;
6176 	ic->ic_scan_curchan = iwm_scan_curchan;
6177 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6178 	ic->ic_wme.wme_update = iwm_wme_update;
6179 	ic->ic_parent = iwm_parent;
6180 	ic->ic_transmit = iwm_transmit;
6181 	iwm_radiotap_attach(sc);
6182 	if (bootverbose)
6183 		ieee80211_announce(ic);
6184 
6185 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6186 	    "<-%s\n", __func__);
6187 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6188 
6189 	return;
6190 fail:
6191 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6192 	iwm_detach_local(sc, 0);
6193 }
6194 
6195 /*
6196  * Attach the interface to 802.11 radiotap.
6197  */
6198 static void
6199 iwm_radiotap_attach(struct iwm_softc *sc)
6200 {
6201 	struct ieee80211com *ic = &sc->sc_ic;
6202 
6203 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6204 	    "->%s begin\n", __func__);
6205 	ieee80211_radiotap_attach(ic,
6206 	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6207 	    IWM_TX_RADIOTAP_PRESENT,
6208 	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6209 	    IWM_RX_RADIOTAP_PRESENT);
6210 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6211 	    "<-%s end\n", __func__);
6212 }
6213 
6214 static struct ieee80211vap *
6215 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6216     enum ieee80211_opmode opmode, int flags,
6217     const uint8_t bssid[IEEE80211_ADDR_LEN],
6218     const uint8_t mac[IEEE80211_ADDR_LEN])
6219 {
6220 	struct iwm_vap *ivp;
6221 	struct ieee80211vap *vap;
6222 
6223 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6224 		return NULL;
6225 	ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
6226 	vap = &ivp->iv_vap;
6227 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6228 	vap->iv_bmissthreshold = 10;            /* override default */
6229 	/* Override with driver methods. */
6230 	ivp->iv_newstate = vap->iv_newstate;
6231 	vap->iv_newstate = iwm_newstate;
6232 
6233 	ivp->id = IWM_DEFAULT_MACID;
6234 	ivp->color = IWM_DEFAULT_COLOR;
6235 
6236 	ivp->have_wme = FALSE;
6237 	ivp->ps_disabled = FALSE;
6238 
6239 	ieee80211_ratectl_init(vap);
6240 	/* Complete setup. */
6241 	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6242 	    mac);
6243 	ic->ic_opmode = opmode;
6244 
6245 	return vap;
6246 }
6247 
6248 static void
6249 iwm_vap_delete(struct ieee80211vap *vap)
6250 {
6251 	struct iwm_vap *ivp = IWM_VAP(vap);
6252 
6253 	ieee80211_ratectl_deinit(vap);
6254 	ieee80211_vap_detach(vap);
6255 	kfree(ivp, M_80211_VAP);
6256 }
6257 
6258 static void
6259 iwm_xmit_queue_drain(struct iwm_softc *sc)
6260 {
6261 	struct mbuf *m;
6262 	struct ieee80211_node *ni;
6263 
6264 	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6265 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6266 		ieee80211_free_node(ni);
6267 		m_freem(m);
6268 	}
6269 }
6270 
6271 static void
6272 iwm_scan_start(struct ieee80211com *ic)
6273 {
6274 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6275 	struct iwm_softc *sc = ic->ic_softc;
6276 	int error;
6277 
6278 	IWM_LOCK(sc);
6279 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6280 		/* This should not be possible */
6281 		device_printf(sc->sc_dev,
6282 		    "%s: Previous scan not completed yet\n", __func__);
6283 	}
6284 	if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6285 		error = iwm_mvm_umac_scan(sc);
6286 	else
6287 		error = iwm_mvm_lmac_scan(sc);
6288 	if (error != 0) {
6289 		device_printf(sc->sc_dev, "could not initiate scan\n");
6290 		IWM_UNLOCK(sc);
6291 		ieee80211_cancel_scan(vap);
6292 	} else {
6293 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6294 		iwm_led_blink_start(sc);
6295 		IWM_UNLOCK(sc);
6296 	}
6297 }
6298 
6299 static void
6300 iwm_scan_end(struct ieee80211com *ic)
6301 {
6302 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6303 	struct iwm_softc *sc = ic->ic_softc;
6304 
6305 	IWM_LOCK(sc);
6306 	iwm_led_blink_stop(sc);
6307 	if (vap->iv_state == IEEE80211_S_RUN)
6308 		iwm_mvm_led_enable(sc);
6309 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6310 		/*
6311 		 * Removing IWM_FLAG_SCAN_RUNNING now is fine because
6312 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6313 		 * taskqueue.
6314 		 */
6315 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6316 		iwm_mvm_scan_stop_wait(sc);
6317 	}
6318 	IWM_UNLOCK(sc);
6319 
6320 	/*
6321 	 * If sc_es_task is still enqueued here, cancel it so that it
6322 	 * won't call ieee80211_scan_done after we have already started
6323 	 * the next scan.
6324 	 */
6325 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6326 }
6327 
6328 static void
6329 iwm_update_mcast(struct ieee80211com *ic)
6330 {
6331 }
6332 
6333 static void
6334 iwm_set_channel(struct ieee80211com *ic)
6335 {
6336 }
6337 
6338 static void
6339 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6340 {
6341 }
6342 
6343 static void
6344 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6345 {
6346 	return;
6347 }
6348 
6349 void
6350 iwm_init_task(void *arg1)
6351 {
6352 	struct iwm_softc *sc = arg1;
6353 
6354 	IWM_LOCK(sc);
6355 	while (sc->sc_flags & IWM_FLAG_BUSY) {
6356 #if defined(__DragonFly__)
6357 		lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6358 #else
6359 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6360 #endif
6361 }
6362 	sc->sc_flags |= IWM_FLAG_BUSY;
6363 	iwm_stop(sc);
6364 	if (sc->sc_ic.ic_nrunning > 0)
6365 		iwm_init(sc);
6366 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6367 	wakeup(&sc->sc_flags);
6368 	IWM_UNLOCK(sc);
6369 }
6370 
6371 static int
6372 iwm_resume(device_t dev)
6373 {
6374 	struct iwm_softc *sc = device_get_softc(dev);
6375 	int do_reinit = 0;
6376 
6377 	/*
6378 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6379 	 * PCI Tx retries from interfering with C3 CPU state.
6380 	 */
6381 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6382 
6383 	if (!sc->sc_attached)
6384 		return 0;
6385 
6386 	iwm_init_task(device_get_softc(dev));
6387 
6388 	IWM_LOCK(sc);
6389 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6390 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6391 		do_reinit = 1;
6392 	}
6393 	IWM_UNLOCK(sc);
6394 
6395 	if (do_reinit)
6396 		ieee80211_resume_all(&sc->sc_ic);
6397 
6398 	return 0;
6399 }
6400 
6401 static int
6402 iwm_suspend(device_t dev)
6403 {
6404 	int do_stop = 0;
6405 	struct iwm_softc *sc = device_get_softc(dev);
6406 
6407 	do_stop = (sc->sc_ic.ic_nrunning > 0);
6408 
6409 	if (!sc->sc_attached)
6410 		return (0);
6411 
6412 	ieee80211_suspend_all(&sc->sc_ic);
6413 
6414 	if (do_stop) {
6415 		IWM_LOCK(sc);
6416 		iwm_stop(sc);
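		/*
		 * Set IWM_FLAG_SCANNING so that iwm_resume() knows to call
		 * ieee80211_resume_all().
		 */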
6417 		sc->sc_flags |= IWM_FLAG_SCANNING;
6418 		IWM_UNLOCK(sc);
6419 	}
6420 
6421 	return (0);
6422 }
6423 
6424 static int
6425 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6426 {
6427 	struct iwm_fw_info *fw = &sc->sc_fw;
6428 	device_t dev = sc->sc_dev;
6429 	int i;
6430 
6431 	if (!sc->sc_attached)
6432 		return 0;
6433 	sc->sc_attached = 0;
6434 	if (do_net80211) {
6435 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6436 	}
6437 	callout_drain(&sc->sc_led_blink_to);
6438 	callout_drain(&sc->sc_watchdog_to);
6439 	iwm_stop_device(sc);
6440 	if (do_net80211) {
6441 		IWM_LOCK(sc);
6442 		iwm_xmit_queue_drain(sc);
6443 		IWM_UNLOCK(sc);
6444 		ieee80211_ifdetach(&sc->sc_ic);
6445 	}
6446 
6447 	iwm_phy_db_free(sc->sc_phy_db);
6448 	sc->sc_phy_db = NULL;
6449 
6450 	iwm_free_nvm_data(sc->nvm_data);
6451 
6452 	/* Free descriptor rings */
6453 	iwm_free_rx_ring(sc, &sc->rxq);
6454 	for (i = 0; i < nitems(sc->txq); i++)
6455 		iwm_free_tx_ring(sc, &sc->txq[i]);
6456 
6457 	/* Free firmware */
6458 	if (fw->fw_fp != NULL)
6459 		iwm_fw_info_free(fw);
6460 
6461 	/* Free scheduler */
6462 	iwm_dma_contig_free(&sc->sched_dma);
6463 	iwm_dma_contig_free(&sc->ict_dma);
6464 	iwm_dma_contig_free(&sc->kw_dma);
6465 	iwm_dma_contig_free(&sc->fw_dma);
6466 
6467 	iwm_free_fw_paging(sc);
6468 
6469 	/* Finished with the hardware - detach things */
6470 	iwm_pci_detach(dev);
6471 
6472 	if (sc->sc_notif_wait != NULL) {
6473 		iwm_notification_wait_free(sc->sc_notif_wait);
6474 		sc->sc_notif_wait = NULL;
6475 	}
6476 
6477 	IWM_LOCK_DESTROY(sc);
6478 
6479 	return (0);
6480 }
6481 
6482 static int
6483 iwm_detach(device_t dev)
6484 {
6485 	struct iwm_softc *sc = device_get_softc(dev);
6486 
6487 	return (iwm_detach_local(sc, 1));
6488 }
6489 
6490 static device_method_t iwm_pci_methods[] = {
6491         /* Device interface */
6492         DEVMETHOD(device_probe,         iwm_probe),
6493         DEVMETHOD(device_attach,        iwm_attach),
6494         DEVMETHOD(device_detach,        iwm_detach),
6495         DEVMETHOD(device_suspend,       iwm_suspend),
6496         DEVMETHOD(device_resume,        iwm_resume),
6497 
6498         DEVMETHOD_END
6499 };
6500 
6501 static driver_t iwm_pci_driver = {
6502         "iwm",
6503         iwm_pci_methods,
6504         sizeof (struct iwm_softc)
6505 };
6506 
6507 static devclass_t iwm_devclass;
6508 
6509 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6510 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6511 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6512 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6513