xref: /freebsd/sys/dev/iwm/if_iwm.c (revision 2f513db7)
/*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * The driver version we are currently based on is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"
#include "opt_iwm.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_config.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_notif_wait.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_binding.h>
#include <dev/iwm/if_iwm_phy_db.h>
#include <dev/iwm/if_iwm_mac_ctxt.h>
#include <dev/iwm/if_iwm_phy_ctxt.h>
#include <dev/iwm/if_iwm_time_event.h>
#include <dev/iwm/if_iwm_power.h>
#include <dev/iwm/if_iwm_scan.h>
#include <dev/iwm/if_iwm_sf.h>
#include <dev/iwm/if_iwm_sta.h>

#include <dev/iwm/if_iwm_pcie_trans.h>
#include <dev/iwm/if_iwm_led.h>
#include <dev/iwm/if_iwm_fw.h>

/* From DragonflyBSD */
#define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))

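/*
 * mtodoff() is mtod() with a byte offset: it evaluates to a pointer of
 * type 't' located 'off' bytes into the mbuf's data.  An illustrative
 * use, e.g. when iwm_handle_rxb() below walks the packets inside one
 * receive buffer:
 *
 *	pkt = mtodoff(m, struct iwm_rx_packet *, offset);
 */
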
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)

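/*
 * Note on iwm_rates above: the rate field is in net80211's usual units
 * of 500 kb/s, so { 2 ... 108 } spans 1 Mb/s CCK through 54 Mb/s OFDM,
 * and plcp is the matching PLCP signal value used when building TX
 * commands for the firmware.
 */
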
struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

#define IWM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_UCODE_CALIB_TIMEOUT	(2*hz)

struct iwm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
                                           enum iwm_ucode_type,
                                           const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *);
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
                                  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
                                   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
					       struct iwm_nvm_data *,
					       const uint16_t *,
					       const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
				      const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
					     bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
						const struct iwm_fw_img *,
						int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
					   const struct iwm_fw_img *,
					   int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
					       const struct iwm_fw_img *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
					  const struct iwm_fw_img *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
                                              enum iwm_ucode_type);
static int	iwm_run_init_ucode(struct iwm_softc *, int);
static int	iwm_config_ltr(struct iwm_softc *sc);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
                                      struct iwm_rx_packet *);
static int	iwm_get_noise(struct iwm_softc *,
		    const struct iwm_statistics_rx_non_phy *);
static void	iwm_handle_rx_statistics(struct iwm_softc *,
		    struct iwm_rx_packet *);
static bool	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *,
		    uint32_t, bool);
static int	iwm_rx_tx_cmd_single(struct iwm_softc *,
                                         struct iwm_rx_packet *,
				         struct iwm_node *);
static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
                                 uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct mbuf *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
                       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static struct ieee80211_node *
		iwm_node_alloc(struct ieee80211vap *,
		               const uint8_t[IEEE80211_ADDR_LEN]);
static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_media_change(struct ifnet *);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static boolean_t iwm_is_lar_supported(struct iwm_softc *);
static boolean_t iwm_is_wifi_mcc_supported(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
		iwm_vap_create(struct ieee80211com *,
		               const char [IFNAMSIZ], int,
		               enum ieee80211_opmode, int,
		               const uint8_t [IEEE80211_ADDR_LEN],
		               const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_xmit_queue_drain(struct iwm_softc *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

static int	iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_img *fws;
	struct iwm_fw_desc *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.img[type];
	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
		return EINVAL;

	fwone = &fws->sec[fws->fw_count];

	/* first 32 bits are the device load offset */
	memcpy(&fwone->offset, data, sizeof(uint32_t));

	/* rest is data */
	fwone->data = data + sizeof(uint32_t);
	fwone->len = dlen - sizeof(uint32_t);

	fws->fw_count++;

	return 0;
}

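/*
 * Layout of each firmware-section TLV payload consumed above: a 32-bit
 * little-endian device load address followed by the section image.
 * Only pointers into the firmware(9) image are recorded here; nothing
 * is copied.
 *
 *	+-------------+------------------------+
 *	| load offset |      section data      |
 *	|   4 B, LE   |    (dlen - 4) bytes    |
 *	+-------------+------------------------+
 */
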
#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
			struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_api *ucode_api = (const void *)data;
	uint32_t api_index = le32toh(ucode_api->api_index);
	uint32_t api_flags = le32toh(ucode_api->api_flags);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
		device_printf(sc->sc_dev,
		    "api flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_api, i + 32 * api_index);
	}

	return 0;
}

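/*
 * A worked example of the index math above: each TLV carries one
 * 32-bit flag word plus that word's index, so api_index = 1 with bit 3
 * set marks API flag 1 * 32 + 3 = 35 in capa->enabled_api.  The same
 * scheme is used for the capability bits below.
 */
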
static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
			   struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
	uint32_t api_index = le32toh(ucode_capa->api_index);
	uint32_t api_flags = le32toh(ucode_capa->api_capa);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
		device_printf(sc->sc_dev,
		    "capa flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_capa, i + 32 * api_index);
	}

	return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	memset(fw->img, 0, sizeof(fw->img));
}

static int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	const struct iwm_ucode_tlv *tlv;
	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	uint32_t tlv_len;
	uint32_t usniffer_img;
	const uint8_t *tlv_data;
	uint32_t paging_mem_size;
	int num_of_cpus;
	int error = 0;
	size_t len;

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	fwp = firmware_get(sc->cfg->fw_name);
	if (fwp == NULL) {
		device_printf(sc->sc_dev,
		    "could not read firmware %s\n", sc->cfg->fw_name);
		error = ENOENT;
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	capa->flags = 0;
	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */

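	/*
	 * Rough on-disk layout of the TLV firmware image parsed below
	 * (see if_iwmreg.h for the exact structures): a struct
	 * iwm_tlv_ucode_header whose first word is zero and whose magic
	 * field must be IWM_TLV_UCODE_MAGIC, followed by a sequence of
	 * { le32 type; le32 length; uint8_t data[length]; } records,
	 * each padded so the next record starts on a 4-byte boundary.
	 */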
	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);
		tlv_data = tlv->data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}
		len -= roundup2(tlv_len, 4);
		data += sizeof(*tlv) + roundup2(tlv_len, 4);

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->max_probe_length =
			    le32_to_cpup((const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (capa->max_probe_length >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
			if (num_of_cpus == 2) {
				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_INIT].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
					TRUE;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%u) != sizeof(iwm_tlv_calib_data) (%zu)\n",
				    __func__, tlv_len,
				    sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				goto parse_out;
			}
			sc->sc_fw.phy_config =
			    le32_to_cpup((const uint32_t *)tlv_data);
			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_TX_CHAIN) >>
						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_RX_CHAIN) >>
						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			if (tlv_len != sizeof(struct iwm_ucode_api)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_CMD_VERSIONS:
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "%s: Paging: paging enabled (size = %u bytes)\n",
			    __func__, paging_mem_size);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				device_printf(sc->sc_dev,
					"%s: Paging: driver supports up to %u bytes for paging image\n",
					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
				error = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				device_printf(sc->sc_dev,
				    "%s: Paging: image size is not a multiple of %u\n",
				    __func__, IWM_FW_PAGING_SIZE);
				error = EINVAL;
				goto out;
			}

			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
			sc->sc_fw.img[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			capa->n_scan_channels =
			    le32_to_cpup((const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%u.%u.%u",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

 out:
	if (error) {
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	}

	return error;
}

/*
 * DMA resource routines
 */

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/* TX scheduler byte-count tables; mapped but never updated (iwm_update_sched is compiled out) */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	size_t descsz;
	int count, i, error;

	ring->cur = 0;
	if (sc->cfg->mqrx_supported) {
		count = IWM_RX_MQ_RING_COUNT;
		descsz = sizeof(uint64_t);
	} else {
		count = IWM_RX_LEGACY_RING_COUNT;
		descsz = sizeof(uint32_t);
	}

	/* Allocate RX descriptors (256-byte aligned). */
	size = count * descsz;
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size,
	    256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->free_desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	if (sc->cfg->mqrx_supported) {
		size = count * sizeof(uint32_t);
		error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
		    size, 256);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not allocate RX ring DMA memory\n");
			goto fail;
		}
	}

	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

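/*
 * Note on descsz above: on legacy (single-queue) hardware each
 * free-descriptor entry is a 32-bit DMA address (the buffer address
 * shifted right by 8), while multi-queue capable (9000-series)
 * hardware uses full 64-bit bus addresses, hence the differing entry
 * sizes and ring counts.
 */
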
static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	if (sc->rxq.stat)
		memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int count, i;

	iwm_dma_contig_free(&ring->free_desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);
	iwm_dma_contig_free(&ring->used_desc_dma);

	count = sc->cfg->mqrx_supported ? IWM_RX_MQ_RING_COUNT :
	    IWM_RX_LEGACY_RING_COUNT;

	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd), so there is no need
	 * to allocate command space for the other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}

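/*
 * Address math used in the loop above: command slot i lives at
 * cmd_dma.paddr + i * sizeof(struct iwm_device_cmd), and its
 * scratch_paddr points at the scratch field of the iwm_tx_cmd that
 * sits behind the command header in that slot.  The closing KASSERT
 * verifies exactly this arithmetic.
 */
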
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

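/*
 * With ICT enabled, the device DMAs interrupt-cause words into the
 * table programmed above instead of making the handler read a CSR;
 * sc->ict_cur tracks the driver's read index into the table (see
 * iwm_intr()).
 */
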
/* iwlwifi pcie/trans.c */

/*
 * Since this hard-resets things, it's also time to mark the first
 * vap (if any) as having no mac context.  It's annoying, but since
 * the driver is potentially being stopped/started whilst active
 * (thanks openbsd port!) we have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}
	sc->sc_firmware_state = 0;
	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;

	/* device going down, stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* stop and reset the on-board processor */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 */
	iwm_disable_interrupts(sc);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	iwm_prepare_card_hw(sc);
}

/* iwlwifi: mvm/ops.c */
static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
	    reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A: NIC is stuck in a reset state after early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing the ME FW
	 * to lose ownership and leaving it unable to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

static int
iwm_nic_rx_mq_init(struct iwm_softc *sc)
{
	int enabled;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Stop RX DMA. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
	/* Disable RX used and free queue operation. */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);

	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
	    sc->rxq.free_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
	    sc->rxq.used_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
	    sc->rxq.stat_dma.paddr);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);

	/* We configure only queue 0 for now. */
	enabled = ((1 << 0) << 16) | (1 << 0);

	/* Enable RX DMA, 4KB buffer size. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
	    IWM_RFH_DMA_EN_ENABLE_VAL |
	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);

	/* Enable RX DMA snooping. */
	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
	    (sc->cfg->integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));

	/* Enable the configured queue(s). */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);

	iwm_nic_unlock(sc);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);

	return (0);
}

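/*
 * Summary of the MQ setup above: the 64-bit base addresses of the
 * free- and used-BD rings and of the status write-back area are
 * programmed, the write/read indices are zeroed, and queue 0 is
 * activated via IWM_RFH_RXF_RXQ_ACTIVE (the mask appears to carry one
 * bit per queue in each 16-bit half).  The final FRBDCB_WIDX_TRG write
 * hands the first eight free buffers to the hardware.
 */
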
1465 static int
1466 iwm_nic_rx_legacy_init(struct iwm_softc *sc)
1467 {
1468 
1469 	/* Stop Rx DMA */
1470 	iwm_pcie_rx_stop(sc);
1471 
1472 	if (!iwm_nic_lock(sc))
1473 		return EBUSY;
1474 
1475 	/* reset and flush pointers */
1476 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1477 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1478 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1479 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1480 
1481 	/* Set physical address of RX ring (256-byte aligned). */
1482 	IWM_WRITE(sc,
1483 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG,
1484 	    sc->rxq.free_desc_dma.paddr >> 8);
1485 
1486 	/* Set physical address of RX status (16-byte aligned). */
1487 	IWM_WRITE(sc,
1488 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1489 
1490 	/* Enable Rx DMA
1491 	 * XXX 5000 HW isn't supported by the iwm(4) driver.
1492 	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
1493 	 *      the credit mechanism in 5000 HW RX FIFO
1494 	 * Direct rx interrupts to hosts
1495 	 * Rx buffer size 4 or 8k or 12k
1496 	 * RB timeout 0x10
1497 	 * 256 RBDs
1498 	 */
1499 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1500 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
1501 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
1502 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
1503 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
1504 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1505 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1506 
1507 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1508 
1509 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
1510 	if (sc->cfg->host_interrupt_operation_mode)
1511 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1512 
1513 	iwm_nic_unlock(sc);
1514 
1515 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1516 
1517 	return 0;
1518 }
1519 
1520 static int
1521 iwm_nic_rx_init(struct iwm_softc *sc)
1522 {
1523 	if (sc->cfg->mqrx_supported)
1524 		return iwm_nic_rx_mq_init(sc);
1525 	else
1526 		return iwm_nic_rx_legacy_init(sc);
1527 }
1528 
1529 static int
1530 iwm_nic_tx_init(struct iwm_softc *sc)
1531 {
1532 	int qid;
1533 
1534 	if (!iwm_nic_lock(sc))
1535 		return EBUSY;
1536 
1537 	/* Deactivate TX scheduler. */
1538 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1539 
1540 	/* Set physical address of "keep warm" page (16-byte aligned). */
1541 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1542 
1543 	/* Initialize TX rings. */
1544 	for (qid = 0; qid < nitems(sc->txq); qid++) {
1545 		struct iwm_tx_ring *txq = &sc->txq[qid];
1546 
1547 		/* Set physical address of TX ring (256-byte aligned). */
1548 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1549 		    txq->desc_dma.paddr >> 8);
1550 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1551 		    "%s: loading ring %d descriptors (%p) at %lx\n",
1552 		    __func__,
1553 		    qid, txq->desc,
1554 		    (unsigned long) (txq->desc_dma.paddr >> 8));
1555 	}
1556 
1557 	iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
1558 	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
1559 	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);
1560 
1561 	iwm_nic_unlock(sc);
1562 
1563 	return 0;
1564 }
1565 
1566 static int
1567 iwm_nic_init(struct iwm_softc *sc)
1568 {
1569 	int error;
1570 
1571 	iwm_apm_init(sc);
1572 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1573 		iwm_set_pwr(sc);
1574 
1575 	iwm_nic_config(sc);
1576 
1577 	if ((error = iwm_nic_rx_init(sc)) != 0)
1578 		return error;
1579 
1580 	/*
1581 	 * Ditto for TX, from iwn
1582 	 */
1583 	if ((error = iwm_nic_tx_init(sc)) != 0)
1584 		return error;
1585 
1586 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1587 	    "%s: shadow registers enabled\n", __func__);
1588 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1589 
1590 	return 0;
1591 }
1592 
1593 int
1594 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1595 {
1596 	int qmsk;
1597 
1598 	qmsk = 1 << qid;
1599 
1600 	if (!iwm_nic_lock(sc)) {
1601 		device_printf(sc->sc_dev, "%s: cannot enable txq %d\n",
1602 		    __func__, qid);
1603 		return EBUSY;
1604 	}
1605 
1606 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1607 
1608 	if (qid == IWM_CMD_QUEUE) {
1609 		/* Disable the scheduler. */
1610 		iwm_write_prph(sc, IWM_SCD_EN_CTRL, 0);
1611 
1612 		/* Stop the TX queue prior to configuration. */
1613 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1614 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1615 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1616 
1617 		iwm_nic_unlock(sc);
1618 
1619 		/* Disable aggregations for this queue. */
1620 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, qmsk);
1621 
1622 		if (!iwm_nic_lock(sc)) {
1623 			device_printf(sc->sc_dev,
1624 			    "%s: cannot enable txq %d\n", __func__, qid);
1625 			return EBUSY;
1626 		}
1627 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1628 		iwm_nic_unlock(sc);
1629 
1630 		iwm_write_mem32(sc,
1631 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1632 		/* Set scheduler window size and frame limit. */
1633 		iwm_write_mem32(sc,
1634 		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1635 		    sizeof(uint32_t),
1636 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1637 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1638 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1639 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1640 
1641 		if (!iwm_nic_lock(sc)) {
1642 			device_printf(sc->sc_dev,
1643 			    "%s: cannot enable txq %d\n", __func__, qid);
1644 			return EBUSY;
1645 		}
1646 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1647 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1648 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1649 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1650 		    IWM_SCD_QUEUE_STTS_REG_MSK);
1651 
1652 		/* Enable the scheduler for this queue. */
1653 		iwm_write_prph(sc, IWM_SCD_EN_CTRL, qmsk);
1654 	} else {
1655 		struct iwm_scd_txq_cfg_cmd cmd;
1656 		int error;
1657 
1658 		iwm_nic_unlock(sc);
1659 
1660 		memset(&cmd, 0, sizeof(cmd));
1661 		cmd.scd_queue = qid;
1662 		cmd.enable = 1;
1663 		cmd.sta_id = sta_id;
1664 		cmd.tx_fifo = fifo;
1665 		cmd.aggregate = 0;
1666 		cmd.window = IWM_FRAME_LIMIT;
1667 
1668 		error = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1669 		    sizeof(cmd), &cmd);
1670 		if (error) {
1671 			device_printf(sc->sc_dev,
1672 			    "cannot enable txq %d\n", qid);
1673 			return error;
1674 		}
1675 
1676 		if (!iwm_nic_lock(sc))
1677 			return EBUSY;
1678 	}
1679 
1680 	iwm_nic_unlock(sc);
1681 
1682 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1683 	    __func__, qid, fifo);
1684 
1685 	return 0;
1686 }
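
/*
 * Usage sketch (illustrative only; the sta_id/qid/fifo values below are
 * invented, not taken from this driver): the command queue is programmed
 * via direct scheduler register writes above, while any other queue is
 * configured through the IWM_SCD_QUEUE_CFG host command, e.g.:
 */
#if 0
	int error;

	error = iwm_enable_txq(sc, 0 /* sta_id */, 4 /* qid */, 1 /* fifo */);
	if (error)
		device_printf(sc->sc_dev, "txq setup failed: %d\n", error);
#endif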
1687 
1688 static int
1689 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1690 {
1691 	int error, chnl;
1692 
1693 	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1694 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1695 
1696 	if (!iwm_nic_lock(sc))
1697 		return EBUSY;
1698 
1699 	iwm_ict_reset(sc);
1700 
1701 	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1702 	if (scd_base_addr != 0 &&
1703 	    scd_base_addr != sc->scd_base_addr) {
1704 		device_printf(sc->sc_dev,
1705 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1706 		    __func__, scd_base_addr, sc->scd_base_addr);
1707 	}
1708 
1709 	iwm_nic_unlock(sc);
1710 
1711 	/* reset context data, TX status and translation data */
1712 	error = iwm_write_mem(sc,
1713 	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1714 	    NULL, clear_dwords);
1715 	if (error)
1716 		return EBUSY;
1717 
1718 	if (!iwm_nic_lock(sc))
1719 		return EBUSY;
1720 
1721 	/* Set physical address of TX scheduler rings (1KB aligned). */
1722 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1723 
1724 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1725 
1726 	iwm_nic_unlock(sc);
1727 
1728 	/* enable command channel */
1729 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
1730 	if (error)
1731 		return error;
1732 
1733 	if (!iwm_nic_lock(sc))
1734 		return EBUSY;
1735 
1736 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1737 
1738 	/* Enable DMA channels. */
1739 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1740 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1741 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1742 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1743 	}
1744 
1745 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1746 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1747 
1748 	iwm_nic_unlock(sc);
1749 
1750 	/* Enable L1-Active */
1751 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
1752 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1753 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1754 	}
1755 
1756 	return error;
1757 }
1758 
1759 /*
1760  * NVM read access and content parsing.  We do not support
1761  * external NVM or writing NVM.
1762  * iwlwifi/mvm/nvm.c
1763  */
1764 
1765 /* Default NVM size to read */
1766 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1767 
1768 #define IWM_NVM_WRITE_OPCODE 1
1769 #define IWM_NVM_READ_OPCODE 0
1770 
1771 /* load nvm chunk response */
1772 enum {
1773 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1774 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1775 };
1776 
1777 static int
1778 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1779 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1780 {
1781 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1782 		.offset = htole16(offset),
1783 		.length = htole16(length),
1784 		.type = htole16(section),
1785 		.op_code = IWM_NVM_READ_OPCODE,
1786 	};
1787 	struct iwm_nvm_access_resp *nvm_resp;
1788 	struct iwm_rx_packet *pkt;
1789 	struct iwm_host_cmd cmd = {
1790 		.id = IWM_NVM_ACCESS_CMD,
1791 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1792 		.data = { &nvm_access_cmd, },
1793 	};
1794 	int ret, bytes_read, offset_read;
1795 	uint8_t *resp_data;
1796 
1797 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1798 
1799 	ret = iwm_send_cmd(sc, &cmd);
1800 	if (ret) {
1801 		device_printf(sc->sc_dev,
1802 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1803 		return ret;
1804 	}
1805 
1806 	pkt = cmd.resp_pkt;
1807 
1808 	/* Extract NVM response */
1809 	nvm_resp = (void *)pkt->data;
1810 	ret = le16toh(nvm_resp->status);
1811 	bytes_read = le16toh(nvm_resp->length);
1812 	offset_read = le16toh(nvm_resp->offset);
1813 	resp_data = nvm_resp->data;
1814 	if (ret) {
1815 		if ((offset != 0) &&
1816 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1817 			/*
1818 			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
1819 			 * read a chunk from an address that is a multiple of
1820 			 * 2K and got an error because that address is empty.
1821 			 * Meaning of (offset != 0): the driver has already
1822 			 * read valid data from another chunk, so this case
1823 			 * is not an error.
1824 			 */
1825 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1826 				    "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
1827 				    offset);
1828 			*len = 0;
1829 			ret = 0;
1830 		} else {
1831 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1832 				    "NVM access command failed with status %d\n", ret);
1833 			ret = EIO;
1834 		}
1835 		goto exit;
1836 	}
1837 
1838 	if (offset_read != offset) {
1839 		device_printf(sc->sc_dev,
1840 		    "NVM ACCESS response with invalid offset %d\n",
1841 		    offset_read);
1842 		ret = EINVAL;
1843 		goto exit;
1844 	}
1845 
1846 	if (bytes_read > length) {
1847 		device_printf(sc->sc_dev,
1848 		    "NVM ACCESS response with too much data "
1849 		    "(%d bytes requested, %d bytes received)\n",
1850 		    length, bytes_read);
1851 		ret = EINVAL;
1852 		goto exit;
1853 	}
1854 
1855 	/* Copy the NVM data into the caller's buffer. */
1856 	memcpy(data + offset, resp_data, bytes_read);
1857 	*len = bytes_read;
1858 
1859  exit:
1860 	iwm_free_resp(sc, &cmd);
1861 	return ret;
1862 }
1863 
1864 /*
1865  * Reads an NVM section completely.
1866  * NICs prior to the 7000 family don't have a real NVM, but just read
1867  * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
1868  * by the uCode, we must check manually in that case that we don't
1869  * overflow and try to read more than the EEPROM size.
1870  * For 7000 family NICs, we supply the maximal size we can read, and
1871  * the uCode fills the response with as much data as fits without
1872  * overflowing, so no check is needed.
1873  */
1874 static int
1875 iwm_nvm_read_section(struct iwm_softc *sc,
1876 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1877 {
1878 	uint16_t seglen, length, offset = 0;
1879 	int ret;
1880 
1881 	/* Set nvm section read length */
1882 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1883 
1884 	seglen = length;
1885 
1886 	/* Read the NVM until exhausted (reading less than requested) */
1887 	while (seglen == length) {
1888 		/* Check no memory assumptions fail and cause an overflow */
1889 		if ((size_read + offset + length) >
1890 		    sc->cfg->eeprom_size) {
1891 			device_printf(sc->sc_dev,
1892 			    "EEPROM size is too small for NVM\n");
1893 			return ENOBUFS;
1894 		}
1895 
1896 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1897 		if (ret) {
1898 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1899 				    "Cannot read NVM from section %d offset %d, length %d\n",
1900 				    section, offset, length);
1901 			return ret;
1902 		}
1903 		offset += seglen;
1904 	}
1905 
1906 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1907 		    "NVM section %d read completed\n", section);
1908 	*len = offset;
1909 	return 0;
1910 }
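
/*
 * Caller sketch (illustrative only; the real caller is iwm_nvm_init()
 * below).  Note the loop above terminates once a chunk comes back
 * shorter than requested, i.e. seglen < IWM_NVM_DEFAULT_CHUNK_SIZE.
 */
#if 0
	uint8_t *buf;
	uint16_t len = 0;

	buf = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (buf != NULL &&
	    iwm_nvm_read_section(sc, IWM_NVM_SECTION_TYPE_SW, buf, &len, 0) == 0)
		printf("SW section: %u bytes\n", len);
#endif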
1911 
1912 /*
1913  * BEGIN IWM_NVM_PARSE
1914  */
1915 
1916 /* iwlwifi/iwl-nvm-parse.c */
1917 
1918 /* NVM offsets (in words) definitions */
1919 enum iwm_nvm_offsets {
1920 	/* NVM HW-Section offset (in words) definitions */
1921 	IWM_HW_ADDR = 0x15,
1922 
1923 /* NVM SW-Section offset (in words) definitions */
1924 	IWM_NVM_SW_SECTION = 0x1C0,
1925 	IWM_NVM_VERSION = 0,
1926 	IWM_RADIO_CFG = 1,
1927 	IWM_SKU = 2,
1928 	IWM_N_HW_ADDRS = 3,
1929 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1930 
1931 /* NVM calibration section offset (in words) definitions */
1932 	IWM_NVM_CALIB_SECTION = 0x2B8,
1933 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1934 };
1935 
1936 enum iwm_8000_nvm_offsets {
1937 	/* NVM HW-Section offset (in words) definitions */
1938 	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1939 	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1940 	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1941 	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1942 	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1943 
1944 	/* NVM SW-Section offset (in words) definitions */
1945 	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1946 	IWM_NVM_VERSION_8000 = 0,
1947 	IWM_RADIO_CFG_8000 = 0,
1948 	IWM_SKU_8000 = 2,
1949 	IWM_N_HW_ADDRS_8000 = 3,
1950 
1951 	/* NVM REGULATORY -Section offset (in words) definitions */
1952 	IWM_NVM_CHANNELS_8000 = 0,
1953 	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1954 	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1955 	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1956 
1957 	/* NVM calibration section offset (in words) definitions */
1958 	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1959 	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1960 };
1961 
1962 /* SKU Capabilities (actual values from NVM definition) */
1963 enum nvm_sku_bits {
1964 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1965 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1966 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1967 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1968 };
1969 
1970 /* radio config bits (actual values from NVM definition) */
1971 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1972 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1973 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1974 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1975 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1976 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1977 
1978 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1979 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1980 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1981 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1982 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1983 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
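
/*
 * Decoding sketch for the pre-8000 masks above; the radio_cfg value
 * 0x0f52 is invented purely for illustration.
 */
#if 0
	uint16_t radio_cfg = 0x0f52;

	/* dash=2, step=0, type=1, pnum=1, tx_ant=0xf, rx_ant=0x0 */
	printf("dash=%u step=%u type=%u pnum=%u tx=%#x rx=%#x\n",
	    IWM_NVM_RF_CFG_DASH_MSK(radio_cfg),
	    IWM_NVM_RF_CFG_STEP_MSK(radio_cfg),
	    IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg),
	    IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg),
	    IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg),
	    IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg));
#endif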
1984 
1985 /**
1986  * enum iwm_nvm_channel_flags - channel flags in NVM
1987  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1988  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1989  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1990  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1991  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1992  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1993  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1994  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1995  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1996  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1997  */
1998 enum iwm_nvm_channel_flags {
1999 	IWM_NVM_CHANNEL_VALID = (1 << 0),
2000 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
2001 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
2002 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
2003 	IWM_NVM_CHANNEL_DFS = (1 << 7),
2004 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
2005 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
2006 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
2007 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
2008 };
2009 
2010 /*
2011  * Translate EEPROM flags to net80211.
2012  */
2013 static uint32_t
2014 iwm_eeprom_channel_flags(uint16_t ch_flags)
2015 {
2016 	uint32_t nflags;
2017 
2018 	nflags = 0;
2019 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2020 		nflags |= IEEE80211_CHAN_PASSIVE;
2021 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2022 		nflags |= IEEE80211_CHAN_NOADHOC;
2023 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2024 		nflags |= IEEE80211_CHAN_DFS;
2025 		/* Just in case. */
2026 		nflags |= IEEE80211_CHAN_NOADHOC;
2027 	}
2028 
2029 	return (nflags);
2030 }
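
/*
 * Example (illustrative): a channel whose NVM flags set RADAR but clear
 * ACTIVE and IBSS maps to
 * IEEE80211_CHAN_PASSIVE | IEEE80211_CHAN_NOADHOC | IEEE80211_CHAN_DFS.
 */
#if 0
	uint32_t nflags = iwm_eeprom_channel_flags(
	    IWM_NVM_CHANNEL_VALID | IWM_NVM_CHANNEL_RADAR);
#endif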
2031 
2032 static void
2033 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2034     int maxchans, int *nchans, int ch_idx, size_t ch_num,
2035     const uint8_t bands[])
2036 {
2037 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2038 	uint32_t nflags;
2039 	uint16_t ch_flags;
2040 	uint8_t ieee;
2041 	int error;
2042 
2043 	for (; ch_idx < ch_num; ch_idx++) {
2044 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2045 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2046 			ieee = iwm_nvm_channels[ch_idx];
2047 		else
2048 			ieee = iwm_nvm_channels_8000[ch_idx];
2049 
2050 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2051 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2052 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
2053 			    ieee, ch_flags,
2054 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2055 			    "5.2" : "2.4");
2056 			continue;
2057 		}
2058 
2059 		nflags = iwm_eeprom_channel_flags(ch_flags);
2060 		error = ieee80211_add_channel(chans, maxchans, nchans,
2061 		    ieee, 0, 0, nflags, bands);
2062 		if (error != 0)
2063 			break;
2064 
2065 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2066 		    "Ch. %d Flags %x [%sGHz] - Added\n",
2067 		    ieee, ch_flags,
2068 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2069 		    "5.2" : "2.4");
2070 	}
2071 }
2072 
2073 static void
2074 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2075     struct ieee80211_channel chans[])
2076 {
2077 	struct iwm_softc *sc = ic->ic_softc;
2078 	struct iwm_nvm_data *data = sc->nvm_data;
2079 	uint8_t bands[IEEE80211_MODE_BYTES];
2080 	size_t ch_num;
2081 
2082 	memset(bands, 0, sizeof(bands));
2083 	/* 1-13: 11b/g channels. */
2084 	setbit(bands, IEEE80211_MODE_11B);
2085 	setbit(bands, IEEE80211_MODE_11G);
2086 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2087 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2088 
2089 	/* 14: 11b channel only. */
2090 	clrbit(bands, IEEE80211_MODE_11G);
2091 	iwm_add_channel_band(sc, chans, maxchans, nchans,
2092 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2093 
2094 	if (data->sku_cap_band_52GHz_enable) {
2095 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2096 			ch_num = nitems(iwm_nvm_channels);
2097 		else
2098 			ch_num = nitems(iwm_nvm_channels_8000);
2099 		memset(bands, 0, sizeof(bands));
2100 		setbit(bands, IEEE80211_MODE_11A);
2101 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2102 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2103 	}
2104 }
2105 
2106 static void
2107 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2108 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2109 {
2110 	const uint8_t *hw_addr;
2111 
2112 	if (mac_override) {
2113 		static const uint8_t reserved_mac[] = {
2114 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2115 		};
2116 
2117 		hw_addr = (const uint8_t *)(mac_override +
2118 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2119 
2120 		/*
2121 		 * Store the MAC address from the MAO (MAC address
2122 		 * override) section; no byte swapping is required there.
2123 		 */
2124 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2125 
2126 		/*
2127 		 * Force the use of the OTP MAC address in case of reserved MAC
2128 		 * address in the NVM, or if address is given but invalid.
2129 		 */
2130 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2131 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2132 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2133 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2134 			return;
2135 
2136 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2137 		    "%s: mac address from nvm override section invalid\n",
2138 		    __func__);
2139 	}
2140 
2141 	if (nvm_hw) {
2142 		/* read the mac address from WFMP registers */
2143 		uint32_t mac_addr0 =
2144 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2145 		uint32_t mac_addr1 =
2146 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2147 
2148 		hw_addr = (const uint8_t *)&mac_addr0;
2149 		data->hw_addr[0] = hw_addr[3];
2150 		data->hw_addr[1] = hw_addr[2];
2151 		data->hw_addr[2] = hw_addr[1];
2152 		data->hw_addr[3] = hw_addr[0];
2153 
2154 		hw_addr = (const uint8_t *)&mac_addr1;
2155 		data->hw_addr[4] = hw_addr[1];
2156 		data->hw_addr[5] = hw_addr[0];
2157 
2158 		return;
2159 	}
2160 
2161 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2162 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2163 }
2164 
2165 static int
2166 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2167 	    const uint16_t *phy_sku)
2168 {
2169 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2170 		return le16_to_cpup(nvm_sw + IWM_SKU);
2171 
2172 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2173 }
2174 
2175 static int
2176 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2177 {
2178 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2179 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2180 	else
2181 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2182 						IWM_NVM_VERSION_8000));
2183 }
2184 
2185 static int
2186 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2187 		  const uint16_t *phy_sku)
2188 {
2189 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2190 		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2191 
2192 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2193 }
2194 
2195 static int
2196 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2197 {
2198 	int n_hw_addr;
2199 
2200 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2201 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2202 
2203 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2204 
2205 	return n_hw_addr & IWM_N_HW_ADDR_MASK;
2206 }
2207 
2208 static void
2209 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2210 		  uint32_t radio_cfg)
2211 {
2212 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2213 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2214 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2215 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2216 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2217 		return;
2218 	}
2219 
2220 	/* set the radio configuration for family 8000 */
2221 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2222 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2223 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2224 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2225 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2226 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2227 }
2228 
2229 static int
2230 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2231 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2232 {
2233 #ifdef notyet /* for FAMILY 9000 */
2234 	if (cfg->mac_addr_from_csr) {
2235 		iwm_set_hw_address_from_csr(sc, data);
2236 	} else
2237 #endif
2238 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2239 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2240 
2241 		/* The byte order is little-endian 16-bit: bytes come out 214365. */
2242 		data->hw_addr[0] = hw_addr[1];
2243 		data->hw_addr[1] = hw_addr[0];
2244 		data->hw_addr[2] = hw_addr[3];
2245 		data->hw_addr[3] = hw_addr[2];
2246 		data->hw_addr[4] = hw_addr[5];
2247 		data->hw_addr[5] = hw_addr[4];
2248 	} else {
2249 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2250 	}
2251 
2252 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2253 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2254 		return EINVAL;
2255 	}
2256 
2257 	return 0;
2258 }
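
/*
 * Worked example of the "214365" ordering above (bytes invented for
 * illustration): if the NVM words hold the bytes 11:22:33:44:55:66,
 * the resulting hw_addr is 22:11:44:33:66:55, i.e. each 16-bit word
 * is byte-swapped in place.
 */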
2259 
2260 static struct iwm_nvm_data *
2261 iwm_parse_nvm_data(struct iwm_softc *sc,
2262 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2263 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2264 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2265 {
2266 	struct iwm_nvm_data *data;
2267 	uint32_t sku, radio_cfg;
2268 	uint16_t lar_config;
2269 
2270 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2271 		data = malloc(sizeof(*data) +
2272 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2273 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2274 	} else {
2275 		data = malloc(sizeof(*data) +
2276 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2277 		    M_DEVBUF, M_NOWAIT | M_ZERO);
2278 	}
2279 	if (!data)
2280 		return NULL;
2281 
2282 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2283 
2284 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2285 	iwm_set_radio_cfg(sc, data, radio_cfg);
2286 
2287 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2288 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2289 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2290 	data->sku_cap_11n_enable = 0;
2291 
2292 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2293 
2294 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2295 		/* TODO: use IWL_NVM_EXT */
2296 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
2297 				       IWM_NVM_LAR_OFFSET_8000_OLD :
2298 				       IWM_NVM_LAR_OFFSET_8000;
2299 
2300 		lar_config = le16_to_cpup(regulatory + lar_offset);
2301 		data->lar_enabled = !!(lar_config &
2302 				       IWM_NVM_LAR_ENABLED_8000);
2303 	}
2304 
2305 	/* If no valid mac address was found - bail out */
2306 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2307 		free(data, M_DEVBUF);
2308 		return NULL;
2309 	}
2310 
2311 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2312 		memcpy(data->nvm_ch_flags, sc->cfg->nvm_type == IWM_NVM_SDP ?
2313 		    &regulatory[0] : &nvm_sw[IWM_NVM_CHANNELS],
2314 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2315 	} else {
2316 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2317 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2318 	}
2319 
2320 	return data;
2321 }
2322 
2323 static void
2324 iwm_free_nvm_data(struct iwm_nvm_data *data)
2325 {
2326 	if (data != NULL)
2327 		free(data, M_DEVBUF);
2328 }
2329 
2330 static struct iwm_nvm_data *
2331 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2332 {
2333 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2334 
2335 	/* Checking for required sections */
2336 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2337 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2338 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2339 			device_printf(sc->sc_dev,
2340 			    "Can't parse empty OTP/NVM sections\n");
2341 			return NULL;
2342 		}
2343 	} else if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2344 		/* SW and REGULATORY sections are mandatory */
2345 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2346 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2347 			device_printf(sc->sc_dev,
2348 			    "Can't parse empty OTP/NVM sections\n");
2349 			return NULL;
2350 		}
2351 		/* MAC_OVERRIDE or at least HW section must exist */
2352 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2353 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2354 			device_printf(sc->sc_dev,
2355 			    "Can't parse mac_address, empty sections\n");
2356 			return NULL;
2357 		}
2358 
2359 		/* PHY_SKU section is mandatory in B0 */
2360 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2361 			device_printf(sc->sc_dev,
2362 			    "Can't parse phy_sku in B0, empty sections\n");
2363 			return NULL;
2364 		}
2365 	} else {
2366 		panic("unknown device family %d\n", sc->cfg->device_family);
2367 	}
2368 
2369 	hw = (const uint16_t *)sections[sc->cfg->nvm_hw_section_num].data;
2370 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2371 	calib = (const uint16_t *)
2372 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2373 	regulatory = sc->cfg->nvm_type == IWM_NVM_SDP ?
2374 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data :
2375 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2376 	mac_override = (const uint16_t *)
2377 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2378 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2379 
2380 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2381 	    phy_sku, regulatory);
2382 }
2383 
2384 static int
2385 iwm_nvm_init(struct iwm_softc *sc)
2386 {
2387 	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2388 	int i, ret, section;
2389 	uint32_t size_read = 0;
2390 	uint8_t *nvm_buffer, *temp;
2391 	uint16_t len;
2392 
2393 	memset(nvm_sections, 0, sizeof(nvm_sections));
2394 
2395 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2396 		return EINVAL;
2397 
2398 	/* Load NVM values from the NIC via firmware NVM access commands. */
2400 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2401 
2402 	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2403 	if (!nvm_buffer)
2404 		return ENOMEM;
2405 	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2406 		/* Read this section into the shared scratch buffer. */
2407 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2408 					   &len, size_read);
2409 		if (ret)
2410 			continue;
2411 		size_read += len;
2412 		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2413 		if (!temp) {
2414 			ret = ENOMEM;
2415 			break;
2416 		}
2417 		memcpy(temp, nvm_buffer, len);
2418 
2419 		nvm_sections[section].data = temp;
2420 		nvm_sections[section].length = len;
2421 	}
2422 	if (!size_read)
2423 		device_printf(sc->sc_dev, "OTP is blank\n");
2424 	free(nvm_buffer, M_DEVBUF);
2425 
2426 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2427 	if (!sc->nvm_data)
2428 		return EINVAL;
2429 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2430 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2431 
2432 	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2433 		if (nvm_sections[i].data != NULL)
2434 			free(nvm_sections[i].data, M_DEVBUF);
2435 	}
2436 
2437 	return 0;
2438 }
2439 
2440 static int
2441 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2442 	const struct iwm_fw_desc *section)
2443 {
2444 	struct iwm_dma_info *dma = &sc->fw_dma;
2445 	uint8_t *v_addr;
2446 	bus_addr_t p_addr;
2447 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2448 	int ret = 0;
2449 
2450 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2451 		    "%s: [%d] uCode section being loaded...\n",
2452 		    __func__, section_num);
2453 
2454 	v_addr = dma->vaddr;
2455 	p_addr = dma->paddr;
2456 
2457 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2458 		uint32_t copy_size, dst_addr;
2459 		int extended_addr = FALSE;
2460 
2461 		copy_size = MIN(chunk_sz, section->len - offset);
2462 		dst_addr = section->offset + offset;
2463 
2464 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2465 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2466 			extended_addr = TRUE;
2467 
2468 		if (extended_addr)
2469 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2470 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2471 
2472 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2473 		    copy_size);
2474 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2475 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2476 						   copy_size);
2477 
2478 		if (extended_addr)
2479 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2480 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2481 
2482 		if (ret) {
2483 			device_printf(sc->sc_dev,
2484 			    "%s: Could not load the [%d] uCode section\n",
2485 			    __func__, section_num);
2486 			break;
2487 		}
2488 	}
2489 
2490 	return ret;
2491 }
2492 
2493 /*
2494  * ucode
2495  */
2496 static int
2497 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2498 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2499 {
2500 	sc->sc_fw_chunk_done = 0;
2501 
2502 	if (!iwm_nic_lock(sc))
2503 		return EBUSY;
2504 
2505 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2506 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2507 
2508 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2509 	    dst_addr);
2510 
2511 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2512 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2513 
2514 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2515 	    (iwm_get_dma_hi_addr(phy_addr)
2516 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2517 
2518 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2519 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2520 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2521 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2522 
2523 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2524 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2525 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2526 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2527 
2528 	iwm_nic_unlock(sc);
2529 
2530 	/* wait up to 5s for this segment to load */
2531 	msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz * 5);
2532 
2533 	if (!sc->sc_fw_chunk_done) {
2534 		device_printf(sc->sc_dev,
2535 		    "fw chunk addr 0x%x len %d failed to load\n",
2536 		    dst_addr, byte_cnt);
2537 		return ETIMEDOUT;
2538 	}
2539 
2540 	return 0;
2541 }
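
/*
 * Note on the handshake above: the msleep() is assumed to be woken by
 * the interrupt path once the FH_TX interrupt for the service channel
 * fires and sets sc_fw_chunk_done; on timeout the chunk is reported as
 * failed and the firmware load aborts.
 */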
2542 
2543 static int
2544 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2545 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2546 {
2547 	int shift_param;
2548 	int i, ret = 0, sec_num = 0x1;
2549 	uint32_t val, last_read_idx = 0;
2550 
2551 	if (cpu == 1) {
2552 		shift_param = 0;
2553 		*first_ucode_section = 0;
2554 	} else {
2555 		shift_param = 16;
2556 		(*first_ucode_section)++;
2557 	}
2558 
2559 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2560 		last_read_idx = i;
2561 
2562 		/*
2563 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2564 		 * CPU1 to CPU2.
2565 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2566 		 * CPU2 non paged to CPU2 paging sec.
2567 		 */
2568 		if (!image->sec[i].data ||
2569 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2570 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2571 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2572 				    "Break since Data not valid or Empty section, sec = %d\n",
2573 				    i);
2574 			break;
2575 		}
2576 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2577 		if (ret)
2578 			return ret;
2579 
2580 		/* Notify the ucode of the loaded section number and status */
2581 		if (iwm_nic_lock(sc)) {
2582 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2583 			val = val | (sec_num << shift_param);
2584 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2585 			sec_num = (sec_num << 1) | 0x1;
2586 			iwm_nic_unlock(sc);
2587 		}
2588 	}
2589 
2590 	*first_ucode_section = last_read_idx;
2591 
2592 	iwm_enable_interrupts(sc);
2593 
2594 	if (iwm_nic_lock(sc)) {
2595 		if (cpu == 1)
2596 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2597 		else
2598 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2599 		iwm_nic_unlock(sc);
2600 	}
2601 
2602 	return 0;
2603 }
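
/*
 * Note on the load-status handshake above: sec_num starts at 0x1 and
 * grows as (sec_num << 1) | 0x1, so the CPU's half of
 * IWM_FH_UCODE_LOAD_STATUS reads 0x1, 0x3, 0x7, ... as sections land,
 * and is finally forced to all-ones (0xFFFF for CPU1, 0xFFFFFFFF for
 * CPU2) once every section is in place.
 */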
2604 
2605 static int
2606 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2607 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2608 {
2609 	int shift_param;
2610 	int i, ret = 0;
2611 	uint32_t last_read_idx = 0;
2612 
2613 	if (cpu == 1) {
2614 		shift_param = 0;
2615 		*first_ucode_section = 0;
2616 	} else {
2617 		shift_param = 16;
2618 		(*first_ucode_section)++;
2619 	}
2620 
2621 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2622 		last_read_idx = i;
2623 
2624 		/*
2625 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2626 		 * CPU1 to CPU2.
2627 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2628 		 * CPU2 non paged to CPU2 paging sec.
2629 		 */
2630 		if (!image->sec[i].data ||
2631 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2632 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2633 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2634 				    "Break since Data not valid or Empty section, sec = %d\n",
2635 				     i);
2636 			break;
2637 		}
2638 
2639 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2640 		if (ret)
2641 			return ret;
2642 	}
2643 
2644 	*first_ucode_section = last_read_idx;
2645 
2646 	return 0;
2647 
2648 }
2649 
2650 static int
2651 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2652 {
2653 	int ret = 0;
2654 	int first_ucode_section;
2655 
2656 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2657 		     image->is_dual_cpus ? "Dual" : "Single");
2658 
2659 	/* load to FW the binary non secured sections of CPU1 */
2660 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2661 	if (ret)
2662 		return ret;
2663 
2664 	if (image->is_dual_cpus) {
2665 		/* set CPU2 header address */
2666 		if (iwm_nic_lock(sc)) {
2667 			iwm_write_prph(sc,
2668 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2669 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2670 			iwm_nic_unlock(sc);
2671 		}
2672 
2673 		/* load to FW the binary sections of CPU2 */
2674 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2675 						 &first_ucode_section);
2676 		if (ret)
2677 			return ret;
2678 	}
2679 
2680 	iwm_enable_interrupts(sc);
2681 
2682 	/* release CPU reset */
2683 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2684 
2685 	return 0;
2686 }
2687 
2688 int
2689 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2690 	const struct iwm_fw_img *image)
2691 {
2692 	int ret = 0;
2693 	int first_ucode_section;
2694 
2695 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2696 		    image->is_dual_cpus ? "Dual" : "Single");
2697 
2698 	/* configure the ucode to be ready to get the secured image */
2699 	/* release CPU reset */
2700 	if (iwm_nic_lock(sc)) {
2701 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2702 		    IWM_RELEASE_CPU_RESET_BIT);
2703 		iwm_nic_unlock(sc);
2704 	}
2705 
2706 	/* load to FW the binary Secured sections of CPU1 */
2707 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2708 	    &first_ucode_section);
2709 	if (ret)
2710 		return ret;
2711 
2712 	/* load to FW the binary sections of CPU2 */
2713 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2714 	    &first_ucode_section);
2715 }
2716 
2717 /* XXX Get rid of this definition */
2718 static inline void
2719 iwm_enable_fw_load_int(struct iwm_softc *sc)
2720 {
2721 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2722 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2723 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2724 }
2725 
2726 /* XXX Add proper rfkill support code */
2727 static int
2728 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2729 {
2730 	int ret;
2731 
2732 	/* This may fail if AMT took ownership of the device */
2733 	if (iwm_prepare_card_hw(sc)) {
2734 		device_printf(sc->sc_dev,
2735 		    "%s: Exit HW not ready\n", __func__);
2736 		ret = EIO;
2737 		goto out;
2738 	}
2739 
2740 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2741 
2742 	iwm_disable_interrupts(sc);
2743 
2744 	/* make sure rfkill handshake bits are cleared */
2745 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2746 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2747 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2748 
2749 	/* clear (again), then enable host interrupts */
2750 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2751 
2752 	ret = iwm_nic_init(sc);
2753 	if (ret) {
2754 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2755 		goto out;
2756 	}
2757 
2758 	/*
2759 	 * Now we load the firmware and don't want to be interrupted, even
2760 	 * by the RF-Kill interrupt (hence mask all interrupts besides the
2761 	 * FH_TX interrupt, which is needed to load the firmware). If the
2762 	 * RF-Kill switch is toggled, we will find out after having loaded
2763 	 * the firmware and return the proper value to the caller.
2764 	 */
2765 	iwm_enable_fw_load_int(sc);
2766 
2767 	/* really make sure rfkill handshake bits are cleared */
2768 	/* maybe we should write a few times more?  just to make sure */
2769 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2770 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2771 
2772 	/* Load the given image to the HW */
2773 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
2774 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2775 	else
2776 		ret = iwm_pcie_load_given_ucode(sc, fw);
2777 
2778 	/* XXX re-check RF-Kill state */
2779 
2780 out:
2781 	return ret;
2782 }
2783 
2784 static int
2785 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2786 {
2787 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2788 		.valid = htole32(valid_tx_ant),
2789 	};
2790 
2791 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2792 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2793 }
2794 
2795 /* iwlwifi: mvm/fw.c */
2796 static int
2797 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2798 {
2799 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2800 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2801 
2802 	/* Set parameters */
2803 	phy_cfg_cmd.phy_cfg = htole32(iwm_get_phy_config(sc));
2804 	phy_cfg_cmd.calib_control.event_trigger =
2805 	    sc->sc_default_calib[ucode_type].event_trigger;
2806 	phy_cfg_cmd.calib_control.flow_trigger =
2807 	    sc->sc_default_calib[ucode_type].flow_trigger;
2808 
2809 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2810 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2811 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2812 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2813 }
2814 
2815 static int
2816 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2817 {
2818 	struct iwm_alive_data *alive_data = data;
2819 	struct iwm_alive_resp_v3 *palive3;
2820 	struct iwm_alive_resp *palive;
2821 	struct iwm_umac_alive *umac;
2822 	struct iwm_lmac_alive *lmac1;
2823 	struct iwm_lmac_alive *lmac2 = NULL;
2824 	uint16_t status;
2825 
2826 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2827 		palive = (void *)pkt->data;
2828 		umac = &palive->umac_data;
2829 		lmac1 = &palive->lmac_data[0];
2830 		lmac2 = &palive->lmac_data[1];
2831 		status = le16toh(palive->status);
2832 	} else {
2833 		palive3 = (void *)pkt->data;
2834 		umac = &palive3->umac_data;
2835 		lmac1 = &palive3->lmac_data;
2836 		status = le16toh(palive3->status);
2837 	}
2838 
2839 	sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
2840 	if (lmac2)
2841 		sc->error_event_table[1] =
2842 			le32toh(lmac2->error_event_table_ptr);
2843 	sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
2844 	sc->umac_error_event_table = le32toh(umac->error_info_addr);
2845 	alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
2846 	alive_data->valid = status == IWM_ALIVE_STATUS_OK;
2847 	if (sc->umac_error_event_table)
2848 		sc->support_umac_log = TRUE;
2849 
2850 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2851 		    "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
2852 		    status, lmac1->ver_type, lmac1->ver_subtype);
2853 
2854 	if (lmac2)
2855 		IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");
2856 
2857 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2858 		    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2859 		    le32toh(umac->umac_major),
2860 		    le32toh(umac->umac_minor));
2861 
2862 	return TRUE;
2863 }
2864 
2865 static int
2866 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2867 	struct iwm_rx_packet *pkt, void *data)
2868 {
2869 	struct iwm_phy_db *phy_db = data;
2870 
2871 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2872 		if (pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2873 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2874 			    __func__, pkt->hdr.code);
2875 		}
2876 		return TRUE;
2877 	}
2878 
2879 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2880 		device_printf(sc->sc_dev,
2881 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2882 	}
2883 
2884 	return FALSE;
2885 }
2886 
2887 static int
2888 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
2889 	enum iwm_ucode_type ucode_type)
2890 {
2891 	struct iwm_notification_wait alive_wait;
2892 	struct iwm_alive_data alive_data;
2893 	const struct iwm_fw_img *fw;
2894 	enum iwm_ucode_type old_type = sc->cur_ucode;
2895 	int error;
2896 	static const uint16_t alive_cmd[] = { IWM_ALIVE };
2897 
2898 	fw = &sc->sc_fw.img[ucode_type];
2899 	sc->cur_ucode = ucode_type;
2900 	sc->ucode_loaded = FALSE;
2901 
2902 	memset(&alive_data, 0, sizeof(alive_data));
2903 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2904 				   alive_cmd, nitems(alive_cmd),
2905 				   iwm_alive_fn, &alive_data);
2906 
2907 	error = iwm_start_fw(sc, fw);
2908 	if (error) {
2909 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2910 		sc->cur_ucode = old_type;
2911 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2912 		return error;
2913 	}
2914 
2915 	/*
2916 	 * Some things may run in the background now, but we
2917 	 * just wait for the ALIVE notification here.
2918 	 */
2919 	IWM_UNLOCK(sc);
2920 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2921 				      IWM_UCODE_ALIVE_TIMEOUT);
2922 	IWM_LOCK(sc);
2923 	if (error) {
2924 		if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2925 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2926 			if (iwm_nic_lock(sc)) {
2927 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2928 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2929 				iwm_nic_unlock(sc);
2930 			}
2931 			device_printf(sc->sc_dev,
2932 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2933 			    a, b);
2934 		}
2935 		sc->cur_ucode = old_type;
2936 		return error;
2937 	}
2938 
2939 	if (!alive_data.valid) {
2940 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2941 		    __func__);
2942 		sc->cur_ucode = old_type;
2943 		return EIO;
2944 	}
2945 
2946 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2947 
2948 	/*
2949 	 * Configure and operate the firmware paging mechanism.
2950 	 * The driver configures the paging flow only once; the CPU2
2951 	 * paging image is included in the IWM_UCODE_INIT image.
2952 	 */
2953 	if (fw->paging_mem_size) {
2954 		error = iwm_save_fw_paging(sc, fw);
2955 		if (error) {
2956 			device_printf(sc->sc_dev,
2957 			    "%s: failed to save the FW paging image\n",
2958 			    __func__);
2959 			return error;
2960 		}
2961 
2962 		error = iwm_send_paging_cmd(sc, fw);
2963 		if (error) {
2964 			device_printf(sc->sc_dev,
2965 			    "%s: failed to send the paging cmd\n", __func__);
2966 			iwm_free_fw_paging(sc);
2967 			return error;
2968 		}
2969 	}
2970 
2971 	if (!error)
2972 		sc->ucode_loaded = TRUE;
2973 	return error;
2974 }
2975 
2976 /*
2977  * mvm misc bits
2978  */
2979 
2980 /*
2981  * follows iwlwifi/fw.c
2982  */
2983 static int
2984 iwm_run_init_ucode(struct iwm_softc *sc, int justnvm)
2985 {
2986 	struct iwm_notification_wait calib_wait;
2987 	static const uint16_t init_complete[] = {
2988 		IWM_INIT_COMPLETE_NOTIF,
2989 		IWM_CALIB_RES_NOTIF_PHY_DB
2990 	};
2991 	int ret;
2992 
2993 	/* do not operate with rfkill switch turned on */
2994 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2995 		device_printf(sc->sc_dev,
2996 		    "radio is disabled by hardware switch\n");
2997 		return EPERM;
2998 	}
2999 
3000 	iwm_init_notification_wait(sc->sc_notif_wait,
3001 				   &calib_wait,
3002 				   init_complete,
3003 				   nitems(init_complete),
3004 				   iwm_wait_phy_db_entry,
3005 				   sc->sc_phy_db);
3006 
3007 	/* Will also start the device */
3008 	ret = iwm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
3009 	if (ret) {
3010 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
3011 		    ret);
3012 		goto error;
3013 	}
3014 
3015 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
3016 		ret = iwm_send_bt_init_conf(sc);
3017 		if (ret) {
3018 			device_printf(sc->sc_dev,
3019 			    "failed to send bt coex configuration: %d\n", ret);
3020 			goto error;
3021 		}
3022 	}
3023 
3024 	if (justnvm) {
3025 		/* Read nvm */
3026 		ret = iwm_nvm_init(sc);
3027 		if (ret) {
3028 			device_printf(sc->sc_dev, "failed to read nvm\n");
3029 			goto error;
3030 		}
3031 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
3032 		goto error;
3033 	}
3034 
3035 	/* Send TX valid antennas before triggering calibrations */
3036 	ret = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
3037 	if (ret) {
3038 		device_printf(sc->sc_dev,
3039 		    "failed to send antennas before calibration: %d\n", ret);
3040 		goto error;
3041 	}
3042 
3043 	/*
3044 	 * Send the PHY configuration command to the init uCode to start
3045 	 * the internal calibrations of the 16.0 uCode init image.
3046 	 */
3047 	ret = iwm_send_phy_cfg_cmd(sc);
3048 	if (ret) {
3049 		device_printf(sc->sc_dev,
3050 		    "%s: Failed to run INIT calibrations: %d\n",
3051 		    __func__, ret);
3052 		goto error;
3053 	}
3054 
3055 	/*
3056 	 * Nothing to do but wait for the init complete notification
3057 	 * from the firmware.
3058 	 */
3059 	IWM_UNLOCK(sc);
3060 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3061 	    IWM_UCODE_CALIB_TIMEOUT);
3062 	IWM_LOCK(sc);
3063 
3065 	goto out;
3066 
3067 error:
3068 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3069 out:
3070 	return ret;
3071 }
3072 
3073 static int
3074 iwm_config_ltr(struct iwm_softc *sc)
3075 {
3076 	struct iwm_ltr_config_cmd cmd = {
3077 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
3078 	};
3079 
3080 	if (!sc->sc_ltr_enabled)
3081 		return 0;
3082 
3083 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3084 }
3085 
3086 /*
3087  * receive side
3088  */
3089 
3090 /* (re)stock rx ring, called at init-time and at runtime */
3091 static int
3092 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3093 {
3094 	struct iwm_rx_ring *ring = &sc->rxq;
3095 	struct iwm_rx_data *data = &ring->data[idx];
3096 	struct mbuf *m;
3097 	bus_dmamap_t dmamap;
3098 	bus_dma_segment_t seg;
3099 	int nsegs, error;
3100 
3101 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3102 	if (m == NULL)
3103 		return ENOBUFS;
3104 
3105 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3106 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3107 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3108 	if (error != 0) {
3109 		device_printf(sc->sc_dev,
3110 		    "%s: can't map mbuf, error %d\n", __func__, error);
3111 		m_freem(m);
3112 		return error;
3113 	}
3114 
3115 	if (data->m != NULL)
3116 		bus_dmamap_unload(ring->data_dmat, data->map);
3117 
3118 	/* Swap ring->spare_map with data->map */
3119 	dmamap = data->map;
3120 	data->map = ring->spare_map;
3121 	ring->spare_map = dmamap;
3122 
3123 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3124 	data->m = m;
3125 
3126 	/* Update RX descriptor. */
3127 	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
3128 	if (sc->cfg->mqrx_supported)
3129 		((uint64_t *)ring->desc)[idx] = htole64(seg.ds_addr);
3130 	else
3131 		((uint32_t *)ring->desc)[idx] = htole32(seg.ds_addr >> 8);
3132 	bus_dmamap_sync(ring->free_desc_dma.tag, ring->free_desc_dma.map,
3133 	    BUS_DMASYNC_PREWRITE);
3134 
3135 	return 0;
3136 }
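
/*
 * Descriptor-format example for the update above (address invented for
 * illustration): with ds_addr == 0x12345600 (256-byte aligned),
 * multi-queue hardware is given the full 64-bit address
 * 0x0000000012345600, while legacy hardware is given the 32-bit value
 * 0x00123456 (the address shifted right by 8).
 */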
3137 
3138 static void
3139 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3140 {
3141 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3142 
3143 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3144 
3145 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3146 }
3147 
3148 /*
3149  * Retrieve the average noise (in dBm) among receivers.
3150  */
3151 static int
3152 iwm_get_noise(struct iwm_softc *sc,
3153     const struct iwm_statistics_rx_non_phy *stats)
3154 {
3155 	int i, total, nbant, noise;
3156 
3157 	total = nbant = noise = 0;
3158 	for (i = 0; i < 3; i++) {
3159 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3160 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3161 		    __func__,
3162 		    i,
3163 		    noise);
3164 
3165 		if (noise) {
3166 			total += noise;
3167 			nbant++;
3168 		}
3169 	}
3170 
3171 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3172 	    __func__, nbant, total);
3173 #if 0
3174 	/* There should be at least one antenna but check anyway. */
3175 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3176 #else
3177 	/* For now, just hard-code it to -96 to be safe */
3178 	return (-96);
3179 #endif
3180 }
3181 
3182 static void
3183 iwm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3184 {
3185 	struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3186 
3187 	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3188 	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3189 }
3190 
3191 /* iwlwifi: mvm/rx.c */
3192 /*
3193  * iwm_rx_get_signal_strength - use the new RX PHY INFO API.
3194  * Values are reported by the fw as positive values - need to negate
3195  * to obtain their dBm.  Account for missing antennas by replacing 0
3196  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3197  */
3198 static int
3199 iwm_rx_get_signal_strength(struct iwm_softc *sc,
3200     struct iwm_rx_phy_info *phy_info)
3201 {
3202 	int energy_a, energy_b, energy_c, max_energy;
3203 	uint32_t val;
3204 
3205 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3206 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3207 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3208 	energy_a = energy_a ? -energy_a : -256;
3209 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3210 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3211 	energy_b = energy_b ? -energy_b : -256;
3212 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3213 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3214 	energy_c = energy_c ? -energy_c : -256;
3215 	max_energy = MAX(energy_a, energy_b);
3216 	max_energy = MAX(max_energy, energy_c);
3217 
3218 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3219 	    "energy In A %d B %d C %d , and max %d\n",
3220 	    energy_a, energy_b, energy_c, max_energy);
3221 
3222 	return max_energy;
3223 }
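
/*
 * Worked example (value invented, and assuming the A/B/C energy fields
 * occupy bits 0-7, 8-15 and 16-23 respectively): a non_cfg_phy word of
 * 0x00201510 decodes to energy_a=0x10, energy_b=0x15, energy_c=0x20,
 * so the reported signal strength is max(-16, -21, -32) = -16 dBm.
 */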
3224 
3225 static int
3226 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
3227     struct iwm_rx_mpdu_desc *desc)
3228 {
3229 	int energy_a, energy_b;
3230 
3231 	energy_a = desc->v1.energy_a;
3232 	energy_b = desc->v1.energy_b;
3233 	energy_a = energy_a ? -energy_a : -256;
3234 	energy_b = energy_b ? -energy_b : -256;
3235 	return MAX(energy_a, energy_b);
3236 }
3237 
3238 /*
3239  * iwm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3240  *
3241  * Handles the actual data of the Rx packet from the fw
3242  */
3243 static bool
3244 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3245     bool stolen)
3246 {
3247 	struct ieee80211com *ic = &sc->sc_ic;
3248 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3249 	struct ieee80211_frame *wh;
3250 	struct ieee80211_rx_stats rxs;
3251 	struct iwm_rx_phy_info *phy_info;
3252 	struct iwm_rx_mpdu_res_start *rx_res;
3253 	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3254 	uint32_t len;
3255 	uint32_t rx_pkt_status;
3256 	int rssi;
3257 
3258 	phy_info = &sc->sc_last_phy_info;
3259 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3260 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3261 	len = le16toh(rx_res->byte_count);
3262 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3263 
3264 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3265 		device_printf(sc->sc_dev,
3266 		    "dsp size out of range [0,20]: %d\n",
3267 		    phy_info->cfg_phy_cnt);
3268 		return false;
3269 	}
3270 
3271 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3272 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3273 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3274 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3275 		return false;
3276 	}
3277 
3278 	rssi = iwm_rx_get_signal_strength(sc, phy_info);
3279 
3280 	/* Map it to relative value */
3281 	rssi = rssi - sc->sc_noise;
3282 
3283 	/* replenish ring for the buffer we're going to feed to the sharks */
3284 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3285 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3286 		    __func__);
3287 		return false;
3288 	}
3289 
3290 	m->m_data = pkt->data + sizeof(*rx_res);
3291 	m->m_pkthdr.len = m->m_len = len;
3292 
3293 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3294 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3295 
3296 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3297 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3298 	    __func__,
3299 	    le16toh(phy_info->channel),
3300 	    le16toh(phy_info->phy_flags));
3301 
3302 	/*
3303 	 * Populate an RX state struct with the provided information.
3304 	 */
3305 	bzero(&rxs, sizeof(rxs));
3306 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3307 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3308 	rxs.c_ieee = le16toh(phy_info->channel);
3309 	if (le16toh(phy_info->phy_flags) & IWM_RX_RES_PHY_FLAGS_BAND_24) {
3310 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3311 	} else {
3312 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3313 	}
3314 
3315 	/* rssi is in 1/2db units */
3316 	rxs.c_rssi = rssi * 2;
3317 	rxs.c_nf = sc->sc_noise;
3318 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3319 		return false;
3320 
3321 	if (ieee80211_radiotap_active_vap(vap)) {
3322 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3323 
3324 		tap->wr_flags = 0;
3325 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3326 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3327 		tap->wr_chan_freq = htole16(rxs.c_freq);
3328 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3329 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3330 		tap->wr_dbm_antsignal = (int8_t)rssi;
3331 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3332 		tap->wr_tsft = phy_info->system_timestamp;
3333 		switch (phy_info->rate) {
3334 		/* CCK rates. */
3335 		case  10: tap->wr_rate =   2; break;
3336 		case  20: tap->wr_rate =   4; break;
3337 		case  55: tap->wr_rate =  11; break;
3338 		case 110: tap->wr_rate =  22; break;
3339 		/* OFDM rates. */
3340 		case 0xd: tap->wr_rate =  12; break;
3341 		case 0xf: tap->wr_rate =  18; break;
3342 		case 0x5: tap->wr_rate =  24; break;
3343 		case 0x7: tap->wr_rate =  36; break;
3344 		case 0x9: tap->wr_rate =  48; break;
3345 		case 0xb: tap->wr_rate =  72; break;
3346 		case 0x1: tap->wr_rate =  96; break;
3347 		case 0x3: tap->wr_rate = 108; break;
3348 		/* Unknown rate: should not happen. */
3349 		default:  tap->wr_rate =   0;
3350 		}
3351 	}
3352 
3353 	return true;
3354 }
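
/*
 * The CCK/OFDM rate-to-radiotap mapping above is duplicated in
 * iwm_rx_mpdu_mq() below; a shared helper along these lines (a sketch,
 * not part of this driver) would keep the two tables in sync:
 */
#if 0
static uint8_t
iwm_rate_to_radiotap(uint8_t rate)
{
	switch (rate) {
	/* CCK rates. */
	case  10: return   2;
	case  20: return   4;
	case  55: return  11;
	case 110: return  22;
	/* OFDM rates. */
	case 0xd: return  12;
	case 0xf: return  18;
	case 0x5: return  24;
	case 0x7: return  36;
	case 0x9: return  48;
	case 0xb: return  72;
	case 0x1: return  96;
	case 0x3: return 108;
	/* Unknown rate: should not happen. */
	default:  return   0;
	}
}
#endif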
3355 
3356 static bool
3357 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3358     bool stolen)
3359 {
3360 	struct ieee80211com *ic = &sc->sc_ic;
3361 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3362 	struct ieee80211_frame *wh;
3363 	struct ieee80211_rx_stats rxs;
3364 	struct iwm_rx_mpdu_desc *desc;
3365 	struct iwm_rx_packet *pkt;
3366 	int rssi;
3367 	uint32_t hdrlen, len, rate_n_flags;
3368 	uint16_t phy_info;
3369 	uint8_t channel;
3370 
3371 	pkt = mtodo(m, offset);
3372 	desc = (void *)pkt->data;
3373 
3374 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
3375 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
3376 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3377 		    "Bad CRC or FIFO: 0x%08X.\n", desc->status);
3378 		return false;
3379 	}
3380 
3381 	channel = desc->v1.channel;
3382 	len = le16toh(desc->mpdu_len);
3383 	phy_info = le16toh(desc->phy_info);
3384 	rate_n_flags = desc->v1.rate_n_flags;
3385 
3386 	wh = mtodo(m, sizeof(*desc));
3387 	m->m_data = pkt->data + sizeof(*desc);
3388 	m->m_pkthdr.len = m->m_len = len;
3390 
3391 	/* Account for padding following the frame header. */
3392 	if ((desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD)) {
3393 		hdrlen = ieee80211_anyhdrsize(wh);
3394 		memmove(mtodo(m, 2), mtodo(m, 0), hdrlen);
3395 		m->m_data = mtodo(m, 2);
3396 		wh = mtod(m, struct ieee80211_frame *);
3397 	}
3398 
3399 	/* Map it to relative value */
3400 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
3401 	rssi = rssi - sc->sc_noise;
3402 
3403 	/* replenish ring for the buffer we're going to feed to the sharks */
3404 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3405 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3406 		    __func__);
3407 		return false;
3408 	}
3409 
3410 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3411 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3412 
3413 	/*
3414 	 * Populate an RX state struct with the provided information.
3415 	 */
3416 	bzero(&rxs, sizeof(rxs));
3417 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3418 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3419 	rxs.c_ieee = channel;
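	/* IEEE channels 1-14 are in the 2.4 GHz band; anything higher is 5 GHz. */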
3420 	rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
3421 	    channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
3422 
3423 	/* c_rssi is in .5 dBm units */
3424 	rxs.c_rssi = rssi * 2;
3425 	rxs.c_nf = sc->sc_noise;
3426 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3427 		return false;
3428 
3429 	if (ieee80211_radiotap_active_vap(vap)) {
3430 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3431 
3432 		tap->wr_flags = 0;
3433 		if ((phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE) != 0)
3434 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3435 		tap->wr_chan_freq = htole16(rxs.c_freq);
3436 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3437 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3438 		tap->wr_dbm_antsignal = (int8_t)rssi;
3439 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3440 		tap->wr_tsft = desc->v1.gp2_on_air_rise;
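		/* Same firmware-rate to radiotap-rate mapping as above. */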
3441 		switch (rate_n_flags & 0xff) {
3442 		/* CCK rates. */
3443 		case  10: tap->wr_rate =   2; break;
3444 		case  20: tap->wr_rate =   4; break;
3445 		case  55: tap->wr_rate =  11; break;
3446 		case 110: tap->wr_rate =  22; break;
3447 		/* OFDM rates. */
3448 		case 0xd: tap->wr_rate =  12; break;
3449 		case 0xf: tap->wr_rate =  18; break;
3450 		case 0x5: tap->wr_rate =  24; break;
3451 		case 0x7: tap->wr_rate =  36; break;
3452 		case 0x9: tap->wr_rate =  48; break;
3453 		case 0xb: tap->wr_rate =  72; break;
3454 		case 0x1: tap->wr_rate =  96; break;
3455 		case 0x3: tap->wr_rate = 108; break;
3456 		/* Unknown rate: should not happen. */
3457 		default:  tap->wr_rate =   0;
3458 		}
3459 	}
3460 
3461 	return true;
3462 }
3463 
3464 static bool
3465 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3466     bool stolen)
3467 {
3468 	struct epoch_tracker et;
3469 	struct ieee80211com *ic;
3470 	struct ieee80211_frame *wh;
3471 	struct ieee80211_node *ni;
3472 	bool ret;
3473 
3474 	ic = &sc->sc_ic;
3475 
3476 	ret = sc->cfg->mqrx_supported ?
3477 	    iwm_rx_mpdu_mq(sc, m, offset, stolen) :
3478 	    iwm_rx_rx_mpdu(sc, m, offset, stolen);
3479 	if (!ret) {
3480 		counter_u64_add(ic->ic_ierrors, 1);
3481 		return (ret);
3482 	}
3483 
3484 	wh = mtod(m, struct ieee80211_frame *);
3485 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3486 
3487 	IWM_UNLOCK(sc);
3488 
3489 	NET_EPOCH_ENTER(et);
3490 	if (ni != NULL) {
3491 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3492 		ieee80211_input_mimo(ni, m);
3493 		ieee80211_free_node(ni);
3494 	} else {
3495 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3496 		ieee80211_input_mimo_all(ic, m);
3497 	}
3498 	NET_EPOCH_EXIT(et);
3499 
3500 	IWM_LOCK(sc);
3501 
3502 	return true;
3503 }
3504 
3505 static int
3506 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3507 	struct iwm_node *in)
3508 {
3509 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3510 	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3511 	struct ieee80211_node *ni = &in->in_ni;
3512 	struct ieee80211vap *vap = ni->ni_vap;
3513 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3514 	int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3515 	boolean_t rate_matched;
3516 	uint8_t tx_resp_rate;
3517 
3518 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3519 
3520 	/* Update rate control statistics. */
3521 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3522 	    __func__,
3523 	    (int) le16toh(tx_resp->status.status),
3524 	    (int) le16toh(tx_resp->status.sequence),
3525 	    tx_resp->frame_count,
3526 	    tx_resp->bt_kill_count,
3527 	    tx_resp->failure_rts,
3528 	    tx_resp->failure_frame,
3529 	    le32toh(tx_resp->initial_rate),
3530 	    (int) le16toh(tx_resp->wireless_media_time));
3531 
3532 	tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3533 
3534 	/* For rate control, ignore frames sent at different initial rate */
3535 	rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3536 
3537 	if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3538 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3539 		    "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3540 		    "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3541 	}
3542 
3543 	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3544 		     IEEE80211_RATECTL_STATUS_LONG_RETRY;
3545 	txs->short_retries = tx_resp->failure_rts;
3546 	txs->long_retries = tx_resp->failure_frame;
3547 	if (status != IWM_TX_STATUS_SUCCESS &&
3548 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3549 		switch (status) {
3550 		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3551 			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3552 			break;
3553 		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3554 			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3555 			break;
3556 		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3557 			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3558 			break;
3559 		default:
3560 			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3561 			break;
3562 		}
3563 	} else {
3564 		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3565 	}
3566 
3567 	if (rate_matched) {
3568 		ieee80211_ratectl_tx_complete(ni, txs);
3569 
3570 		int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3571 		new_rate = vap->iv_bss->ni_txrate;
3572 		if (new_rate != 0 && new_rate != cur_rate) {
3573 			struct iwm_node *in = IWM_NODE(vap->iv_bss);
3574 			iwm_setrates(sc, in, rix);
3575 			iwm_send_lq_cmd(sc, &in->in_lq, FALSE);
3576 		}
3577 	}
3578 
3579 	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3580 }
3581 
3582 static void
3583 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3584 {
3585 	struct iwm_cmd_header *cmd_hdr;
3586 	struct iwm_tx_ring *ring;
3587 	struct iwm_tx_data *txd;
3588 	struct iwm_node *in;
3589 	struct mbuf *m;
3590 	int idx, qid, qmsk, status;
3591 
3592 	cmd_hdr = &pkt->hdr;
3593 	idx = cmd_hdr->idx;
3594 	qid = cmd_hdr->qid;
3595 
3596 	ring = &sc->txq[qid];
3597 	txd = &ring->data[idx];
3598 	in = txd->in;
3599 	m = txd->m;
3600 
3601 	KASSERT(txd->done == 0, ("txd not done"));
3602 	KASSERT(txd->in != NULL, ("txd without node"));
3603 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3604 
3605 	sc->sc_tx_timer = 0;
3606 
3607 	status = iwm_rx_tx_cmd_single(sc, pkt, in);
3608 
3609 	/* Unmap and free mbuf. */
3610 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3611 	bus_dmamap_unload(ring->data_dmat, txd->map);
3612 
3613 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3614 	    "free txd %p, in %p\n", txd, txd->in);
3615 	txd->done = 1;
3616 	txd->m = NULL;
3617 	txd->in = NULL;
3618 
3619 	ieee80211_tx_complete(&in->in_ni, m, status);
3620 
3621 	qmsk = 1 << qid;
3622 	if (--ring->queued < IWM_TX_RING_LOMARK && (sc->qfullmsk & qmsk) != 0) {
3623 		sc->qfullmsk &= ~qmsk;
3624 		if (sc->qfullmsk == 0)
3625 			iwm_start(sc);
3626 	}
3627 }
3628 
3629 /*
3630  * transmit side
3631  */
3632 
3633 /*
3634  * Process a "command done" firmware notification.  This is where we wake up
3635  * processes waiting for a synchronous command completion.
3636  * Adapted from if_iwn.
3637  */
3638 static void
3639 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3640 {
3641 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3642 	struct iwm_tx_data *data;
3643 
3644 	if (pkt->hdr.qid != IWM_CMD_QUEUE) {
3645 		return;	/* Not a command ack. */
3646 	}
3647 
3648 	/* XXX wide commands? */
3649 	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3650 	    "cmd notification type 0x%x qid %d idx %d\n",
3651 	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3652 
3653 	data = &ring->data[pkt->hdr.idx];
3654 
3655 	/* If the command was mapped in an mbuf, free it. */
3656 	if (data->m != NULL) {
3657 		bus_dmamap_sync(ring->data_dmat, data->map,
3658 		    BUS_DMASYNC_POSTWRITE);
3659 		bus_dmamap_unload(ring->data_dmat, data->map);
3660 		m_freem(data->m);
3661 		data->m = NULL;
3662 	}
3663 	wakeup(&ring->desc[pkt->hdr.idx]);
3664 
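	/*
	 * Host commands are expected to complete in order: the slot index of
	 * the command that just completed, plus the number of commands still
	 * queued, should land on the ring's producer index.  If not, a
	 * completion was presumably skipped.
	 */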
3665 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3666 		device_printf(sc->sc_dev,
3667 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3668 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3669 		/* XXX call iwm_force_nmi() */
3670 	}
3671 
3672 	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3673 	ring->queued--;
3674 	if (ring->queued == 0)
3675 		iwm_pcie_clear_cmd_in_flight(sc);
3676 }
3677 
3678 #if 0
3679 /*
3680  * necessary only for block ack mode
3681  */
3682 void
3683 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3684 	uint16_t len)
3685 {
3686 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3687 	uint16_t w_val;
3688 
3689 	scd_bc_tbl = sc->sched_dma.vaddr;
3690 
3691 	len += 8; /* magic numbers came naturally from paris */
3692 	len = roundup(len, 4) / 4;
3693 
3694 	w_val = htole16(sta_id << 12 | len);
3695 
3696 	/* Update TX scheduler. */
3697 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3698 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3699 	    BUS_DMASYNC_PREWRITE);
3700 
3701 	/* I really wonder what this is ?!? */
3702 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3703 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3704 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3705 		    BUS_DMASYNC_PREWRITE);
3706 	}
3707 }
3708 #endif
3709 
3710 static int
3711 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3712 {
3713 	int i;
3714 
3715 	for (i = 0; i < nitems(iwm_rates); i++) {
3716 		if (iwm_rates[i].rate == rate)
3717 			return (i);
3718 	}
3719 	/* XXX error? */
3720 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3721 	    "%s: couldn't find an entry for rate=%d\n",
3722 	    __func__,
3723 	    rate);
3724 	return (0);
3725 }
3726 
3727 /*
3728  * Fill in the rate related information for a transmit command.
3729  */
3730 static const struct iwm_rate *
3731 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3732 	struct mbuf *m, struct iwm_tx_cmd *tx)
3733 {
3734 	struct ieee80211_node *ni = &in->in_ni;
3735 	struct ieee80211_frame *wh;
3736 	const struct ieee80211_txparam *tp = ni->ni_txparms;
3737 	const struct iwm_rate *rinfo;
3738 	int type;
3739 	int ridx, rate_flags;
3740 
3741 	wh = mtod(m, struct ieee80211_frame *);
3742 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3743 
3744 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3745 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3746 
3747 	if (type == IEEE80211_FC0_TYPE_MGT ||
3748 	    type == IEEE80211_FC0_TYPE_CTL ||
3749 	    (m->m_flags & M_EAPOL) != 0) {
3750 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3751 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3752 		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3753 	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3754 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3755 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3756 		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3757 	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3758 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3759 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3760 		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3761 	} else {
3762 		/* for data frames, use RS table */
3763 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3764 		ridx = iwm_rate2ridx(sc, ni->ni_txrate);
3765 		if (ridx == -1)
3766 			ridx = 0;
3767 
3768 		/* This is the index into the programmed table */
3769 		tx->initial_rate_index = 0;
3770 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3771 	}
3772 
3773 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3774 	    "%s: frame type=%d txrate %d\n",
3775 	        __func__, type, iwm_rates[ridx].rate);
3776 
3777 	rinfo = &iwm_rates[ridx];
3778 
3779 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3780 	    __func__, ridx,
3781 	    rinfo->rate,
3782 	    !! (IWM_RIDX_IS_CCK(ridx))
3783 	    );
3784 
3785 	/* XXX TODO: hard-coded TX antenna? */
3786 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_9000)
3787 		rate_flags = IWM_RATE_MCS_ANT_B_MSK;
3788 	else
3789 		rate_flags = IWM_RATE_MCS_ANT_A_MSK;
3790 	if (IWM_RIDX_IS_CCK(ridx))
3791 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3792 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3793 
3794 	return rinfo;
3795 }
3796 
3797 #define TB0_SIZE 16
3798 static int
3799 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3800 {
3801 	struct ieee80211com *ic = &sc->sc_ic;
3802 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3803 	struct iwm_node *in = IWM_NODE(ni);
3804 	struct iwm_tx_ring *ring;
3805 	struct iwm_tx_data *data;
3806 	struct iwm_tfd *desc;
3807 	struct iwm_device_cmd *cmd;
3808 	struct iwm_tx_cmd *tx;
3809 	struct ieee80211_frame *wh;
3810 	struct ieee80211_key *k = NULL;
3811 	struct mbuf *m1;
3812 	const struct iwm_rate *rinfo;
3813 	uint32_t flags;
3814 	u_int hdrlen;
3815 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3816 	int nsegs;
3817 	uint8_t tid, type;
3818 	int i, totlen, error, pad;
3819 
3820 	wh = mtod(m, struct ieee80211_frame *);
3821 	hdrlen = ieee80211_anyhdrsize(wh);
3822 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3823 	tid = 0;
3824 	ring = &sc->txq[ac];
3825 	desc = &ring->desc[ring->cur];
3826 	data = &ring->data[ring->cur];
3827 
3828 	/* Fill out iwm_tx_cmd to send to the firmware */
3829 	cmd = &ring->cmd[ring->cur];
3830 	cmd->hdr.code = IWM_TX_CMD;
3831 	cmd->hdr.flags = 0;
3832 	cmd->hdr.qid = ring->qid;
3833 	cmd->hdr.idx = ring->cur;
3834 
3835 	tx = (void *)cmd->data;
3836 	memset(tx, 0, sizeof(*tx));
3837 
3838 	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3839 
3840 	/* Encrypt the frame if need be. */
3841 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3842 		/* Retrieve key for TX && do software encryption. */
3843 		k = ieee80211_crypto_encap(ni, m);
3844 		if (k == NULL) {
3845 			m_freem(m);
3846 			return (ENOBUFS);
3847 		}
3848 		/* 802.11 header may have moved. */
3849 		wh = mtod(m, struct ieee80211_frame *);
3850 	}
3851 
3852 	if (ieee80211_radiotap_active_vap(vap)) {
3853 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3854 
3855 		tap->wt_flags = 0;
3856 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3857 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3858 		tap->wt_rate = rinfo->rate;
3859 		if (k != NULL)
3860 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3861 		ieee80211_radiotap_tx(vap, m);
3862 	}
3863 
3864 	flags = 0;
3865 	totlen = m->m_pkthdr.len;
3866 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3867 		flags |= IWM_TX_CMD_FLG_ACK;
3868 	}
3869 
3870 	if (type == IEEE80211_FC0_TYPE_DATA &&
3871 	    totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold &&
3872 	    !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3873 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3874 	}
3875 
3876 	tx->sta_id = IWM_STATION_ID;
3877 
3878 	if (type == IEEE80211_FC0_TYPE_MGT) {
3879 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3880 
3881 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3882 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3883 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3884 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3885 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3886 		} else {
3887 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3888 		}
3889 	} else {
3890 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3891 	}
3892 
3893 	if (hdrlen & 3) {
3894 		/* First segment length must be a multiple of 4. */
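		/*
		 * E.g. a QoS data header is 26 bytes, so pad = 2 here; the
		 * IWM_TX_CMD_FLG_MH_PAD flag and the IWM_TX_CMD_OFFLD_PAD
		 * bit tell the firmware that this padding is present.
		 */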
3895 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3896 		tx->offload_assist |= htole16(1 << IWM_TX_CMD_OFFLD_PAD);
3897 		pad = 4 - (hdrlen & 3);
3898 	} else {
3899 		tx->offload_assist = 0;
3900 		pad = 0;
3901 	}
3902 
3903 	tx->len = htole16(totlen);
3904 	tx->tid_tspec = tid;
3905 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3906 
3907 	/* Set physical address of "scratch area". */
3908 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3909 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3910 
3911 	/* Copy 802.11 header in TX command. */
3912 	memcpy((uint8_t *)tx + sizeof(*tx), wh, hdrlen);
3913 
3914 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3915 
3916 	tx->sec_ctl = 0;
3917 	tx->tx_flags |= htole32(flags);
3918 
3919 	/* Trim 802.11 header. */
3920 	m_adj(m, hdrlen);
3921 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3922 	    segs, &nsegs, BUS_DMA_NOWAIT);
3923 	if (error != 0) {
3924 		if (error != EFBIG) {
3925 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3926 			    error);
3927 			m_freem(m);
3928 			return error;
3929 		}
3930 		/* Too many DMA segments, linearize mbuf. */
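		/*
		 * Two of the TFD's transfer buffers are reserved for the
		 * command, hence IWM_MAX_SCATTER - 2.
		 */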
3931 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3932 		if (m1 == NULL) {
3933 			device_printf(sc->sc_dev,
3934 			    "%s: could not defrag mbuf\n", __func__);
3935 			m_freem(m);
3936 			return (ENOBUFS);
3937 		}
3938 		m = m1;
3939 
3940 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3941 		    segs, &nsegs, BUS_DMA_NOWAIT);
3942 		if (error != 0) {
3943 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3944 			    error);
3945 			m_freem(m);
3946 			return error;
3947 		}
3948 	}
3949 	data->m = m;
3950 	data->in = in;
3951 	data->done = 0;
3952 
3953 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3954 	    "sending txd %p, in %p\n", data, data->in);
3955 	KASSERT(data->in != NULL, ("node is NULL"));
3956 
3957 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3958 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3959 	    ring->qid, ring->cur, totlen, nsegs,
3960 	    le32toh(tx->tx_flags),
3961 	    le32toh(tx->rate_n_flags),
3962 	    tx->initial_rate_index
3963 	    );
3964 
3965 	/* Fill TX descriptor. */
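	/*
	 * Transfer-buffer layout: tbs[0] maps the first TB0_SIZE bytes of
	 * the command, tbs[1] maps the remainder of the command header plus
	 * the TX command and the (padded) 802.11 header, and tbs[2..] map
	 * the payload DMA segments loaded above.
	 */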
3966 	memset(desc, 0, sizeof(*desc));
3967 	desc->num_tbs = 2 + nsegs;
3968 
3969 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3970 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3971 	    (TB0_SIZE << 4));
3972 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3973 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3974 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx) +
3975 	    hdrlen + pad - TB0_SIZE) << 4));
3976 
3977 	/* Other DMA segments are for data payload. */
3978 	for (i = 0; i < nsegs; i++) {
3979 		seg = &segs[i];
3980 		desc->tbs[i + 2].lo = htole32(seg->ds_addr);
3981 		desc->tbs[i + 2].hi_n_len =
3982 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr) |
3983 		    (seg->ds_len << 4));
3984 	}
3985 
3986 	bus_dmamap_sync(ring->data_dmat, data->map,
3987 	    BUS_DMASYNC_PREWRITE);
3988 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3989 	    BUS_DMASYNC_PREWRITE);
3990 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3991 	    BUS_DMASYNC_PREWRITE);
3992 
3993 #if 0
3994 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3995 #endif
3996 
3997 	/* Kick TX ring. */
3998 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3999 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4000 
4001 	/* Mark TX ring as full if we reach a certain threshold. */
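	/*
	 * The bit is cleared again in iwm_rx_tx_cmd() once the ring has
	 * drained below IWM_TX_RING_LOMARK.
	 */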
4002 	if (++ring->queued > IWM_TX_RING_HIMARK) {
4003 		sc->qfullmsk |= 1 << ring->qid;
4004 	}
4005 
4006 	return 0;
4007 }
4008 
4009 static int
4010 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
4011     const struct ieee80211_bpf_params *params)
4012 {
4013 	struct ieee80211com *ic = ni->ni_ic;
4014 	struct iwm_softc *sc = ic->ic_softc;
4015 	int error = 0;
4016 
4017 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4018 	    "->%s begin\n", __func__);
4019 
4020 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4021 		m_freem(m);
4022 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4023 		    "<-%s not RUNNING\n", __func__);
4024 		return (ENETDOWN);
4025 	}
4026 
4027 	IWM_LOCK(sc);
4028 	/* XXX bpf params are ignored for now; both paths transmit normally. */
4029 	error = iwm_tx(sc, m, ni, 0);
4034 	if (sc->sc_tx_timer == 0)
4035 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4036 	sc->sc_tx_timer = 5;
4037 	IWM_UNLOCK(sc);
4038 
4039 	return (error);
4040 }
4041 
4042 /*
4043  * mvm/tx.c
4044  */
4045 
4046 /*
4047  * Note that there are transports that buffer frames before they reach
4048  * the firmware. This means that after flush_tx_path is called, the
4049  * queue might not be empty. The race-free way to handle this is to:
4050  * 1) set the station as draining
4051  * 2) flush the Tx path
4052  * 3) wait for the transport queues to be empty
4053  */
4054 int
4055 iwm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
4056 {
4057 	int ret;
4058 	struct iwm_tx_path_flush_cmd flush_cmd = {
4059 		.queues_ctl = htole32(tfd_msk),
4060 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4061 	};
4062 
4063 	ret = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
4064 	    sizeof(flush_cmd), &flush_cmd);
4065 	if (ret)
4066 		device_printf(sc->sc_dev,
4067 		    "Flushing tx queue failed: %d\n", ret);
4068 	return ret;
4069 }
4070 
4071 /*
4072  * BEGIN mvm/quota.c
4073  */
4074 
4075 static int
4076 iwm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
4077 {
4078 	struct iwm_time_quota_cmd cmd;
4079 	int i, idx, ret, num_active_macs, quota, quota_rem;
4080 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4081 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
4082 	uint16_t id;
4083 
4084 	memset(&cmd, 0, sizeof(cmd));
4085 
4086 	/* currently, PHY ID == binding ID */
4087 	if (ivp) {
4088 		id = ivp->phy_ctxt->id;
4089 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4090 		colors[id] = ivp->phy_ctxt->color;
4091 
4092 		n_ifs[id] = 1;
4094 	}
4095 
4096 	/*
4097 	 * The FW's scheduling session consists of
4098 	 * IWM_MAX_QUOTA fragments. Divide these fragments
4099 	 * equally between all the bindings that require quota
4100 	 */
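	/*
	 * E.g. with two active MACs, each binding is given
	 * IWM_MAX_QUOTA / 2 fragments; any remainder goes to the first
	 * binding below.
	 */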
4101 	num_active_macs = 0;
4102 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4103 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4104 		num_active_macs += n_ifs[i];
4105 	}
4106 
4107 	quota = 0;
4108 	quota_rem = 0;
4109 	if (num_active_macs) {
4110 		quota = IWM_MAX_QUOTA / num_active_macs;
4111 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
4112 	}
4113 
4114 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4115 		if (colors[i] < 0)
4116 			continue;
4117 
4118 		cmd.quotas[idx].id_and_color =
4119 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4120 
4121 		if (n_ifs[i] <= 0) {
4122 			cmd.quotas[idx].quota = htole32(0);
4123 			cmd.quotas[idx].max_duration = htole32(0);
4124 		} else {
4125 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4126 			cmd.quotas[idx].max_duration = htole32(0);
4127 		}
4128 		idx++;
4129 	}
4130 
4131 	/* Give the remainder of the session to the first binding */
4132 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4133 
4134 	ret = iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4135 	    sizeof(cmd), &cmd);
4136 	if (ret)
4137 		device_printf(sc->sc_dev,
4138 		    "%s: Failed to send quota: %d\n", __func__, ret);
4139 	return ret;
4140 }
4141 
4142 /*
4143  * END mvm/quota.c
4144  */
4145 
4146 /*
4147  * ieee80211 routines
4148  */
4149 
4150 /*
4151  * Change to AUTH state in 80211 state machine.  Roughly matches what
4152  * Linux does in bss_info_changed().
4153  */
4154 static int
4155 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4156 {
4157 	struct ieee80211_node *ni;
4158 	struct iwm_node *in;
4159 	struct iwm_vap *iv = IWM_VAP(vap);
4160 	uint32_t duration;
4161 	int error;
4162 
4163 	/*
4164 	 * XXX I have a feeling that the vap node is being
4165 	 * freed from underneath us. Grr.
4166 	 */
4167 	ni = ieee80211_ref_node(vap->iv_bss);
4168 	in = IWM_NODE(ni);
4169 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4170 	    "%s: called; vap=%p, bss ni=%p\n",
4171 	    __func__,
4172 	    vap,
4173 	    ni);
4174 	IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
4175 	    __func__, ether_sprintf(ni->ni_bssid));
4176 
4177 	in->in_assoc = 0;
4178 	iv->iv_auth = 1;
4179 
4180 	/*
4181 	 * Firmware bug - it'll crash if the beacon interval is less
4182 	 * than 16. We can't avoid connecting at all, so refuse the
4183 	 * station state change, this will cause net80211 to abandon
4184 	 * attempts to connect to this AP, and eventually wpa_s will
4185 	 * blacklist the AP...
4186 	 */
4187 	if (ni->ni_intval < 16) {
4188 		device_printf(sc->sc_dev,
4189 		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
4190 		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
4191 		error = EINVAL;
4192 		goto out;
4193 	}
4194 
4195 	error = iwm_allow_mcast(vap, sc);
4196 	if (error) {
4197 		device_printf(sc->sc_dev,
4198 		    "%s: failed to set multicast\n", __func__);
4199 		goto out;
4200 	}
4201 
4202 	/*
4203 	 * This is where it deviates from what Linux does.
4204 	 *
4205 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4206 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4207 	 * and always does a mac_ctx_changed().
4208 	 *
4209 	 * The openbsd port doesn't attempt to do that - it reset things
4210 	 * at odd states and does the add here.
4211 	 *
4212 	 * So, until the state handling is fixed (ie, we never reset
4213 	 * the NIC except for a firmware failure, which should drag
4214 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4215 	 * contexts that are required), let's do a dirty hack here.
4216 	 */
4217 	if (iv->is_uploaded) {
4218 		if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
4219 			device_printf(sc->sc_dev,
4220 			    "%s: failed to update MAC\n", __func__);
4221 			goto out;
4222 		}
4223 	} else {
4224 		if ((error = iwm_mac_ctxt_add(sc, vap)) != 0) {
4225 			device_printf(sc->sc_dev,
4226 			    "%s: failed to add MAC\n", __func__);
4227 			goto out;
4228 		}
4229 	}
4230 	sc->sc_firmware_state = 1;
4231 
4232 	if ((error = iwm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4233 	    in->in_ni.ni_chan, 1, 1)) != 0) {
4234 		device_printf(sc->sc_dev,
4235 		    "%s: failed update phy ctxt\n", __func__);
4236 		goto out;
4237 	}
4238 	iv->phy_ctxt = &sc->sc_phyctxt[0];
4239 
4240 	if ((error = iwm_binding_add_vif(sc, iv)) != 0) {
4241 		device_printf(sc->sc_dev,
4242 		    "%s: binding update cmd\n", __func__);
4243 		goto out;
4244 	}
4245 	sc->sc_firmware_state = 2;
4246 	/*
4247 	 * Authentication becomes unreliable when powersaving is left enabled
4248 	 * here. Powersaving will be activated again when association has
4249 	 * finished or is aborted.
4250 	 */
4251 	iv->ps_disabled = TRUE;
4252 	error = iwm_power_update_mac(sc);
4253 	iv->ps_disabled = FALSE;
4254 	if (error != 0) {
4255 		device_printf(sc->sc_dev,
4256 		    "%s: failed to update power management\n",
4257 		    __func__);
4258 		goto out;
4259 	}
4260 	if ((error = iwm_add_sta(sc, in)) != 0) {
4261 		device_printf(sc->sc_dev,
4262 		    "%s: failed to add sta\n", __func__);
4263 		goto out;
4264 	}
4265 	sc->sc_firmware_state = 3;
4266 
4267 	/*
4268 	 * Prevent the FW from wandering off channel during association
4269 	 * by "protecting" the session with a time event.
4270 	 */
4271 	/* XXX duration is in units of TU, not MS */
4272 	duration = IWM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4273 	iwm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);
4274 
4275 	error = 0;
4276 out:
4277 	if (error != 0)
4278 		iv->iv_auth = 0;
4279 	ieee80211_free_node(ni);
4280 	return (error);
4281 }
4282 
4283 static struct ieee80211_node *
4284 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4285 {
4286 	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4287 	    M_NOWAIT | M_ZERO);
4288 }
4289 
4290 static uint8_t
4291 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4292 {
4293 	uint8_t plcp = rate_n_flags & 0xff;
4294 	int i;
4295 
4296 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4297 		if (iwm_rates[i].plcp == plcp)
4298 			return iwm_rates[i].rate;
4299 	}
4300 	return 0;
4301 }
4302 
4303 uint8_t
4304 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4305 {
4306 	int i;
4307 	uint8_t rval;
4308 
4309 	for (i = 0; i < rs->rs_nrates; i++) {
4310 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4311 		if (rval == iwm_rates[ridx].rate)
4312 			return rs->rs_rates[i];
4313 	}
4314 
4315 	return 0;
4316 }
4317 
4318 static int
4319 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4320 {
4321 	int i;
4322 
4323 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4324 		if (iwm_rates[i].rate == rate)
4325 			return i;
4326 	}
4327 
4328 	device_printf(sc->sc_dev,
4329 	    "%s: WARNING: device rate for %u not found!\n",
4330 	    __func__, rate);
4331 
4332 	return -1;
4333 }
4334 
4335 
4336 static void
4337 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4338 {
4339 	struct ieee80211_node *ni = &in->in_ni;
4340 	struct iwm_lq_cmd *lq = &in->in_lq;
4341 	struct ieee80211_rateset *rs = &ni->ni_rates;
4342 	int nrates = rs->rs_nrates;
4343 	int i, ridx, tab = 0;
4344 //	int txant = 0;
4345 
4346 	KASSERT(rix >= 0 && rix < nrates, ("invalid rix"));
4347 
4348 	if (nrates > nitems(lq->rs_table)) {
4349 		device_printf(sc->sc_dev,
4350 		    "%s: node supports %d rates, driver handles "
4351 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4352 		return;
4353 	}
4354 	if (nrates == 0) {
4355 		device_printf(sc->sc_dev,
4356 		    "%s: node supports 0 rates, odd!\n", __func__);
4357 		return;
4358 	}
4359 	nrates = imin(rix + 1, nrates);
4360 
4361 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4362 	    "%s: nrates=%d\n", __func__, nrates);
4363 
4364 	/* then construct a lq_cmd based on those */
4365 	memset(lq, 0, sizeof(*lq));
4366 	lq->sta_id = IWM_STATION_ID;
4367 
4368 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4369 	if (ni->ni_flags & IEEE80211_NODE_HT)
4370 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4371 
4372 	/*
4373 	 * are these used? (we don't do SISO or MIMO)
4374 	 * need to set them to non-zero, though, or we get an error.
4375 	 */
4376 	lq->single_stream_ant_msk = 1;
4377 	lq->dual_stream_ant_msk = 1;
4378 
4379 	/*
4380 	 * Build the actual rate selection table.
4381 	 * The lowest bits are the rates.  Additionally,
4382 	 * CCK needs bit 9 to be set.  The rest of the bits
4383 	 * we add to the table select the tx antenna
4384 	 * Note that we add the rates in the highest rate first
4385 	 * (opposite of ni_rates).
4386 	 */
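	/*
	 * E.g. a CCK 11 Mb/s entry with antenna mask `ant' ends up as
	 * iwm_rates[ridx].plcp | (ant << IWM_RATE_MCS_ANT_POS) |
	 * IWM_RATE_MCS_CCK_MSK.
	 */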
4387 	for (i = 0; i < nrates; i++) {
4388 		int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4389 		int nextant;
4390 
4391 		/* Map 802.11 rate to HW rate index. */
4392 		ridx = iwm_rate2ridx(sc, rate);
4393 		if (ridx == -1)
4394 			continue;
4395 
4396 #if 0
4397 		if (txant == 0)
4398 			txant = iwm_get_valid_tx_ant(sc);
4399 		nextant = 1<<(ffs(txant)-1);
4400 		txant &= ~nextant;
4401 #else
4402 		nextant = iwm_get_valid_tx_ant(sc);
4403 #endif
4404 		tab = iwm_rates[ridx].plcp;
4405 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4406 		if (IWM_RIDX_IS_CCK(ridx))
4407 			tab |= IWM_RATE_MCS_CCK_MSK;
4408 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4409 		    "station rate i=%d, rate=%d, hw=%x\n",
4410 		    i, iwm_rates[ridx].rate, tab);
4411 		lq->rs_table[i] = htole32(tab);
4412 	}
4413 	/* then fill the rest with the lowest possible rate */
4414 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4415 		KASSERT(tab != 0, ("invalid tab"));
4416 		lq->rs_table[i] = htole32(tab);
4417 	}
4418 }
4419 
4420 static int
4421 iwm_media_change(struct ifnet *ifp)
4422 {
4423 	struct ieee80211vap *vap = ifp->if_softc;
4424 	struct ieee80211com *ic = vap->iv_ic;
4425 	struct iwm_softc *sc = ic->ic_softc;
4426 	int error;
4427 
4428 	error = ieee80211_media_change(ifp);
4429 	if (error != ENETRESET)
4430 		return error;
4431 
4432 	IWM_LOCK(sc);
4433 	if (ic->ic_nrunning > 0) {
4434 		iwm_stop(sc);
4435 		iwm_init(sc);
4436 	}
4437 	IWM_UNLOCK(sc);
4438 	return error;
4439 }
4440 
4441 static void
4442 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
4443 {
4444 	struct iwm_vap *ivp = IWM_VAP(vap);
4445 	int error;
4446 
4447 	/* Avoid Tx watchdog triggering, when transfers get dropped here. */
4448 	sc->sc_tx_timer = 0;
4449 
4450 	ivp->iv_auth = 0;
4451 	if (sc->sc_firmware_state == 3) {
4452 		iwm_xmit_queue_drain(sc);
4453 //		iwm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
4454 		error = iwm_rm_sta(sc, vap, TRUE);
4455 		if (error) {
4456 			device_printf(sc->sc_dev,
4457 			    "%s: Failed to remove station: %d\n",
4458 			    __func__, error);
4459 		}
4460 	}
4461 	if (sc->sc_firmware_state == 3) {
4462 		error = iwm_mac_ctxt_changed(sc, vap);
4463 		if (error) {
4464 			device_printf(sc->sc_dev,
4465 			    "%s: Failed to change mac context: %d\n",
4466 			    __func__, error);
4467 		}
4468 	}
4469 	if (sc->sc_firmware_state == 3) {
4470 		error = iwm_sf_update(sc, vap, FALSE);
4471 		if (error) {
4472 			device_printf(sc->sc_dev,
4473 			    "%s: Failed to update smart FIFO: %d\n",
4474 			    __func__, error);
4475 		}
4476 	}
4477 	if (sc->sc_firmware_state == 3) {
4478 		error = iwm_rm_sta_id(sc, vap);
4479 		if (error) {
4480 			device_printf(sc->sc_dev,
4481 			    "%s: Failed to remove station id: %d\n",
4482 			    __func__, error);
4483 		}
4484 	}
4485 	if (sc->sc_firmware_state == 3) {
4486 		error = iwm_update_quotas(sc, NULL);
4487 		if (error) {
4488 			device_printf(sc->sc_dev,
4489 			    "%s: Failed to update PHY quota: %d\n",
4490 			    __func__, error);
4491 		}
4492 	}
4493 	if (sc->sc_firmware_state == 3) {
4494 		/* XXX Might need to specify bssid correctly. */
4495 		error = iwm_mac_ctxt_changed(sc, vap);
4496 		if (error) {
4497 			device_printf(sc->sc_dev,
4498 			    "%s: Failed to change mac context: %d\n",
4499 			    __func__, error);
4500 		}
4501 	}
4502 	if (sc->sc_firmware_state == 3) {
4503 		sc->sc_firmware_state = 2;
4504 	}
4505 	if (sc->sc_firmware_state > 1) {
4506 		error = iwm_binding_remove_vif(sc, ivp);
4507 		if (error) {
4508 			device_printf(sc->sc_dev,
4509 			    "%s: Failed to remove channel ctx: %d\n",
4510 			    __func__, error);
4511 		}
4512 	}
4513 	if (sc->sc_firmware_state > 1) {
4514 		sc->sc_firmware_state = 1;
4515 	}
4516 	ivp->phy_ctxt = NULL;
4517 	if (sc->sc_firmware_state > 0) {
4518 		error = iwm_mac_ctxt_changed(sc, vap);
4519 		if (error) {
4520 			device_printf(sc->sc_dev,
4521 			    "%s: Failed to change mac context: %d\n",
4522 			    __func__, error);
4523 		}
4524 	}
4525 	if (sc->sc_firmware_state > 0) {
4526 		error = iwm_power_update_mac(sc);
4527 		if (error != 0) {
4528 			device_printf(sc->sc_dev,
4529 			    "%s: failed to update power management\n",
4530 			    __func__);
4531 		}
4532 	}
4533 	sc->sc_firmware_state = 0;
4534 }
4535 
4536 static int
4537 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4538 {
4539 	struct iwm_vap *ivp = IWM_VAP(vap);
4540 	struct ieee80211com *ic = vap->iv_ic;
4541 	struct iwm_softc *sc = ic->ic_softc;
4542 	struct iwm_node *in;
4543 	int error;
4544 
4545 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4546 	    "switching state %s -> %s arg=0x%x\n",
4547 	    ieee80211_state_name[vap->iv_state],
4548 	    ieee80211_state_name[nstate],
4549 	    arg);
4550 
4551 	IEEE80211_UNLOCK(ic);
4552 	IWM_LOCK(sc);
4553 
4554 	if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
4555 	    (nstate == IEEE80211_S_AUTH ||
4556 	     nstate == IEEE80211_S_ASSOC ||
4557 	     nstate == IEEE80211_S_RUN)) {
4558 		/* Stop blinking for a scan, when authenticating. */
4559 		iwm_led_blink_stop(sc);
4560 	}
4561 
4562 	if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
4563 		iwm_led_disable(sc);
4564 		/* disable beacon filtering if we're hopping out of RUN */
4565 		iwm_disable_beacon_filter(sc);
4566 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4567 			in->in_assoc = 0;
4568 	}
4569 
4570 	if ((vap->iv_state == IEEE80211_S_AUTH ||
4571 	     vap->iv_state == IEEE80211_S_ASSOC ||
4572 	     vap->iv_state == IEEE80211_S_RUN) &&
4573 	    (nstate == IEEE80211_S_INIT ||
4574 	     nstate == IEEE80211_S_SCAN ||
4575 	     nstate == IEEE80211_S_AUTH)) {
4576 		iwm_stop_session_protection(sc, ivp);
4577 	}
4578 
4579 	if ((vap->iv_state == IEEE80211_S_RUN ||
4580 	     vap->iv_state == IEEE80211_S_ASSOC) &&
4581 	    nstate == IEEE80211_S_INIT) {
4582 		/*
4583 		 * In this case, iv_newstate() wants to send an 80211 frame on
4584 		 * the network that we are leaving. So we need to call it,
4585 		 * before tearing down all the firmware state.
4586 		 */
4587 		IWM_UNLOCK(sc);
4588 		IEEE80211_LOCK(ic);
4589 		ivp->iv_newstate(vap, nstate, arg);
4590 		IEEE80211_UNLOCK(ic);
4591 		IWM_LOCK(sc);
4592 		iwm_bring_down_firmware(sc, vap);
4593 		IWM_UNLOCK(sc);
4594 		IEEE80211_LOCK(ic);
4595 		return 0;
4596 	}
4597 
4598 	switch (nstate) {
4599 	case IEEE80211_S_INIT:
4600 	case IEEE80211_S_SCAN:
4601 		break;
4602 
4603 	case IEEE80211_S_AUTH:
4604 		iwm_bring_down_firmware(sc, vap);
4605 		if ((error = iwm_auth(vap, sc)) != 0) {
4606 			device_printf(sc->sc_dev,
4607 			    "%s: could not move to auth state: %d\n",
4608 			    __func__, error);
4609 			iwm_bring_down_firmware(sc, vap);
4610 			IWM_UNLOCK(sc);
4611 			IEEE80211_LOCK(ic);
4612 			return 1;
4613 		}
4614 		break;
4615 
4616 	case IEEE80211_S_ASSOC:
4617 		/*
4618 		 * EBS may be disabled due to previous failures reported by FW.
4619 		 * Reset EBS status here assuming environment has been changed.
4620 		 */
4621 		sc->last_ebs_successful = TRUE;
4622 		break;
4623 
4624 	case IEEE80211_S_RUN:
4625 		in = IWM_NODE(vap->iv_bss);
4626 		/* Update the association state, now that we have it all */
4627 		/* (e.g., the associd arrives at this point). */
4628 		error = iwm_update_sta(sc, in);
4629 		if (error != 0) {
4630 			device_printf(sc->sc_dev,
4631 			    "%s: failed to update STA\n", __func__);
4632 			IWM_UNLOCK(sc);
4633 			IEEE80211_LOCK(ic);
4634 			return error;
4635 		}
4636 		in->in_assoc = 1;
4637 		error = iwm_mac_ctxt_changed(sc, vap);
4638 		if (error != 0) {
4639 			device_printf(sc->sc_dev,
4640 			    "%s: failed to update MAC: %d\n", __func__, error);
4641 		}
4642 
4643 		iwm_sf_update(sc, vap, FALSE);
4644 		iwm_enable_beacon_filter(sc, ivp);
4645 		iwm_power_update_mac(sc);
4646 		iwm_update_quotas(sc, ivp);
4647 		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4648 		iwm_setrates(sc, in, rix);
4649 
4650 		if ((error = iwm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4651 			device_printf(sc->sc_dev,
4652 			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4653 		}
4654 
4655 		iwm_led_enable(sc);
4656 		break;
4657 
4658 	default:
4659 		break;
4660 	}
4661 	IWM_UNLOCK(sc);
4662 	IEEE80211_LOCK(ic);
4663 
4664 	return (ivp->iv_newstate(vap, nstate, arg));
4665 }
4666 
4667 void
4668 iwm_endscan_cb(void *arg, int pending)
4669 {
4670 	struct iwm_softc *sc = arg;
4671 	struct ieee80211com *ic = &sc->sc_ic;
4672 
4673 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4674 	    "%s: scan ended\n",
4675 	    __func__);
4676 
4677 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4678 }
4679 
4680 static int
4681 iwm_send_bt_init_conf(struct iwm_softc *sc)
4682 {
4683 	struct iwm_bt_coex_cmd bt_cmd;
4684 
4685 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4686 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4687 
4688 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4689 	    &bt_cmd);
4690 }
4691 
4692 static boolean_t
4693 iwm_is_lar_supported(struct iwm_softc *sc)
4694 {
4695 	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4696 	boolean_t tlv_lar = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4697 
4698 	if (iwm_lar_disable)
4699 		return FALSE;
4700 
4701 	/*
4702 	 * Enable LAR only if it is supported by the FW (TLV) &&
4703 	 * enabled in the NVM
4704 	 */
4705 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
4706 		return nvm_lar && tlv_lar;
4707 	else
4708 		return tlv_lar;
4709 }
4710 
4711 static boolean_t
4712 iwm_is_wifi_mcc_supported(struct iwm_softc *sc)
4713 {
4714 	return iwm_fw_has_api(sc, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4715 	    iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4716 }
4717 
4718 static int
4719 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4720 {
4721 	struct iwm_mcc_update_cmd mcc_cmd;
4722 	struct iwm_host_cmd hcmd = {
4723 		.id = IWM_MCC_UPDATE_CMD,
4724 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4725 		.data = { &mcc_cmd },
4726 	};
4727 	int ret;
4728 #ifdef IWM_DEBUG
4729 	struct iwm_rx_packet *pkt;
4730 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4731 	struct iwm_mcc_update_resp *mcc_resp;
4732 	int n_channels;
4733 	uint16_t mcc;
4734 #endif
4735 	int resp_v2 = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4736 
4737 	if (!iwm_is_lar_supported(sc)) {
4738 		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4739 		    __func__);
4740 		return 0;
4741 	}
4742 
4743 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4744 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
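	/* E.g. the default "ZZ" passed by iwm_init_hw() encodes as 0x5a5a. */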
4745 	if (iwm_is_wifi_mcc_supported(sc))
4746 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4747 	else
4748 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4749 
4750 	if (resp_v2)
4751 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4752 	else
4753 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4754 
4755 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4756 	    "send MCC update to FW with '%c%c' src = %d\n",
4757 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4758 
4759 	ret = iwm_send_cmd(sc, &hcmd);
4760 	if (ret)
4761 		return ret;
4762 
4763 #ifdef IWM_DEBUG
4764 	pkt = hcmd.resp_pkt;
4765 
4766 	/* Extract MCC response */
4767 	if (resp_v2) {
4768 		mcc_resp = (void *)pkt->data;
4769 		mcc = mcc_resp->mcc;
4770 		n_channels =  le32toh(mcc_resp->n_channels);
4771 	} else {
4772 		mcc_resp_v1 = (void *)pkt->data;
4773 		mcc = mcc_resp_v1->mcc;
4774 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4775 	}
4776 
4777 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4778 	if (mcc == 0)
4779 		mcc = 0x3030;  /* "00" - world */
4780 
4781 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4782 	    "regulatory domain '%c%c' (%d channels available)\n",
4783 	    mcc >> 8, mcc & 0xff, n_channels);
4784 #endif
4785 	iwm_free_resp(sc, &hcmd);
4786 
4787 	return 0;
4788 }
4789 
4790 static void
4791 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4792 {
4793 	struct iwm_host_cmd cmd = {
4794 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4795 		.len = { sizeof(uint32_t), },
4796 		.data = { &backoff, },
4797 	};
4798 
4799 	if (iwm_send_cmd(sc, &cmd) != 0) {
4800 		device_printf(sc->sc_dev,
4801 		    "failed to change thermal tx backoff\n");
4802 	}
4803 }
4804 
4805 static int
4806 iwm_init_hw(struct iwm_softc *sc)
4807 {
4808 	struct ieee80211com *ic = &sc->sc_ic;
4809 	int error, i, ac;
4810 
4811 	sc->sf_state = IWM_SF_UNINIT;
4812 
4813 	if ((error = iwm_start_hw(sc)) != 0) {
4814 		printf("iwm_start_hw: failed %d\n", error);
4815 		return error;
4816 	}
4817 
4818 	if ((error = iwm_run_init_ucode(sc, 0)) != 0) {
4819 		printf("iwm_run_init_ucode: failed %d\n", error);
4820 		return error;
4821 	}
4822 
4823 	/*
4824 	 * Stop and restart the HW since the INIT
4825 	 * image was just loaded.
4826 	 */
4827 	iwm_stop_device(sc);
4828 	sc->sc_ps_disabled = FALSE;
4829 	if ((error = iwm_start_hw(sc)) != 0) {
4830 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4831 		return error;
4832 	}
4833 
4834 	/* Restart, this time with the regular firmware. */
4835 	error = iwm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4836 	if (error) {
4837 		device_printf(sc->sc_dev, "could not load firmware\n");
4838 		goto error;
4839 	}
4840 
4841 	error = iwm_sf_update(sc, NULL, FALSE);
4842 	if (error)
4843 		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4844 
4845 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4846 		device_printf(sc->sc_dev, "bt init conf failed\n");
4847 		goto error;
4848 	}
4849 
4850 	error = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
4851 	if (error != 0) {
4852 		device_printf(sc->sc_dev, "antenna config failed\n");
4853 		goto error;
4854 	}
4855 
4856 	/* Send phy db control command and then phy db calibration */
4857 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4858 		goto error;
4859 
4860 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4861 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4862 		goto error;
4863 	}
4864 
4865 	/* Add auxiliary station for scanning */
4866 	if ((error = iwm_add_aux_sta(sc)) != 0) {
4867 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4868 		goto error;
4869 	}
4870 
4871 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4872 		/*
4873 		 * The channel used here isn't relevant as it's
4874 		 * going to be overwritten in the other flows.
4875 		 * For now use the first channel we have.
4876 		 */
4877 		if ((error = iwm_phy_ctxt_add(sc,
4878 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4879 			goto error;
4880 	}
4881 
4882 	/* Initialize tx backoffs to the minimum. */
4883 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4884 		iwm_tt_tx_backoff(sc, 0);
4885 
4886 	if (iwm_config_ltr(sc) != 0)
4887 		device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
4888 
4889 	error = iwm_power_update_device(sc);
4890 	if (error)
4891 		goto error;
4892 
4893 	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4894 		goto error;
4895 
4896 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4897 		if ((error = iwm_config_umac_scan(sc)) != 0)
4898 			goto error;
4899 	}
4900 
4901 	/* Enable Tx queues. */
4902 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4903 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4904 		    iwm_ac_to_tx_fifo[ac]);
4905 		if (error)
4906 			goto error;
4907 	}
4908 
4909 	if ((error = iwm_disable_beacon_filter(sc)) != 0) {
4910 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4911 		goto error;
4912 	}
4913 
4914 	return 0;
4915 
4916  error:
4917 	iwm_stop_device(sc);
4918 	return error;
4919 }
4920 
4921 /* Allow multicast from our BSSID. */
4922 static int
4923 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4924 {
4925 	struct ieee80211_node *ni = vap->iv_bss;
4926 	struct iwm_mcast_filter_cmd *cmd;
4927 	size_t size;
4928 	int error;
4929 
4930 	size = roundup(sizeof(*cmd), 4);
4931 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4932 	if (cmd == NULL)
4933 		return ENOMEM;
4934 	cmd->filter_own = 1;
4935 	cmd->port_id = 0;
4936 	cmd->count = 0;
4937 	cmd->pass_all = 1;
4938 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4939 
4940 	error = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4941 	    IWM_CMD_SYNC, size, cmd);
4942 	free(cmd, M_DEVBUF);
4943 
4944 	return (error);
4945 }
4946 
4947 /*
4948  * ifnet interfaces
4949  */
4950 
4951 static void
4952 iwm_init(struct iwm_softc *sc)
4953 {
4954 	int error;
4955 
4956 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4957 		return;
4958 	}
4959 	sc->sc_generation++;
4960 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4961 
4962 	if ((error = iwm_init_hw(sc)) != 0) {
4963 		printf("iwm_init_hw failed %d\n", error);
4964 		iwm_stop(sc);
4965 		return;
4966 	}
4967 
4968 	/*
4969 	 * Ok, firmware loaded and we are jogging
4970 	 */
4971 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4972 }
4973 
4974 static int
4975 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4976 {
4977 	struct iwm_softc *sc;
4978 	int error;
4979 
4980 	sc = ic->ic_softc;
4981 
4982 	IWM_LOCK(sc);
4983 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4984 		IWM_UNLOCK(sc);
4985 		return (ENXIO);
4986 	}
4987 	error = mbufq_enqueue(&sc->sc_snd, m);
4988 	if (error) {
4989 		IWM_UNLOCK(sc);
4990 		return (error);
4991 	}
4992 	iwm_start(sc);
4993 	IWM_UNLOCK(sc);
4994 	return (0);
4995 }
4996 
4997 /*
4998  * Dequeue packets from sendq and call send.
4999  */
5000 static void
5001 iwm_start(struct iwm_softc *sc)
5002 {
5003 	struct ieee80211_node *ni;
5004 	struct mbuf *m;
5005 	int ac = 0;
5006 
5007 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5008 	while (sc->qfullmsk == 0 &&
5009 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5010 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5011 		if (iwm_tx(sc, m, ni, ac) != 0) {
5012 			if_inc_counter(ni->ni_vap->iv_ifp,
5013 			    IFCOUNTER_OERRORS, 1);
5014 			ieee80211_free_node(ni);
5015 			continue;
5016 		}
5017 		if (sc->sc_tx_timer == 0) {
5018 			callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
5019 			    sc);
5020 		}
5021 		sc->sc_tx_timer = 15;
5022 	}
5023 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5024 }
5025 
5026 static void
5027 iwm_stop(struct iwm_softc *sc)
5028 {
5029 
5030 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
5031 	sc->sc_flags |= IWM_FLAG_STOPPED;
5032 	sc->sc_generation++;
5033 	iwm_led_blink_stop(sc);
5034 	sc->sc_tx_timer = 0;
5035 	iwm_stop_device(sc);
5036 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5037 }
5038 
5039 static void
5040 iwm_watchdog(void *arg)
5041 {
5042 	struct iwm_softc *sc = arg;
5043 	struct ieee80211com *ic = &sc->sc_ic;
5044 
5045 	if (sc->sc_attached == 0)
5046 		return;
5047 
5048 	if (sc->sc_tx_timer > 0) {
5049 		if (--sc->sc_tx_timer == 0) {
5050 			device_printf(sc->sc_dev, "device timeout\n");
5051 #ifdef IWM_DEBUG
5052 			iwm_nic_error(sc);
5053 #endif
5054 			ieee80211_restart_all(ic);
5055 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5056 			return;
5057 		}
5058 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5059 	}
5060 }
5061 
5062 static void
5063 iwm_parent(struct ieee80211com *ic)
5064 {
5065 	struct iwm_softc *sc = ic->ic_softc;
5066 	int startall = 0;
5067 
5068 	IWM_LOCK(sc);
5069 	if (ic->ic_nrunning > 0) {
5070 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5071 			iwm_init(sc);
5072 			startall = 1;
5073 		}
5074 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5075 		iwm_stop(sc);
5076 	IWM_UNLOCK(sc);
5077 	if (startall)
5078 		ieee80211_start_all(ic);
5079 }
5080 
5081 /*
5082  * The interrupt side of things
5083  */
5084 
5085 /*
5086  * error dumping routines are from iwlwifi/mvm/utils.c
5087  */
5088 
5089 /*
5090  * Note: This structure is read from the device with IO accesses,
5091  * and the reading already does the endian conversion. As it is
5092  * read with uint32_t-sized accesses, any members with a different size
5093  * need to be ordered correctly though!
5094  */
5095 struct iwm_error_event_table {
5096 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5097 	uint32_t error_id;		/* type of error */
5098 	uint32_t trm_hw_status0;	/* TRM HW status */
5099 	uint32_t trm_hw_status1;	/* TRM HW status */
5100 	uint32_t blink2;		/* branch link */
5101 	uint32_t ilink1;		/* interrupt link */
5102 	uint32_t ilink2;		/* interrupt link */
5103 	uint32_t data1;		/* error-specific data */
5104 	uint32_t data2;		/* error-specific data */
5105 	uint32_t data3;		/* error-specific data */
5106 	uint32_t bcon_time;		/* beacon timer */
5107 	uint32_t tsf_low;		/* network timestamp function timer */
5108 	uint32_t tsf_hi;		/* network timestamp function timer */
5109 	uint32_t gp1;		/* GP1 timer register */
5110 	uint32_t gp2;		/* GP2 timer register */
5111 	uint32_t fw_rev_type;	/* firmware revision type */
5112 	uint32_t major;		/* uCode version major */
5113 	uint32_t minor;		/* uCode version minor */
5114 	uint32_t hw_ver;		/* HW Silicon version */
5115 	uint32_t brd_ver;		/* HW board version */
5116 	uint32_t log_pc;		/* log program counter */
5117 	uint32_t frame_ptr;		/* frame pointer */
5118 	uint32_t stack_ptr;		/* stack pointer */
5119 	uint32_t hcmd;		/* last host command header */
5120 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
5121 				 * rxtx_flag */
5122 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
5123 				 * host_flag */
5124 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
5125 				 * enc_flag */
5126 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
5127 				 * time_flag */
5128 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
5129 				 * wico interrupt */
5130 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
5131 	uint32_t wait_event;		/* wait event() caller address */
5132 	uint32_t l2p_control;	/* L2pControlField */
5133 	uint32_t l2p_duration;	/* L2pDurationField */
5134 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
5135 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
5136 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
5137 				 * (LMPM_PMG_SEL) */
5138 	uint32_t u_timestamp;	/* indicates the date and time of
5139 				 * compilation */
5140 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
5141 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5142 
5143 /*
5144  * UMAC error struct - relevant starting from family 8000 chip.
5145  * Note: This structure is read from the device with IO accesses,
5146  * and the reading already does the endian conversion. As it is
5147  * read with u32-sized accesses, any members with a different size
5148  * need to be ordered correctly though!
5149  */
5150 struct iwm_umac_error_event_table {
5151 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5152 	uint32_t error_id;	/* type of error */
5153 	uint32_t blink1;	/* branch link */
5154 	uint32_t blink2;	/* branch link */
5155 	uint32_t ilink1;	/* interrupt link */
5156 	uint32_t ilink2;	/* interrupt link */
5157 	uint32_t data1;		/* error-specific data */
5158 	uint32_t data2;		/* error-specific data */
5159 	uint32_t data3;		/* error-specific data */
5160 	uint32_t umac_major;
5161 	uint32_t umac_minor;
5162 	uint32_t frame_pointer;	/* core register 27*/
5163 	uint32_t stack_pointer;	/* core register 28 */
5164 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
5165 	uint32_t nic_isr_pref;	/* ISR status register */
5166 } __packed;
5167 
5168 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5169 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5170 
5171 #ifdef IWM_DEBUG
5172 struct {
5173 	const char *name;
5174 	uint8_t num;
5175 } advanced_lookup[] = {
5176 	{ "NMI_INTERRUPT_WDG", 0x34 },
5177 	{ "SYSASSERT", 0x35 },
5178 	{ "UCODE_VERSION_MISMATCH", 0x37 },
5179 	{ "BAD_COMMAND", 0x38 },
5180 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5181 	{ "FATAL_ERROR", 0x3D },
5182 	{ "NMI_TRM_HW_ERR", 0x46 },
5183 	{ "NMI_INTERRUPT_TRM", 0x4C },
5184 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5185 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5186 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5187 	{ "NMI_INTERRUPT_HOST", 0x66 },
5188 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5189 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5190 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5191 	{ "ADVANCED_SYSASSERT", 0 },
5192 };
5193 
5194 static const char *
5195 iwm_desc_lookup(uint32_t num)
5196 {
5197 	int i;
5198 
5199 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5200 		if (advanced_lookup[i].num == num)
5201 			return advanced_lookup[i].name;
5202 
5203 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5204 	return advanced_lookup[i].name;
5205 }
5206 
5207 static void
5208 iwm_nic_umac_error(struct iwm_softc *sc)
5209 {
5210 	struct iwm_umac_error_event_table table;
5211 	uint32_t base;
5212 
5213 	base = sc->umac_error_event_table;
5214 
5215 	if (base < 0x800000) {
5216 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5217 		    base);
5218 		return;
5219 	}
5220 
5221 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5222 		device_printf(sc->sc_dev, "reading errlog failed\n");
5223 		return;
5224 	}
5225 
5226 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5227 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5228 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5229 		    sc->sc_flags, table.valid);
5230 	}
5231 
5232 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5233 		iwm_desc_lookup(table.error_id));
5234 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5235 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5236 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5237 	    table.ilink1);
5238 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5239 	    table.ilink2);
5240 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5241 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5242 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5243 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5244 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5245 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5246 	    table.frame_pointer);
5247 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5248 	    table.stack_pointer);
5249 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5250 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5251 	    table.nic_isr_pref);
5252 }
5253 
5254 /*
5255  * Support for dumping the error log seemed like a good idea ...
5256  * but it's mostly hex junk and the only sensible thing is the
5257  * hw/ucode revision (which we know anyway).  Since it's here,
5258  * I'll just leave it in, just in case e.g. the Intel guys want to
5259  * help us decipher some "ADVANCED_SYSASSERT" later.
5260  */
5261 static void
5262 iwm_nic_error(struct iwm_softc *sc)
5263 {
5264 	struct iwm_error_event_table table;
5265 	uint32_t base;
5266 
5267 	device_printf(sc->sc_dev, "dumping device error log\n");
5268 	base = sc->error_event_table[0];
5269 	if (base < 0x800000) {
5270 		device_printf(sc->sc_dev,
5271 		    "Invalid error log pointer 0x%08x\n", base);
5272 		return;
5273 	}
5274 
5275 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5276 		device_printf(sc->sc_dev, "reading errlog failed\n");
5277 		return;
5278 	}
5279 
5280 	if (!table.valid) {
5281 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5282 		return;
5283 	}
5284 
5285 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5286 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5287 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5288 		    sc->sc_flags, table.valid);
5289 	}
5290 
5291 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5292 	    iwm_desc_lookup(table.error_id));
5293 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5294 	    table.trm_hw_status0);
5295 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5296 	    table.trm_hw_status1);
5297 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5298 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5299 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5300 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5301 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5302 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5303 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5304 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5305 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5306 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5307 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5308 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5309 	    table.fw_rev_type);
5310 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5311 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5312 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5313 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5314 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5315 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5316 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5317 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5318 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5319 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5320 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5321 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5322 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5323 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5324 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5325 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5326 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5327 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5328 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5329 
5330 	if (sc->umac_error_event_table)
5331 		iwm_nic_umac_error(sc);
5332 }
5333 #endif
5334 
5335 static void
5336 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5337 {
5338 	struct ieee80211com *ic = &sc->sc_ic;
5339 	struct iwm_cmd_response *cresp;
5340 	struct mbuf *m1;
5341 	uint32_t offset = 0;
5342 	uint32_t maxoff = IWM_RBUF_SIZE;
5343 	uint32_t nextoff;
5344 	boolean_t stolen = FALSE;
5345 
5346 #define HAVEROOM(a)	\
5347     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5348 
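	/*
	 * A single RX buffer may carry several packets back to back: each
	 * begins with a length/flags word and an iwm_cmd_header, and is
	 * padded out to IWM_FH_RSCSR_FRAME_ALIGN.  HAVEROOM() checks that
	 * at least one more such header fits below maxoff.
	 */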
5349 	while (HAVEROOM(offset)) {
5350 		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5351 		    offset);
5352 		int qid, idx, code, len;
5353 
5354 		qid = pkt->hdr.qid;
5355 		idx = pkt->hdr.idx;
5356 
5357 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5358 
5359 		/*
		 * We randomly get these from the firmware, no idea why.
		 * They at least seem harmless, so just ignore them for now.
5362 		 */
5363 		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5364 		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5365 			break;
5366 		}
5367 
5368 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5369 		    "rx packet qid=%d idx=%d type=%x\n",
5370 		    qid & ~0x80, pkt->hdr.idx, code);
5371 
5372 		len = iwm_rx_packet_len(pkt);
5373 		len += sizeof(uint32_t); /* account for status word */
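		/*
		 * Frames are padded to IWM_FH_RSCSR_FRAME_ALIGN (0x40); for
		 * example, a frame occupying 41 bytes here would place the
		 * next packet header 64 bytes further into the buffer.
		 */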
5374 		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5375 
5376 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5377 
5378 		switch (code) {
5379 		case IWM_REPLY_RX_PHY_CMD:
5380 			iwm_rx_rx_phy_cmd(sc, pkt);
5381 			break;
5382 
5383 		case IWM_REPLY_RX_MPDU_CMD: {
5384 			/*
5385 			 * If this is the last frame in the RX buffer, we
5386 			 * can directly feed the mbuf to the sharks here.
5387 			 */
5388 			struct iwm_rx_packet *nextpkt = mtodoff(m,
5389 			    struct iwm_rx_packet *, nextoff);
5390 			if (!HAVEROOM(nextoff) ||
5391 			    (nextpkt->hdr.code == 0 &&
5392 			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5393 			     nextpkt->hdr.idx == 0) ||
5394 			    (nextpkt->len_n_flags ==
5395 			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5396 				if (iwm_rx_mpdu(sc, m, offset, stolen)) {
5397 					stolen = FALSE;
5398 					/* Make sure we abort the loop */
5399 					nextoff = maxoff;
5400 				}
5401 				break;
5402 			}
5403 
5404 			/*
			 * Use m_copym() instead of m_split(), because that
			 * makes it easier to keep a valid rx buffer in
			 * the ring when iwm_rx_mpdu() fails.
			 *
			 * We need to start m_copym() at offset 0 so that
			 * the M_PKTHDR flag is preserved.
5411 			 */
5412 			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5413 			if (m1) {
5414 				if (iwm_rx_mpdu(sc, m1, offset, stolen))
5415 					stolen = TRUE;
5416 				else
5417 					m_freem(m1);
5418 			}
5419 			break;
5420 		}
5421 
5422 		case IWM_TX_CMD:
5423 			iwm_rx_tx_cmd(sc, pkt);
5424 			break;
5425 
5426 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5427 			struct iwm_missed_beacons_notif *resp;
5428 			int missed;
5429 
5430 			/* XXX look at mac_id to determine interface ID */
5431 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5432 
5433 			resp = (void *)pkt->data;
5434 			missed = le32toh(resp->consec_missed_beacons);
5435 
5436 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5437 			    "%s: MISSED_BEACON: mac_id=%d, "
5438 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5439 			    "num_rx=%d\n",
5440 			    __func__,
5441 			    le32toh(resp->mac_id),
5442 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5443 			    le32toh(resp->consec_missed_beacons),
5444 			    le32toh(resp->num_expected_beacons),
5445 			    le32toh(resp->num_recvd_beacons));
5446 
5447 			/* Be paranoid */
5448 			if (vap == NULL)
5449 				break;
5450 
5451 			/* XXX no net80211 locking? */
5452 			if (vap->iv_state == IEEE80211_S_RUN &&
5453 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5454 				if (missed > vap->iv_bmissthreshold) {
5455 					/* XXX bad locking; turn into task */
5456 					IWM_UNLOCK(sc);
5457 					ieee80211_beacon_miss(ic);
5458 					IWM_LOCK(sc);
5459 				}
5460 			}
5461 
5462 			break;
5463 		}
5464 
5465 		case IWM_MFUART_LOAD_NOTIFICATION:
5466 			break;
5467 
5468 		case IWM_ALIVE:
5469 			break;
5470 
5471 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5472 			break;
5473 
5474 		case IWM_STATISTICS_NOTIFICATION:
5475 			iwm_handle_rx_statistics(sc, pkt);
5476 			break;
5477 
5478 		case IWM_NVM_ACCESS_CMD:
5479 		case IWM_MCC_UPDATE_CMD:
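			/*
			 * sc_wantresp encodes the expected queue id in its
			 * upper 16 bits and the descriptor index in its
			 * lower 16 bits; stash the response for the waiting
			 * command if they match.
			 */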
5480 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5481 				memcpy(sc->sc_cmd_resp,
5482 				    pkt, sizeof(sc->sc_cmd_resp));
5483 			}
5484 			break;
5485 
5486 		case IWM_MCC_CHUB_UPDATE_CMD: {
5487 			struct iwm_mcc_chub_notif *notif;
5488 			notif = (void *)pkt->data;
5489 
5490 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5491 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5492 			sc->sc_fw_mcc[2] = '\0';
5493 			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5494 			    "fw source %d sent CC '%s'\n",
5495 			    notif->source_id, sc->sc_fw_mcc);
5496 			break;
5497 		}
5498 
5499 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5500 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5501 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5502 			struct iwm_dts_measurement_notif_v1 *notif;
5503 
5504 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5505 				device_printf(sc->sc_dev,
5506 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5507 				break;
5508 			}
5509 			notif = (void *)pkt->data;
5510 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5511 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5512 			    notif->temp);
5513 			break;
5514 		}
5515 
5516 		case IWM_PHY_CONFIGURATION_CMD:
5517 		case IWM_TX_ANT_CONFIGURATION_CMD:
5518 		case IWM_ADD_STA:
5519 		case IWM_MAC_CONTEXT_CMD:
5520 		case IWM_REPLY_SF_CFG_CMD:
5521 		case IWM_POWER_TABLE_CMD:
5522 		case IWM_LTR_CONFIG:
5523 		case IWM_PHY_CONTEXT_CMD:
5524 		case IWM_BINDING_CONTEXT_CMD:
5525 		case IWM_TIME_EVENT_CMD:
5526 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5527 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5528 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5529 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5530 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5531 		case IWM_REPLY_BEACON_FILTERING_CMD:
5532 		case IWM_MAC_PM_POWER_TABLE:
5533 		case IWM_TIME_QUOTA_CMD:
5534 		case IWM_REMOVE_STA:
5535 		case IWM_TXPATH_FLUSH:
5536 		case IWM_LQ_CMD:
5537 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5538 				 IWM_FW_PAGING_BLOCK_CMD):
5539 		case IWM_BT_CONFIG:
5540 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5541 			cresp = (void *)pkt->data;
5542 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5543 				memcpy(sc->sc_cmd_resp,
5544 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5545 			}
5546 			break;
5547 
5548 		/* ignore */
5549 		case IWM_PHY_DB_CMD:
5550 			break;
5551 
5552 		case IWM_INIT_COMPLETE_NOTIF:
5553 			break;
5554 
5555 		case IWM_SCAN_OFFLOAD_COMPLETE:
5556 			iwm_rx_lmac_scan_complete_notif(sc, pkt);
5557 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5558 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5559 				ieee80211_runtask(ic, &sc->sc_es_task);
5560 			}
5561 			break;
5562 
5563 		case IWM_SCAN_ITERATION_COMPLETE: {
5564 			struct iwm_lmac_scan_complete_notif *notif;
5565 			notif = (void *)pkt->data;
5566 			break;
5567 		}
5568 
5569 		case IWM_SCAN_COMPLETE_UMAC:
5570 			iwm_rx_umac_scan_complete_notif(sc, pkt);
5571 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5572 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5573 				ieee80211_runtask(ic, &sc->sc_es_task);
5574 			}
5575 			break;
5576 
5577 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5578 			struct iwm_umac_scan_iter_complete_notif *notif;
5579 			notif = (void *)pkt->data;
5580 
5581 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5582 			    "complete, status=0x%x, %d channels scanned\n",
5583 			    notif->status, notif->scanned_channels);
5584 			break;
5585 		}
5586 
5587 		case IWM_REPLY_ERROR: {
5588 			struct iwm_error_resp *resp;
5589 			resp = (void *)pkt->data;
5590 
5591 			device_printf(sc->sc_dev,
5592 			    "firmware error 0x%x, cmd 0x%x\n",
5593 			    le32toh(resp->error_type),
5594 			    resp->cmd_id);
5595 			break;
5596 		}
5597 
5598 		case IWM_TIME_EVENT_NOTIFICATION:
5599 			iwm_rx_time_event_notif(sc, pkt);
5600 			break;
5601 
5602 		/*
5603 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5604 		 * messages. Just ignore them for now.
5605 		 */
5606 		case IWM_DEBUG_LOG_MSG:
5607 			break;
5608 
5609 		case IWM_MCAST_FILTER_CMD:
5610 			break;
5611 
5612 		case IWM_SCD_QUEUE_CFG: {
5613 			struct iwm_scd_txq_cfg_rsp *rsp;
5614 			rsp = (void *)pkt->data;
5615 
5616 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5617 			    "queue cfg token=0x%x sta_id=%d "
5618 			    "tid=%d scd_queue=%d\n",
5619 			    rsp->token, rsp->sta_id, rsp->tid,
5620 			    rsp->scd_queue);
5621 			break;
5622 		}
5623 
5624 		default:
5625 			device_printf(sc->sc_dev,
5626 			    "frame %d/%d %x UNHANDLED (this should "
5627 			    "not happen)\n", qid & ~0x80, idx,
5628 			    pkt->len_n_flags);
5629 			break;
5630 		}
5631 
5632 		/*
5633 		 * Why test bit 0x80?  The Linux driver:
5634 		 *
5635 		 * There is one exception:  uCode sets bit 15 when it
5636 		 * originates the response/notification, i.e. when the
5637 		 * response/notification is not a direct response to a
5638 		 * command sent by the driver.  For example, uCode issues
5639 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5640 		 * it is not a direct response to any driver command.
5641 		 *
5642 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5643 		 * uses a slightly different format for pkt->hdr, and "qid"
5644 		 * is actually the upper byte of a two-byte field.
5645 		 */
5646 		if (!(qid & (1 << 7)))
5647 			iwm_cmd_done(sc, pkt);
5648 
5649 		offset = nextoff;
5650 	}
5651 	if (stolen)
5652 		m_freem(m);
5653 #undef HAVEROOM
5654 }
5655 
5656 /*
5657  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5658  * Basic structure from if_iwn
5659  */
5660 static void
5661 iwm_notif_intr(struct iwm_softc *sc)
5662 {
5663 	int count;
5664 	uint32_t wreg;
5665 	uint16_t hw;
5666 
5667 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5668 	    BUS_DMASYNC_POSTREAD);
5669 
5670 	if (sc->cfg->mqrx_supported) {
5671 		count = IWM_RX_MQ_RING_COUNT;
5672 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
5673 	} else {
5674 		count = IWM_RX_LEGACY_RING_COUNT;
5675 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
5676 	}
5677 
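	/*
	 * closed_rb_num is the index of the receive buffer most recently
	 * closed by the firmware; only its low 12 bits are significant.
	 */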
5678 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5679 
5680 	/*
5681 	 * Process responses
5682 	 */
5683 	while (sc->rxq.cur != hw) {
5684 		struct iwm_rx_ring *ring = &sc->rxq;
5685 		struct iwm_rx_data *data = &ring->data[ring->cur];
5686 
5687 		bus_dmamap_sync(ring->data_dmat, data->map,
5688 		    BUS_DMASYNC_POSTREAD);
5689 
5690 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5691 		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5692 		iwm_handle_rxb(sc, data->m);
5693 
5694 		ring->cur = (ring->cur + 1) % count;
5695 	}
5696 
5697 	/*
5698 	 * Tell the firmware that it can reuse the ring entries that
5699 	 * we have just processed.
	 * The hardware seems to get upset unless we align the write
	 * pointer down to a multiple of 8.
5702 	 */
5703 	hw = (hw == 0) ? count - 1 : hw - 1;
5704 	IWM_WRITE(sc, wreg, rounddown2(hw, 8));
5705 }
5706 
5707 static void
5708 iwm_intr(void *arg)
5709 {
5710 	struct iwm_softc *sc = arg;
5711 	int handled = 0;
5712 	int r1, r2, rv = 0;
5713 	int isperiodic = 0;
5714 
5715 	IWM_LOCK(sc);
5716 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5717 
5718 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5719 		uint32_t *ict = sc->ict_dma.vaddr;
5720 		int tmp;
5721 
5722 		tmp = htole32(ict[sc->ict_cur]);
5723 		if (!tmp)
5724 			goto out_ena;
5725 
5726 		/*
		 * OK, there was something; keep plowing until we have it all.
5728 		 */
5729 		r1 = r2 = 0;
5730 		while (tmp) {
5731 			r1 |= tmp;
5732 			ict[sc->ict_cur] = 0;
5733 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5734 			tmp = htole32(ict[sc->ict_cur]);
5735 		}
5736 
5737 		/* this is where the fun begins.  don't ask */
5738 		if (r1 == 0xffffffff)
5739 			r1 = 0;
5740 
		/*
		 * Per the Linux driver: interrupt coalescing can clear the
		 * Rx bit, but bits 18/19 remain set when that happens, so
		 * reconstruct it from them.  The ICT value also packs the
		 * 32 CSR interrupt bits into 16; the expansion below maps
		 * the low byte to CSR bits 0-7 and the high byte to CSR
		 * bits 24-31.
		 */
5742 		if (r1 & 0xc0000)
5743 			r1 |= 0x8000;
5744 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5745 	} else {
5746 		r1 = IWM_READ(sc, IWM_CSR_INT);
5747 		/* "hardware gone" (where, fishing?) */
5748 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5749 			goto out;
5750 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5751 	}
5752 	if (r1 == 0 && r2 == 0) {
5753 		goto out_ena;
5754 	}
5755 
5756 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5757 
5758 	/* Safely ignore these bits for debug checks below */
5759 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5760 
5761 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5762 		int i;
5763 		struct ieee80211com *ic = &sc->sc_ic;
5764 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5765 
5766 #ifdef IWM_DEBUG
5767 		iwm_nic_error(sc);
5768 #endif
5769 		/* Dump driver status (TX and RX rings) while we're here. */
5770 		device_printf(sc->sc_dev, "driver status:\n");
5771 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
5772 			struct iwm_tx_ring *ring = &sc->txq[i];
5773 			device_printf(sc->sc_dev,
5774 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5775 			    "queued=%-3d\n",
5776 			    i, ring->qid, ring->cur, ring->queued);
5777 		}
5778 		device_printf(sc->sc_dev,
5779 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5780 		device_printf(sc->sc_dev,
5781 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5782 
5783 		/* Reset our firmware state tracking. */
5784 		sc->sc_firmware_state = 0;
5785 		/* Don't stop the device; just do a VAP restart */
5786 		IWM_UNLOCK(sc);
5787 
5788 		if (vap == NULL) {
5789 			printf("%s: null vap\n", __func__);
5790 			return;
5791 		}
5792 
5793 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5794 		    "restarting\n", __func__, vap->iv_state);
5795 
5796 		ieee80211_restart_all(ic);
5797 		return;
5798 	}
5799 
5800 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5801 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5802 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5803 		iwm_stop(sc);
5804 		rv = 1;
5805 		goto out;
5806 	}
5807 
5808 	/* firmware chunk loaded */
5809 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5810 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5811 		handled |= IWM_CSR_INT_BIT_FH_TX;
5812 		sc->sc_fw_chunk_done = 1;
5813 		wakeup(&sc->sc_fw);
5814 	}
5815 
5816 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5817 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5818 		if (iwm_check_rfkill(sc)) {
5819 			device_printf(sc->sc_dev,
5820 			    "%s: rfkill switch, disabling interface\n",
5821 			    __func__);
5822 			iwm_stop(sc);
5823 		}
5824 	}
5825 
5826 	/*
5827 	 * The Linux driver uses periodic interrupts to avoid races.
5828 	 * We cargo-cult like it's going out of fashion.
5829 	 */
5830 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5831 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5832 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5833 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5834 			IWM_WRITE_1(sc,
5835 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5836 		isperiodic = 1;
5837 	}
5838 
5839 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5840 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5841 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5842 
5843 		iwm_notif_intr(sc);
5844 
5845 		/* enable periodic interrupt, see above */
5846 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5847 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5848 			    IWM_CSR_INT_PERIODIC_ENA);
5849 	}
5850 
5851 	if (__predict_false(r1 & ~handled))
5852 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5853 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5854 	rv = 1;
5855 
5856  out_ena:
5857 	iwm_restore_interrupts(sc);
5858  out:
5859 	IWM_UNLOCK(sc);
5860 	return;
5861 }
5862 
5863 /*
5864  * Autoconf glue-sniffing
5865  */
5866 #define	PCI_VENDOR_INTEL		0x8086
5867 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5868 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5869 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5870 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5871 #define	PCI_PRODUCT_INTEL_WL_3168_1	0x24fb
5872 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5873 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5874 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5875 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5876 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5877 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5878 #define	PCI_PRODUCT_INTEL_WL_8265_1	0x24fd
5879 #define	PCI_PRODUCT_INTEL_WL_9560_1	0x9df0
5880 #define	PCI_PRODUCT_INTEL_WL_9560_2	0xa370
5881 #define	PCI_PRODUCT_INTEL_WL_9260_1	0x2526
5882 
5883 static const struct iwm_devices {
5884 	uint16_t		device;
5885 	const struct iwm_cfg	*cfg;
5886 } iwm_devices[] = {
5887 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5888 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5889 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5890 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5891 	{ PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg },
5892 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5893 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5894 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5895 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5896 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5897 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5898 	{ PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
5899 	{ PCI_PRODUCT_INTEL_WL_9560_1, &iwm9560_cfg },
5900 	{ PCI_PRODUCT_INTEL_WL_9560_2, &iwm9560_cfg },
5901 	{ PCI_PRODUCT_INTEL_WL_9260_1, &iwm9260_cfg },
5902 };
5903 
5904 static int
5905 iwm_probe(device_t dev)
5906 {
5907 	int i;
5908 
5909 	for (i = 0; i < nitems(iwm_devices); i++) {
5910 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5911 		    pci_get_device(dev) == iwm_devices[i].device) {
5912 			device_set_desc(dev, iwm_devices[i].cfg->name);
5913 			return (BUS_PROBE_DEFAULT);
5914 		}
5915 	}
5916 
5917 	return (ENXIO);
5918 }
5919 
5920 static int
5921 iwm_dev_check(device_t dev)
5922 {
5923 	struct iwm_softc *sc;
5924 	uint16_t devid;
5925 	int i;
5926 
5927 	sc = device_get_softc(dev);
5928 
5929 	devid = pci_get_device(dev);
5930 	for (i = 0; i < nitems(iwm_devices); i++) {
5931 		if (iwm_devices[i].device == devid) {
5932 			sc->cfg = iwm_devices[i].cfg;
5933 			return (0);
5934 		}
5935 	}
5936 	device_printf(dev, "unknown adapter type\n");
	return (ENXIO);
5938 }
5939 
5940 /* PCI registers */
5941 #define PCI_CFG_RETRY_TIMEOUT	0x041
5942 
5943 static int
5944 iwm_pci_attach(device_t dev)
5945 {
5946 	struct iwm_softc *sc;
5947 	int count, error, rid;
5948 	uint16_t reg;
5949 
5950 	sc = device_get_softc(dev);
5951 
	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
5954 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5955 
5956 	/* Enable bus-mastering and hardware bug workaround. */
5957 	pci_enable_busmaster(dev);
5958 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5959 	/* if !MSI */
5960 	if (reg & PCIM_STATUS_INTxSTATE) {
5961 		reg &= ~PCIM_STATUS_INTxSTATE;
5962 	}
5963 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5964 
5965 	rid = PCIR_BAR(0);
5966 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5967 	    RF_ACTIVE);
5968 	if (sc->sc_mem == NULL) {
5969 		device_printf(sc->sc_dev, "can't map mem space\n");
5970 		return (ENXIO);
5971 	}
5972 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5973 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5974 
5975 	/* Install interrupt handler. */
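	/*
	 * Prefer a single MSI vector (SYS_RES_IRQ rid 1); fall back to the
	 * legacy INTx line (rid 0), which must be allocated shareable.
	 */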
5976 	count = 1;
5977 	rid = 0;
5978 	if (pci_alloc_msi(dev, &count) == 0)
5979 		rid = 1;
5980 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5981 	    (rid != 0 ? 0 : RF_SHAREABLE));
5982 	if (sc->sc_irq == NULL) {
5983 		device_printf(dev, "can't map interrupt\n");
		return (ENXIO);
5985 	}
5986 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5987 	    NULL, iwm_intr, sc, &sc->sc_ih);
5988 	if (sc->sc_ih == NULL) {
		device_printf(dev, "can't establish interrupt\n");
		return (ENXIO);
5991 	}
5992 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5993 
5994 	return (0);
5995 }
5996 
5997 static void
5998 iwm_pci_detach(device_t dev)
5999 {
6000 	struct iwm_softc *sc = device_get_softc(dev);
6001 
6002 	if (sc->sc_irq != NULL) {
6003 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
6004 		bus_release_resource(dev, SYS_RES_IRQ,
6005 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
6006 		pci_release_msi(dev);
	}
6008 	if (sc->sc_mem != NULL)
6009 		bus_release_resource(dev, SYS_RES_MEMORY,
6010 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
6011 }
6012 
6013 static int
6014 iwm_attach(device_t dev)
6015 {
6016 	struct iwm_softc *sc = device_get_softc(dev);
6017 	struct ieee80211com *ic = &sc->sc_ic;
6018 	int error;
6019 	int txq_i, i;
6020 
6021 	sc->sc_dev = dev;
6022 	sc->sc_attached = 1;
6023 	IWM_LOCK_INIT(sc);
6024 	mbufq_init(&sc->sc_snd, ifqmaxlen);
6025 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
6026 	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
6027 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
6028 
6029 	error = iwm_dev_check(dev);
6030 	if (error != 0)
6031 		goto fail;
6032 
6033 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
6034 	if (sc->sc_notif_wait == NULL) {
6035 		device_printf(dev, "failed to init notification wait struct\n");
6036 		goto fail;
6037 	}
6038 
6039 	sc->sf_state = IWM_SF_UNINIT;
6040 
6041 	/* Init phy db */
6042 	sc->sc_phy_db = iwm_phy_db_init(sc);
6043 	if (!sc->sc_phy_db) {
6044 		device_printf(dev, "Cannot init phy_db\n");
6045 		goto fail;
6046 	}
6047 
	/* Treat EBS as successful unless the firmware states otherwise. */
6049 	sc->last_ebs_successful = TRUE;
6050 
6051 	/* PCI attach */
6052 	error = iwm_pci_attach(dev);
6053 	if (error != 0)
6054 		goto fail;
6055 
6056 	sc->sc_wantresp = -1;
6057 
6058 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6059 	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed: the revision step now also includes bits 0-1 (there is no
	 * more "dash" value). To keep hw_rev backwards compatible, we store
	 * it in the old format.
6064 	 */
6065 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
6066 		int ret;
6067 		uint32_t hw_step;
6068 
6069 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
6070 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
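		/*
		 * That is, assuming IWM_CSR_HW_REV_STEP() extracts bits 2-3:
		 * keep the type bits, move the new two-bit step from bits
		 * 0-1 up into the old step position at bits 2-3, and zero
		 * the old "dash" bits.
		 */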
6071 
6072 		if (iwm_prepare_card_hw(sc) != 0) {
6073 			device_printf(dev, "could not initialize hardware\n");
6074 			goto fail;
6075 		}
6076 
6077 		/*
		 * To recognize a C step, the driver must read the chip
		 * version id located at the AUX bus MISC address.
6080 		 */
6081 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6082 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6083 		DELAY(2);
6084 
6085 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6086 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6087 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6088 				   25000);
6089 		if (!ret) {
6090 			device_printf(sc->sc_dev,
6091 			    "Failed to wake up the nic\n");
6092 			goto fail;
6093 		}
6094 
6095 		if (iwm_nic_lock(sc)) {
6096 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6097 			hw_step |= IWM_ENABLE_WFPM;
6098 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6099 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6100 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6101 			if (hw_step == 0x3)
6102 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6103 						(IWM_SILICON_C_STEP << 2);
6104 			iwm_nic_unlock(sc);
6105 		} else {
6106 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
6107 			goto fail;
6108 		}
6109 	}
6110 
6111 	/* special-case 7265D, it has the same PCI IDs. */
6112 	if (sc->cfg == &iwm7265_cfg &&
6113 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6114 		sc->cfg = &iwm7265d_cfg;
6115 	}
6116 
6117 	/* Allocate DMA memory for firmware transfers. */
6118 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
6119 		device_printf(dev, "could not allocate memory for firmware\n");
6120 		goto fail;
6121 	}
6122 
6123 	/* Allocate "Keep Warm" page. */
6124 	if ((error = iwm_alloc_kw(sc)) != 0) {
6125 		device_printf(dev, "could not allocate keep warm page\n");
6126 		goto fail;
6127 	}
6128 
6129 	/* We use ICT interrupts */
6130 	if ((error = iwm_alloc_ict(sc)) != 0) {
6131 		device_printf(dev, "could not allocate ICT table\n");
6132 		goto fail;
6133 	}
6134 
6135 	/* Allocate TX scheduler "rings". */
6136 	if ((error = iwm_alloc_sched(sc)) != 0) {
6137 		device_printf(dev, "could not allocate TX scheduler rings\n");
6138 		goto fail;
6139 	}
6140 
6141 	/* Allocate TX rings */
6142 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6143 		if ((error = iwm_alloc_tx_ring(sc,
6144 		    &sc->txq[txq_i], txq_i)) != 0) {
6145 			device_printf(dev,
6146 			    "could not allocate TX ring %d\n",
6147 			    txq_i);
6148 			goto fail;
6149 		}
6150 	}
6151 
6152 	/* Allocate RX ring. */
6153 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6154 		device_printf(dev, "could not allocate RX ring\n");
6155 		goto fail;
6156 	}
6157 
6158 	/* Clear pending interrupts. */
6159 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6160 
6161 	ic->ic_softc = sc;
6162 	ic->ic_name = device_get_nameunit(sc->sc_dev);
6163 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
6164 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
6165 
6166 	/* Set device capabilities. */
6167 	ic->ic_caps =
6168 	    IEEE80211_C_STA |
6169 	    IEEE80211_C_WPA |		/* WPA/RSN */
6170 	    IEEE80211_C_WME |
6171 	    IEEE80211_C_PMGT |
6172 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
6173 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
6174 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
6175 	    ;
6176 	/* Advertise full-offload scanning */
6177 	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6178 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6179 		sc->sc_phyctxt[i].id = i;
6180 		sc->sc_phyctxt[i].color = 0;
6181 		sc->sc_phyctxt[i].ref = 0;
6182 		sc->sc_phyctxt[i].channel = NULL;
6183 	}
6184 
6185 	/* Default noise floor */
6186 	sc->sc_noise = -96;
6187 
6188 	/* Max RSSI */
6189 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6190 
6191 #ifdef IWM_DEBUG
6192 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6193 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6194 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6195 #endif
6196 
6197 	error = iwm_read_firmware(sc);
6198 	if (error) {
6199 		goto fail;
6200 	} else if (sc->sc_fw.fw_fp == NULL) {
6201 		/*
6202 		 * XXX Add a solution for properly deferring firmware load
6203 		 *     during bootup.
6204 		 */
6205 		goto fail;
6206 	} else {
6207 		sc->sc_preinit_hook.ich_func = iwm_preinit;
6208 		sc->sc_preinit_hook.ich_arg = sc;
6209 		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6210 			device_printf(dev,
6211 			    "config_intrhook_establish failed\n");
6212 			goto fail;
6213 		}
6214 	}
6215 
6216 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6217 	    "<-%s\n", __func__);
6218 
6219 	return 0;
6220 
6221 	/* Free allocated memory if something failed during attachment. */
6222 fail:
6223 	iwm_detach_local(sc, 0);
6224 
6225 	return ENXIO;
6226 }
6227 
6228 static int
6229 iwm_is_valid_ether_addr(uint8_t *addr)
6230 {
6231 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6232 
6233 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6234 		return (FALSE);
6235 
6236 	return (TRUE);
6237 }
6238 
6239 static int
6240 iwm_wme_update(struct ieee80211com *ic)
6241 {
6242 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
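/* e.g. a logcwmin (ECWmin) of 4 yields CWmin = 2^4 - 1 = 15 slots */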
6243 	struct iwm_softc *sc = ic->ic_softc;
6244 	struct chanAccParams chp;
6245 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6246 	struct iwm_vap *ivp = IWM_VAP(vap);
6247 	struct iwm_node *in;
6248 	struct wmeParams tmp[WME_NUM_AC];
6249 	int aci, error;
6250 
6251 	if (vap == NULL)
6252 		return (0);
6253 
6254 	ieee80211_wme_ic_getparams(ic, &chp);
6255 
6256 	IEEE80211_LOCK(ic);
6257 	for (aci = 0; aci < WME_NUM_AC; aci++)
6258 		tmp[aci] = chp.cap_wmeParams[aci];
6259 	IEEE80211_UNLOCK(ic);
6260 
6261 	IWM_LOCK(sc);
6262 	for (aci = 0; aci < WME_NUM_AC; aci++) {
6263 		const struct wmeParams *ac = &tmp[aci];
6264 		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6265 		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6266 		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6267 		ivp->queue_params[aci].edca_txop =
6268 		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6269 	}
6270 	ivp->have_wme = TRUE;
6271 	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6272 		in = IWM_NODE(vap->iv_bss);
6273 		if (in->in_assoc) {
6274 			if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
6275 				device_printf(sc->sc_dev,
6276 				    "%s: failed to update MAC\n", __func__);
6277 			}
6278 		}
6279 	}
6280 	IWM_UNLOCK(sc);
6281 
6282 	return (0);
6283 #undef IWM_EXP2
6284 }
6285 
6286 static void
6287 iwm_preinit(void *arg)
6288 {
6289 	struct iwm_softc *sc = arg;
6290 	device_t dev = sc->sc_dev;
6291 	struct ieee80211com *ic = &sc->sc_ic;
6292 	int error;
6293 
6294 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6295 	    "->%s\n", __func__);
6296 
6297 	IWM_LOCK(sc);
6298 	if ((error = iwm_start_hw(sc)) != 0) {
6299 		device_printf(dev, "could not initialize hardware\n");
6300 		IWM_UNLOCK(sc);
6301 		goto fail;
6302 	}
6303 
6304 	error = iwm_run_init_ucode(sc, 1);
6305 	iwm_stop_device(sc);
6306 	if (error) {
6307 		IWM_UNLOCK(sc);
6308 		goto fail;
6309 	}
6310 	device_printf(dev,
6311 	    "hw rev 0x%x, fw ver %s, address %s\n",
6312 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6313 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6314 
	/* Not all hardware supports the 5GHz band. */
6316 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6317 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6318 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6319 	IWM_UNLOCK(sc);
6320 
6321 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6322 	    ic->ic_channels);
6323 
6324 	/*
	 * At this point we've committed: if we fail to do setup,
6326 	 * we now also have to tear down the net80211 state.
6327 	 */
6328 	ieee80211_ifattach(ic);
6329 	ic->ic_vap_create = iwm_vap_create;
6330 	ic->ic_vap_delete = iwm_vap_delete;
6331 	ic->ic_raw_xmit = iwm_raw_xmit;
6332 	ic->ic_node_alloc = iwm_node_alloc;
6333 	ic->ic_scan_start = iwm_scan_start;
6334 	ic->ic_scan_end = iwm_scan_end;
6335 	ic->ic_update_mcast = iwm_update_mcast;
6336 	ic->ic_getradiocaps = iwm_init_channel_map;
6337 	ic->ic_set_channel = iwm_set_channel;
6338 	ic->ic_scan_curchan = iwm_scan_curchan;
6339 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6340 	ic->ic_wme.wme_update = iwm_wme_update;
6341 	ic->ic_parent = iwm_parent;
6342 	ic->ic_transmit = iwm_transmit;
6343 	iwm_radiotap_attach(sc);
6344 	if (bootverbose)
6345 		ieee80211_announce(ic);
6346 
6347 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6348 	    "<-%s\n", __func__);
6349 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6350 
6351 	return;
6352 fail:
6353 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6354 	iwm_detach_local(sc, 0);
6355 }
6356 
6357 /*
6358  * Attach the interface to 802.11 radiotap.
6359  */
6360 static void
6361 iwm_radiotap_attach(struct iwm_softc *sc)
6362 {
	struct ieee80211com *ic = &sc->sc_ic;
6364 
6365 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6366 	    "->%s begin\n", __func__);
	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWM_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWM_RX_RADIOTAP_PRESENT);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s end\n", __func__);
6374 }
6375 
6376 static struct ieee80211vap *
6377 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6378     enum ieee80211_opmode opmode, int flags,
6379     const uint8_t bssid[IEEE80211_ADDR_LEN],
6380     const uint8_t mac[IEEE80211_ADDR_LEN])
6381 {
6382 	struct iwm_vap *ivp;
6383 	struct ieee80211vap *vap;
6384 
6385 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6386 		return NULL;
6387 	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6388 	vap = &ivp->iv_vap;
6389 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6390 	vap->iv_bmissthreshold = 10;            /* override default */
6391 	/* Override with driver methods. */
6392 	ivp->iv_newstate = vap->iv_newstate;
6393 	vap->iv_newstate = iwm_newstate;
6394 
6395 	ivp->id = IWM_DEFAULT_MACID;
6396 	ivp->color = IWM_DEFAULT_COLOR;
6397 
6398 	ivp->have_wme = FALSE;
6399 	ivp->ps_disabled = FALSE;
6400 
6401 	ieee80211_ratectl_init(vap);
6402 	/* Complete setup. */
6403 	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6404 	    mac);
6405 	ic->ic_opmode = opmode;
6406 
6407 	return vap;
6408 }
6409 
6410 static void
6411 iwm_vap_delete(struct ieee80211vap *vap)
6412 {
6413 	struct iwm_vap *ivp = IWM_VAP(vap);
6414 
6415 	ieee80211_ratectl_deinit(vap);
6416 	ieee80211_vap_detach(vap);
6417 	free(ivp, M_80211_VAP);
6418 }
6419 
6420 static void
6421 iwm_xmit_queue_drain(struct iwm_softc *sc)
6422 {
6423 	struct mbuf *m;
6424 	struct ieee80211_node *ni;
6425 
6426 	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6427 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6428 		ieee80211_free_node(ni);
6429 		m_freem(m);
6430 	}
6431 }
6432 
6433 static void
6434 iwm_scan_start(struct ieee80211com *ic)
6435 {
6436 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6437 	struct iwm_softc *sc = ic->ic_softc;
6438 	int error;
6439 
6440 	IWM_LOCK(sc);
6441 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6442 		/* This should not be possible */
6443 		device_printf(sc->sc_dev,
6444 		    "%s: Previous scan not completed yet\n", __func__);
6445 	}
6446 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6447 		error = iwm_umac_scan(sc);
6448 	else
6449 		error = iwm_lmac_scan(sc);
6450 	if (error != 0) {
6451 		device_printf(sc->sc_dev, "could not initiate scan\n");
6452 		IWM_UNLOCK(sc);
6453 		ieee80211_cancel_scan(vap);
6454 	} else {
6455 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6456 		iwm_led_blink_start(sc);
6457 		IWM_UNLOCK(sc);
6458 	}
6459 }
6460 
6461 static void
6462 iwm_scan_end(struct ieee80211com *ic)
6463 {
6464 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6465 	struct iwm_softc *sc = ic->ic_softc;
6466 
6467 	IWM_LOCK(sc);
6468 	iwm_led_blink_stop(sc);
6469 	if (vap->iv_state == IEEE80211_S_RUN)
6470 		iwm_led_enable(sc);
6471 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6472 		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now is fine because
6474 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6475 		 * taskqueue.
6476 		 */
6477 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6478 		iwm_scan_stop_wait(sc);
6479 	}
6480 	IWM_UNLOCK(sc);
6481 
6482 	/*
	 * Make sure we don't race if sc_es_task is still enqueued here:
	 * it must not call ieee80211_scan_done
6485 	 * when we have already started the next scan.
6486 	 */
6487 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6488 }
6489 
6490 static void
6491 iwm_update_mcast(struct ieee80211com *ic)
6492 {
6493 }
6494 
6495 static void
6496 iwm_set_channel(struct ieee80211com *ic)
6497 {
6498 }
6499 
6500 static void
6501 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6502 {
6503 }
6504 
6505 static void
6506 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6507 {
6508 }
6509 
6510 void
6511 iwm_init_task(void *arg1)
6512 {
6513 	struct iwm_softc *sc = arg1;
6514 
6515 	IWM_LOCK(sc);
6516 	while (sc->sc_flags & IWM_FLAG_BUSY)
6517 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6518 	sc->sc_flags |= IWM_FLAG_BUSY;
6519 	iwm_stop(sc);
6520 	if (sc->sc_ic.ic_nrunning > 0)
6521 		iwm_init(sc);
6522 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6523 	wakeup(&sc->sc_flags);
6524 	IWM_UNLOCK(sc);
6525 }
6526 
6527 static int
6528 iwm_resume(device_t dev)
6529 {
6530 	struct iwm_softc *sc = device_get_softc(dev);
6531 	int do_reinit = 0;
6532 
6533 	/*
6534 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6535 	 * PCI Tx retries from interfering with C3 CPU state.
6536 	 */
6537 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6538 
6539 	if (!sc->sc_attached)
6540 		return 0;
6541 
	iwm_init_task(sc);
6543 
6544 	IWM_LOCK(sc);
6545 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6546 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6547 		do_reinit = 1;
6548 	}
6549 	IWM_UNLOCK(sc);
6550 
6551 	if (do_reinit)
6552 		ieee80211_resume_all(&sc->sc_ic);
6553 
6554 	return 0;
6555 }
6556 
6557 static int
6558 iwm_suspend(device_t dev)
6559 {
6560 	int do_stop = 0;
6561 	struct iwm_softc *sc = device_get_softc(dev);
6562 
	do_stop = (sc->sc_ic.ic_nrunning > 0);
6564 
6565 	if (!sc->sc_attached)
6566 		return (0);
6567 
6568 	ieee80211_suspend_all(&sc->sc_ic);
6569 
6570 	if (do_stop) {
6571 		IWM_LOCK(sc);
6572 		iwm_stop(sc);
6573 		sc->sc_flags |= IWM_FLAG_SCANNING;
6574 		IWM_UNLOCK(sc);
6575 	}
6576 
6577 	return (0);
6578 }
6579 
6580 static int
6581 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6582 {
6583 	struct iwm_fw_info *fw = &sc->sc_fw;
6584 	device_t dev = sc->sc_dev;
6585 	int i;
6586 
6587 	if (!sc->sc_attached)
6588 		return 0;
6589 	sc->sc_attached = 0;
6590 	if (do_net80211) {
6591 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6592 	}
6593 	iwm_stop_device(sc);
6594 	if (do_net80211) {
6595 		IWM_LOCK(sc);
6596 		iwm_xmit_queue_drain(sc);
6597 		IWM_UNLOCK(sc);
6598 		ieee80211_ifdetach(&sc->sc_ic);
6599 	}
6600 	callout_drain(&sc->sc_led_blink_to);
6601 	callout_drain(&sc->sc_watchdog_to);
6602 
6603 	iwm_phy_db_free(sc->sc_phy_db);
6604 	sc->sc_phy_db = NULL;
6605 
6606 	iwm_free_nvm_data(sc->nvm_data);
6607 
6608 	/* Free descriptor rings */
6609 	iwm_free_rx_ring(sc, &sc->rxq);
6610 	for (i = 0; i < nitems(sc->txq); i++)
6611 		iwm_free_tx_ring(sc, &sc->txq[i]);
6612 
6613 	/* Free firmware */
6614 	if (fw->fw_fp != NULL)
6615 		iwm_fw_info_free(fw);
6616 
6617 	/* Free scheduler */
6618 	iwm_dma_contig_free(&sc->sched_dma);
6619 	iwm_dma_contig_free(&sc->ict_dma);
6620 	iwm_dma_contig_free(&sc->kw_dma);
6621 	iwm_dma_contig_free(&sc->fw_dma);
6622 
6623 	iwm_free_fw_paging(sc);
6624 
6625 	/* Finished with the hardware - detach things */
6626 	iwm_pci_detach(dev);
6627 
6628 	if (sc->sc_notif_wait != NULL) {
6629 		iwm_notification_wait_free(sc->sc_notif_wait);
6630 		sc->sc_notif_wait = NULL;
6631 	}
6632 
6633 	IWM_LOCK_DESTROY(sc);
6634 
6635 	return (0);
6636 }
6637 
6638 static int
6639 iwm_detach(device_t dev)
6640 {
6641 	struct iwm_softc *sc = device_get_softc(dev);
6642 
6643 	return (iwm_detach_local(sc, 1));
6644 }
6645 
6646 static device_method_t iwm_pci_methods[] = {
6647         /* Device interface */
6648         DEVMETHOD(device_probe,         iwm_probe),
6649         DEVMETHOD(device_attach,        iwm_attach),
6650         DEVMETHOD(device_detach,        iwm_detach),
6651         DEVMETHOD(device_suspend,       iwm_suspend),
6652         DEVMETHOD(device_resume,        iwm_resume),
6653 
6654         DEVMETHOD_END
6655 };
6656 
6657 static driver_t iwm_pci_driver = {
6658         "iwm",
6659         iwm_pci_methods,
6660         sizeof (struct iwm_softc)
6661 };
6662 
6663 static devclass_t iwm_devclass;
6664 
6665 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6666 MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver,
6667     iwm_devices, nitems(iwm_devices));
6668 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6669 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6670 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6671