/*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 *                             DragonFly work
 *
 * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
 *      changes to remove per-device network interface (DragonFly has not
 *      caught up to that yet on the WLAN side).
 *
 * Comprehensive list of adjustments for DragonFly not #ifdef'd:
 *     malloc -> kmalloc       (in particular, changing improper M_NOWAIT
 *                             specifications to M_INTWAIT.  We still don't
 *                             understand why FreeBSD uses M_NOWAIT for
 *                             critical must-not-fail kmalloc()s).
 *     free -> kfree
 *     printf -> kprintf
 *     (bug fix) memset in iwm_reset_rx_ring.
 *     (debug)   added several kprintf()s on error
 *
 *     header file paths (DFly allows localized path specifications).
 *     minor header file differences.
 *
 * Comprehensive list of adjustments for DragonFly #ifdef'd:
 *     (safety)  added register read-back serialization in iwm_reset_rx_ring().
 *     packet counters
 *     msleep -> lksleep
 *     mtx -> lk  (mtx functions -> lockmgr functions)
 *     callout differences
 *     taskqueue differences
 *     MSI differences
 *     bus_setup_intr() differences
 *     minor PCI config register naming differences
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/endian.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#include <netproto/802_11/ieee80211_ratectl.h>
#include <netproto/802_11/ieee80211_radiotap.h>

#include "if_iwmreg.h"
#include "if_iwmvar.h"
#include "if_iwm_config.h"
#include "if_iwm_debug.h"
#include "if_iwm_notif_wait.h"
#include "if_iwm_util.h"
#include "if_iwm_binding.h"
#include "if_iwm_phy_db.h"
#include "if_iwm_mac_ctxt.h"
#include "if_iwm_phy_ctxt.h"
#include "if_iwm_time_event.h"
#include "if_iwm_power.h"
#include "if_iwm_scan.h"
#include "if_iwm_sf.h"
#include "if_iwm_sta.h"

#include "if_iwm_pcie_trans.h"
#include "if_iwm_led.h"
#include "if_iwm_fw.h"

#if defined(__DragonFly__)
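/* FreeBSD's mtodo() (mtod() plus a byte offset) maps onto DragonFly's mtodoff(). */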
#define mtodo(m, off)	mtodoff((m), void *, (off))
#endif

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF
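/*
 * The first IWM_NUM_2GHZ_CHANNELS entries of the channel arrays above are
 * the 2.4 GHz channels; everything after them is 5 GHz.
 */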

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
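/*
 * The rate field follows the net80211 convention of units of 500 kb/s:
 * { 2, IWM_RATE_1M_PLCP } is 1 Mb/s CCK, and the first OFDM entry,
 * { 12, IWM_RATE_6M_PLCP } at index 4, is 6 Mb/s (hence IWM_RIDX_OFDM == 4).
 */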

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

#define IWM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_UCODE_CALIB_TIMEOUT	(2*hz)
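/* Both timeouts are in ticks: one second for the ALIVE notification, two for calibration results. */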

struct iwm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
                                           enum iwm_ucode_type,
                                           const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *);
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
                                  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
                                   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
					       struct iwm_nvm_data *,
					       const uint16_t *,
					       const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
				      const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
					     bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
						const struct iwm_fw_img *,
						int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
					   const struct iwm_fw_img *,
					   int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
					       const struct iwm_fw_img *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
					  const struct iwm_fw_img *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
                                              enum iwm_ucode_type);
static int	iwm_run_init_ucode(struct iwm_softc *, int);
static int	iwm_config_ltr(struct iwm_softc *sc);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
                                      struct iwm_rx_packet *);
static int	iwm_get_noise(struct iwm_softc *,
		    const struct iwm_statistics_rx_non_phy *);
static void	iwm_handle_rx_statistics(struct iwm_softc *,
		    struct iwm_rx_packet *);
static bool	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *,
		    uint32_t, bool);
static int	iwm_rx_tx_cmd_single(struct iwm_softc *,
                                         struct iwm_rx_packet *,
				         struct iwm_node *);
static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
                                 uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct mbuf *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
                       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static struct ieee80211_node *
		iwm_node_alloc(struct ieee80211vap *,
		               const uint8_t[IEEE80211_ADDR_LEN]);
static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_media_change(struct ifnet *);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static boolean_t iwm_is_lar_supported(struct iwm_softc *);
static boolean_t iwm_is_wifi_mcc_supported(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
		iwm_vap_create(struct ieee80211com *,
		               const char [IFNAMSIZ], int,
		               enum ieee80211_opmode, int,
		               const uint8_t [IEEE80211_ADDR_LEN],
		               const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_xmit_queue_drain(struct iwm_softc *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

#if defined(__DragonFly__)
static int     iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
#endif

static int	iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_img *fws;
	struct iwm_fw_desc *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.img[type];
	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
		return EINVAL;

	fwone = &fws->sec[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->offset, data, sizeof(uint32_t));

	/* rest is data */
	fwone->data = data + sizeof(uint32_t);
	fwone->len = dlen - sizeof(uint32_t);

	fws->fw_count++;

	return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
			struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_api *ucode_api = (const void *)data;
	uint32_t api_index = le32toh(ucode_api->api_index);
	uint32_t api_flags = le32toh(ucode_api->api_flags);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
		device_printf(sc->sc_dev,
		    "api flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_api, i + 32 * api_index);
	}

	return 0;
}

static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
			   struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
	uint32_t api_index = le32toh(ucode_capa->api_index);
	uint32_t api_flags = le32toh(ucode_capa->api_capa);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
		device_printf(sc->sc_dev,
		    "capa flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_capa, i + 32 * api_index);
	}

	return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	memset(fw->img, 0, sizeof(fw->img));
}

static int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	const struct iwm_ucode_tlv *tlv;
	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	uint32_t tlv_len;
	uint32_t usniffer_img;
	const uint8_t *tlv_data;
	uint32_t paging_mem_size;
	int num_of_cpus;
	int error = 0;
	size_t len;

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	fwp = firmware_get(sc->cfg->fw_name);
	if (fwp == NULL) {
		device_printf(sc->sc_dev,
		    "could not read firmware %s\n", sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	capa->flags = 0;
	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}

	ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

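	/*
	 * The image body is a stream of TLVs: a 32-bit type and 32-bit
	 * length followed by the payload, padded out to a 4-byte boundary.
	 */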
	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);
		tlv_data = tlv->data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}
		len -= roundup2(tlv_len, 4);
		data += sizeof(*tlv) + roundup2(tlv_len, 4);

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->max_probe_length =
			    le32_to_cpup((const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (capa->max_probe_length >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
			if (num_of_cpus == 2) {
				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_INIT].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
					TRUE;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%u) != sizeof(iwm_tlv_calib_data) (%zu)\n",
				    __func__, tlv_len,
				    sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				goto parse_out;
			}
			sc->sc_fw.phy_config =
			    le32_to_cpup((const uint32_t *)tlv_data);
			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_TX_CHAIN) >>
						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_RX_CHAIN) >>
						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			if (tlv_len != sizeof(struct iwm_ucode_api)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_CMD_VERSIONS:
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "%s: Paging: paging enabled (size = %u bytes)\n",
			    __func__, paging_mem_size);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				device_printf(sc->sc_dev,
					"%s: Paging: driver supports up to %u bytes for paging image\n",
					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
				error = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				device_printf(sc->sc_dev,
				    "%s: Paging: image isn't multiple %u\n",
				    __func__, IWM_FW_PAGING_SIZE);
				error = EINVAL;
				goto out;
			}

			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
			sc->sc_fw.img[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			capa->n_scan_channels =
			    le32_to_cpup((const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%u.%u.%u",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

 out:
	if (error) {
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	}

	return error;
}

/*
 * DMA resource routines
 */

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
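	/* Sections are bounced through this single buffer one chunk at a time. */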
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/* TX scheduler byte-count tables; their base address is handed to the hardware scheduler. */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	size_t descsz;
	int count, i, error;

	ring->cur = 0;
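	/*
	 * MQ-capable devices use 64-bit free descriptor entries holding
	 * full DMA addresses; legacy devices use 32-bit entries holding
	 * the address shifted right by 8 (hence 256-byte alignment below).
	 */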
	if (sc->cfg->mqrx_supported) {
		count = IWM_RX_MQ_RING_COUNT;
		descsz = sizeof(uint64_t);
	} else {
		count = IWM_RX_LEGACY_RING_COUNT;
		descsz = sizeof(uint32_t);
	}

	/* Allocate RX descriptors (256-byte aligned). */
	size = count * descsz;
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size,
	    256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->free_desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	if (sc->cfg->mqrx_supported) {
		size = count * sizeof(uint32_t);
		error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
		    size, 256);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not allocate RX ring DMA memory\n");
			goto fail;
		}
	}

	/* Create RX buffer DMA tag. */
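	/*
	 * DragonFly's bus_dma_tag_create() takes no lockfunc/lockarg
	 * arguments; the DragonFly branch also asks for PAGE_SIZE
	 * alignment and passes BUS_DMA_NOWAIT.
	 */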
#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	if (sc->rxq.stat)
		memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int count, i;

	iwm_dma_contig_free(&ring->free_desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);
	iwm_dma_contig_free(&ring->used_desc_dma);

	count = sc->cfg->mqrx_supported ? IWM_RX_MQ_RING_COUNT :
	    IWM_RX_LEGACY_RING_COUNT;

	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}
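	/*
	 * A TFD has IWM_MAX_SCATTER buffer slots in total; the first is
	 * used for the Tx command itself, so data mbufs are capped at
	 * IWM_MAX_SCATTER - 2 segments, leaving one slot spare.
	 */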

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   maxsize, nsegments, maxsize,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

/* iwlwifi pcie/trans.c */

/*
 * Since this hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stopped/started while active (thanks, openbsd port!) we
 * have to track this correctly.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}
	sc->sc_firmware_state = 0;
	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* stop and reset the on-board processor */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 */
	iwm_disable_interrupts(sc);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	iwm_prepare_card_hw(sc);
}

/* iwlwifi: mvm/ops.c */
static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
	    reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

static int
iwm_nic_rx_mq_init(struct iwm_softc *sc)
{
	int enabled;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Stop RX DMA. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
	/* Disable RX used and free queue operation. */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);

	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
	    sc->rxq.free_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
	    sc->rxq.used_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
	    sc->rxq.stat_dma.paddr);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);

	/* We configure only queue 0 for now. */
	enabled = ((1 << 0) << 16) | (1 << 0);

	/* Enable RX DMA, 4KB buffer size. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
	    IWM_RFH_DMA_EN_ENABLE_VAL |
	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);

	/* Enable RX DMA snooping. */
	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
	    (sc->cfg->integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));

	/* Enable the configured queue(s). */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);

	iwm_nic_unlock(sc);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);

	return (0);
}

static int
iwm_nic_rx_legacy_init(struct iwm_softc *sc)
{

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG,
	    sc->rxq.free_desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	iwm_nic_unlock(sc);

	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	return 0;
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (sc->cfg->mqrx_supported)
		return iwm_nic_rx_mq_init(sc);
	else
		return iwm_nic_rx_legacy_init(sc);
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	int qmsk;

	qmsk = 1 << qid;

	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev, "%s: cannot enable txq %d\n",
		    __func__, qid);
		return EBUSY;
	}

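	/* Point the hardware write pointer for this queue at index 0. */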
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_CMD_QUEUE) {
		/* Disable the scheduler. */
		iwm_write_prph(sc, IWM_SCD_EN_CTRL, 0);

		/* Stop the TX queue prior to configuration. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		/* Disable aggregations for this queue. */
		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, qmsk);

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);

		/* Enable the scheduler for this queue. */
		iwm_write_prph(sc, IWM_SCD_EN_CTRL, qmsk);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

1710 		memset(&cmd, 0, sizeof(cmd));
1711 		cmd.scd_queue = qid;
1712 		cmd.enable = 1;
1713 		cmd.sta_id = sta_id;
1714 		cmd.tx_fifo = fifo;
1715 		cmd.aggregate = 0;
1716 		cmd.window = IWM_FRAME_LIMIT;
1717 
1718 		error = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1719 		    sizeof(cmd), &cmd);
1720 		if (error) {
1721 			device_printf(sc->sc_dev,
1722 			    "cannot enable txq %d\n", qid);
1723 			return error;
1724 		}
1725 
1726 		if (!iwm_nic_lock(sc))
1727 			return EBUSY;
1728 	}
1729 
1730 	iwm_nic_unlock(sc);
1731 
1732 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1733 	    __func__, qid, fifo);
1734 
1735 	return 0;
1736 }
1737 
1738 static int
1739 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1740 {
1741 	int error, chnl;
1742 
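	/* Number of 32-bit words in the SCD context/status/translation area. */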
1743 	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1744 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1745 
1746 	if (!iwm_nic_lock(sc))
1747 		return EBUSY;
1748 
1749 	iwm_ict_reset(sc);
1750 
1751 	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1752 	if (scd_base_addr != 0 &&
1753 	    scd_base_addr != sc->scd_base_addr) {
1754 		device_printf(sc->sc_dev,
1755 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1756 		    __func__, sc->scd_base_addr, scd_base_addr);
1757 	}
1758 
1759 	iwm_nic_unlock(sc);
1760 
1761 	/* reset context data, TX status and translation data */
1762 	error = iwm_write_mem(sc,
1763 	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1764 	    NULL, clear_dwords);
1765 	if (error)
1766 		return EBUSY;
1767 
1768 	if (!iwm_nic_lock(sc))
1769 		return EBUSY;
1770 
1771 	/* Set physical address of TX scheduler rings (1KB aligned). */
1772 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1773 
1774 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1775 
1776 	iwm_nic_unlock(sc);
1777 
1778 	/* enable command channel */
1779 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
1780 	if (error)
1781 		return error;
1782 
1783 	if (!iwm_nic_lock(sc))
1784 		return EBUSY;
1785 
1786 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1787 
1788 	/* Enable DMA channels. */
1789 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1790 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1791 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1792 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1793 	}
1794 
1795 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1796 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1797 
1798 	iwm_nic_unlock(sc);
1799 
1800 	/* Enable L1-Active */
1801 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
1802 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1803 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1804 	}
1805 
1806 	return error;
1807 }
1808 
1809 /*
1810  * NVM read access and content parsing.  We do not support
1811  * external NVM or writing NVM.
1812  * iwlwifi/mvm/nvm.c
1813  */
1814 
1815 /* Default NVM chunk size to read (2 KB) */
1816 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1817 
1818 #define IWM_NVM_WRITE_OPCODE 1
1819 #define IWM_NVM_READ_OPCODE 0
1820 
1821 /* load nvm chunk response */
1822 enum {
1823 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1824 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1825 };
1826 
1827 static int
1828 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1829 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1830 {
1831 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1832 		.offset = htole16(offset),
1833 		.length = htole16(length),
1834 		.type = htole16(section),
1835 		.op_code = IWM_NVM_READ_OPCODE,
1836 	};
1837 	struct iwm_nvm_access_resp *nvm_resp;
1838 	struct iwm_rx_packet *pkt;
1839 	struct iwm_host_cmd cmd = {
1840 		.id = IWM_NVM_ACCESS_CMD,
1841 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1842 		.data = { &nvm_access_cmd, },
1843 	};
1844 	int ret, bytes_read, offset_read;
1845 	uint8_t *resp_data;
1846 
1847 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1848 
1849 	ret = iwm_send_cmd(sc, &cmd);
1850 	if (ret) {
1851 		device_printf(sc->sc_dev,
1852 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1853 		return ret;
1854 	}
1855 
1856 	pkt = cmd.resp_pkt;
1857 
1858 	/* Extract NVM response */
1859 	nvm_resp = (void *)pkt->data;
1860 	ret = le16toh(nvm_resp->status);
1861 	bytes_read = le16toh(nvm_resp->length);
1862 	offset_read = le16toh(nvm_resp->offset);
1863 	resp_data = nvm_resp->data;
1864 	if (ret) {
1865 		if ((offset != 0) &&
1866 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1867 			/*
1868 			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
1869 			 * read a chunk from an address that is a multiple of
1870 			 * 2K and got an error because that address is empty.
1871 			 * Meaning of (offset != 0): the driver has already
1872 			 * read valid data from another chunk, so this case
1873 			 * is not an error.
1874 			 */
1875 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1876 				    "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
1877 				    offset);
1878 			*len = 0;
1879 			ret = 0;
1880 		} else {
1881 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1882 				    "NVM access command failed with status %d\n", ret);
1883 			ret = EIO;
1884 		}
1885 		goto exit;
1886 	}
1887 
1888 	if (offset_read != offset) {
1889 		device_printf(sc->sc_dev,
1890 		    "NVM ACCESS response with invalid offset %d\n",
1891 		    offset_read);
1892 		ret = EINVAL;
1893 		goto exit;
1894 	}
1895 
1896 	if (bytes_read > length) {
1897 		device_printf(sc->sc_dev,
1898 		    "NVM ACCESS response with too much data "
1899 		    "(%d bytes requested, %d bytes received)\n",
1900 		    length, bytes_read);
1901 		ret = EINVAL;
1902 		goto exit;
1903 	}
1904 
1905 	/* Copy the NVM chunk into the caller's buffer. */
1906 	memcpy(data + offset, resp_data, bytes_read);
1907 	*len = bytes_read;
1908 
1909  exit:
1910 	iwm_free_resp(sc, &cmd);
1911 	return ret;
1912 }
1913 
1914 /*
1915  * Read an NVM section completely.
1916  * NICs prior to the 7000 family don't have a real NVM, but just read
1917  * section 0, which is the EEPROM. Because the uCode does not bound
1918  * EEPROM reads, we must check manually in this case that we don't
1919  * overflow and try to read more than the EEPROM size.
1920  * For 7000 family NICs, we supply the maximal size we can read, and
1921  * the uCode fills the response with as much data as it can
1922  * without overflowing, so no check is needed.
1923  */
1924 static int
1925 iwm_nvm_read_section(struct iwm_softc *sc,
1926 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1927 {
1928 	uint16_t seglen, length, offset = 0;
1929 	int ret;
1930 
1931 	/* Set nvm section read length */
1932 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1933 
1934 	seglen = length;
1935 
1936 	/* Read the NVM until exhausted (reading less than requested) */
1937 	while (seglen == length) {
1938 		/* Make sure we don't read past the end of the EEPROM buffer. */
1939 		if ((size_read + offset + length) >
1940 		    sc->cfg->eeprom_size) {
1941 			device_printf(sc->sc_dev,
1942 			    "EEPROM size is too small for NVM\n");
1943 			return ENOBUFS;
1944 		}
1945 
1946 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1947 		if (ret) {
1948 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1949 				    "Cannot read NVM from section %d offset %d, length %d\n",
1950 				    section, offset, length);
1951 			return ret;
1952 		}
1953 		offset += seglen;
1954 	}
1955 
1956 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1957 		    "NVM section %d read completed\n", section);
1958 	*len = offset;
1959 	return 0;
1960 }
1961 
1962 /*
1963  * BEGIN IWM_NVM_PARSE
1964  */
1965 
1966 /* iwlwifi/iwl-nvm-parse.c */
1967 
1968 /* NVM offsets (in words) definitions */
1969 enum iwm_nvm_offsets {
1970 	/* NVM HW-Section offset (in words) definitions */
1971 	IWM_HW_ADDR = 0x15,
1972 
1973 /* NVM SW-Section offset (in words) definitions */
1974 	IWM_NVM_SW_SECTION = 0x1C0,
1975 	IWM_NVM_VERSION = 0,
1976 	IWM_RADIO_CFG = 1,
1977 	IWM_SKU = 2,
1978 	IWM_N_HW_ADDRS = 3,
1979 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1980 
1981 /* NVM calibration section offset (in words) definitions */
1982 	IWM_NVM_CALIB_SECTION = 0x2B8,
1983 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1984 };
1985 
1986 enum iwm_8000_nvm_offsets {
1987 	/* NVM HW-Section offset (in words) definitions */
1988 	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1989 	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1990 	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1991 	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1992 	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1993 
1994 	/* NVM SW-Section offset (in words) definitions */
1995 	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1996 	IWM_NVM_VERSION_8000 = 0,
1997 	IWM_RADIO_CFG_8000 = 0,
1998 	IWM_SKU_8000 = 2,
1999 	IWM_N_HW_ADDRS_8000 = 3,
2000 
2001 	/* NVM REGULATORY -Section offset (in words) definitions */
2002 	IWM_NVM_CHANNELS_8000 = 0,
2003 	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
2004 	IWM_NVM_LAR_OFFSET_8000 = 0x507,
2005 	IWM_NVM_LAR_ENABLED_8000 = 0x7,
2006 
2007 	/* NVM calibration section offset (in words) definitions */
2008 	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
2009 	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
2010 };
2011 
2012 /* SKU Capabilities (actual values from NVM definition) */
2013 enum nvm_sku_bits {
2014 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
2015 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
2016 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
2017 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
2018 };
2019 
2020 /* radio config bits (actual values from NVM definition) */
2021 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
2022 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
2023 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
2024 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
2025 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
2026 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
2027 
2028 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
2029 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
2030 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
2031 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
2032 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
2033 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
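/*
 * Worked example (illustrative values only): for a family-8000 radio_cfg
 * word of 0x12345678, the macros above yield FLAVOR 0x8, DASH 0x7,
 * STEP 0x6, TYPE 0x345, TX_ANT 0x2 and RX_ANT 0x1.
 */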
2034 
2035 /**
2036  * enum iwm_nvm_channel_flags - channel flags in NVM
2037  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
2038  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
2039  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
2040  * @IWM_NVM_CHANNEL_RADAR: radar detection required
2041  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
2042  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
2043  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
2044  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
2045  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
2046  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
2047  */
2048 enum iwm_nvm_channel_flags {
2049 	IWM_NVM_CHANNEL_VALID = (1 << 0),
2050 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
2051 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
2052 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
2053 	IWM_NVM_CHANNEL_DFS = (1 << 7),
2054 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
2055 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
2056 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
2057 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
2058 };
2059 
2060 /*
2061  * Translate EEPROM flags to net80211.
2062  */
2063 static uint32_t
2064 iwm_eeprom_channel_flags(uint16_t ch_flags)
2065 {
2066 	uint32_t nflags;
2067 
2068 	nflags = 0;
2069 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2070 		nflags |= IEEE80211_CHAN_PASSIVE;
2071 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2072 		nflags |= IEEE80211_CHAN_NOADHOC;
2073 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2074 		nflags |= IEEE80211_CHAN_DFS;
2075 		/* Just in case. */
2076 		nflags |= IEEE80211_CHAN_NOADHOC;
2077 	}
2078 
2079 	return (nflags);
2080 }
2081 
2082 static void
2083 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2084     int maxchans, int *nchans, int ch_idx, size_t ch_num,
2085     const uint8_t bands[])
2086 {
2087 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2088 	uint32_t nflags;
2089 	uint16_t ch_flags;
2090 	uint8_t ieee;
2091 	int error;
2092 
2093 	for (; ch_idx < ch_num; ch_idx++) {
2094 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2095 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2096 			ieee = iwm_nvm_channels[ch_idx];
2097 		else
2098 			ieee = iwm_nvm_channels_8000[ch_idx];
2099 
2100 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2101 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2102 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
2103 			    ieee, ch_flags,
2104 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2105 			    "5.2" : "2.4");
2106 			continue;
2107 		}
2108 
2109 		nflags = iwm_eeprom_channel_flags(ch_flags);
2110 		error = ieee80211_add_channel(chans, maxchans, nchans,
2111 		    ieee, 0, 0, nflags, bands);
2112 		if (error != 0)
2113 			break;
2114 
2115 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2116 		    "Ch. %d Flags %x [%sGHz] - Added\n",
2117 		    ieee, ch_flags,
2118 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2119 		    "5.2" : "2.4");
2120 	}
2121 }
2122 
2123 static void
2124 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2125     struct ieee80211_channel chans[])
2126 {
2127 	struct iwm_softc *sc = ic->ic_softc;
2128 	struct iwm_nvm_data *data = sc->nvm_data;
2129 	uint8_t bands[IEEE80211_MODE_BYTES];
2130 	size_t ch_num;
2131 
2132 	memset(bands, 0, sizeof(bands));
2133 	/* 1-13: 11b/g channels. */
2134 	setbit(bands, IEEE80211_MODE_11B);
2135 	setbit(bands, IEEE80211_MODE_11G);
2136 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2137 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2138 
2139 	/* 14: 11b channel only. */
2140 	clrbit(bands, IEEE80211_MODE_11G);
2141 	iwm_add_channel_band(sc, chans, maxchans, nchans,
2142 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2143 
2144 	if (data->sku_cap_band_52GHz_enable) {
2145 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2146 			ch_num = nitems(iwm_nvm_channels);
2147 		else
2148 			ch_num = nitems(iwm_nvm_channels_8000);
2149 		memset(bands, 0, sizeof(bands));
2150 		setbit(bands, IEEE80211_MODE_11A);
2151 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2152 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2153 	}
2154 }
2155 
2156 static void
2157 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2158 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2159 {
2160 	const uint8_t *hw_addr;
2161 
2162 	if (mac_override) {
2163 		static const uint8_t reserved_mac[] = {
2164 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2165 		};
2166 
2167 		hw_addr = (const uint8_t *)(mac_override +
2168 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2169 
2170 		/*
2171 		 * Store the MAC address from the MAC address override (MAO)
2172 		 * section; no byte swapping is required there.
2173 		 */
2174 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2175 
2176 		/*
2177 		 * Force the use of the OTP MAC address in case of reserved MAC
2178 		 * address in the NVM, or if address is given but invalid.
2179 		 */
2180 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2181 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2182 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2183 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2184 			return;
2185 
2186 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2187 		    "%s: mac address from nvm override section invalid\n",
2188 		    __func__);
2189 	}
2190 
2191 	if (nvm_hw) {
2192 		/* read the mac address from WFMP registers */
2193 		uint32_t mac_addr0 =
2194 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2195 		uint32_t mac_addr1 =
2196 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2197 
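		/*
		 * The registers hold the address with the bytes of each
		 * 32-bit word in reverse order, so swizzle them here.
		 */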
2198 		hw_addr = (const uint8_t *)&mac_addr0;
2199 		data->hw_addr[0] = hw_addr[3];
2200 		data->hw_addr[1] = hw_addr[2];
2201 		data->hw_addr[2] = hw_addr[1];
2202 		data->hw_addr[3] = hw_addr[0];
2203 
2204 		hw_addr = (const uint8_t *)&mac_addr1;
2205 		data->hw_addr[4] = hw_addr[1];
2206 		data->hw_addr[5] = hw_addr[0];
2207 
2208 		return;
2209 	}
2210 
2211 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2212 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2213 }
2214 
2215 static int
2216 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2217 	    const uint16_t *phy_sku)
2218 {
2219 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2220 		return le16_to_cpup(nvm_sw + IWM_SKU);
2221 
2222 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2223 }
2224 
2225 static int
2226 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2227 {
2228 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2229 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2230 	else
2231 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2232 						IWM_NVM_VERSION_8000));
2233 }
2234 
2235 static int
2236 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2237 		  const uint16_t *phy_sku)
2238 {
2239 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2240 		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2241 
2242 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2243 }
2244 
2245 static int
2246 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2247 {
2248 	int n_hw_addr;
2249 
2250 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2251 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2252 
2253 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2254 
2255 	return n_hw_addr & IWM_N_HW_ADDR_MASK;
2256 }
2257 
2258 static void
2259 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2260 		  uint32_t radio_cfg)
2261 {
2262 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2263 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2264 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2265 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2266 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2267 		return;
2268 	}
2269 
2270 	/* set the radio configuration for family 8000 */
2271 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2272 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2273 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2274 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2275 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2276 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2277 }
2278 
2279 static int
2280 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2281 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2282 {
2283 #ifdef notyet /* for FAMILY 9000 */
2284 	if (cfg->mac_addr_from_csr) {
2285 		iwm_set_hw_address_from_csr(sc, data);
2286 	} else
2287 #endif
2288 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2289 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2290 
2291 		/* The byte order is little endian 16 bit, meaning 214365 */
2292 		data->hw_addr[0] = hw_addr[1];
2293 		data->hw_addr[1] = hw_addr[0];
2294 		data->hw_addr[2] = hw_addr[3];
2295 		data->hw_addr[3] = hw_addr[2];
2296 		data->hw_addr[4] = hw_addr[5];
2297 		data->hw_addr[5] = hw_addr[4];
2298 	} else {
2299 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2300 	}
2301 
2302 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2303 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2304 		return EINVAL;
2305 	}
2306 
2307 	return 0;
2308 }
2309 
2310 static struct iwm_nvm_data *
2311 iwm_parse_nvm_data(struct iwm_softc *sc,
2312 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2313 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2314 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2315 {
2316 	struct iwm_nvm_data *data;
2317 	uint32_t sku, radio_cfg;
2318 	uint16_t lar_config;
2319 
2320 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2321 		data = kmalloc(sizeof(*data) +
2322 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2323 		    M_DEVBUF, M_WAITOK | M_ZERO);
2324 	} else {
2325 		data = kmalloc(sizeof(*data) +
2326 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2327 		    M_DEVBUF, M_WAITOK | M_ZERO);
2328 	}
2329 	if (!data)
2330 		return NULL;
2331 
2332 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2333 
2334 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2335 	iwm_set_radio_cfg(sc, data, radio_cfg);
2336 
2337 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2338 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2339 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2340 	data->sku_cap_11n_enable = 0;
2341 
2342 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2343 
2344 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2345 		/* TODO: use IWL_NVM_EXT */
2346 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
2347 				       IWM_NVM_LAR_OFFSET_8000_OLD :
2348 				       IWM_NVM_LAR_OFFSET_8000;
2349 
2350 		lar_config = le16_to_cpup(regulatory + lar_offset);
2351 		data->lar_enabled = !!(lar_config &
2352 				       IWM_NVM_LAR_ENABLED_8000);
2353 	}
2354 
2355 	/* If no valid mac address was found - bail out */
2356 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2357 		kfree(data, M_DEVBUF);
2358 		return NULL;
2359 	}
2360 
2361 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2362 		memcpy(data->nvm_ch_flags, sc->cfg->nvm_type == IWM_NVM_SDP ?
2363 		    &regulatory[0] : &nvm_sw[IWM_NVM_CHANNELS],
2364 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2365 	} else {
2366 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2367 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2368 	}
2369 
2370 	return data;
2371 }
2372 
2373 static void
2374 iwm_free_nvm_data(struct iwm_nvm_data *data)
2375 {
2376 	if (data != NULL)
2377 		kfree(data, M_DEVBUF);
2378 }
2379 
2380 static struct iwm_nvm_data *
2381 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2382 {
2383 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2384 
2385 	/* Checking for required sections */
2386 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2387 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2388 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2389 			device_printf(sc->sc_dev,
2390 			    "Can't parse empty OTP/NVM sections\n");
2391 			return NULL;
2392 		}
2393 	} else if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2394 		/* SW and REGULATORY sections are mandatory */
2395 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2396 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2397 			device_printf(sc->sc_dev,
2398 			    "Can't parse empty OTP/NVM sections\n");
2399 			return NULL;
2400 		}
2401 		/* MAC_OVERRIDE or at least HW section must exist */
2402 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2403 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2404 			device_printf(sc->sc_dev,
2405 			    "Can't parse mac_address, empty sections\n");
2406 			return NULL;
2407 		}
2408 
2409 		/* PHY_SKU section is mandatory in B0 */
2410 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2411 			device_printf(sc->sc_dev,
2412 			    "Can't parse phy_sku in B0, empty sections\n");
2413 			return NULL;
2414 		}
2415 	} else {
2416 		panic("unknown device family %d\n", sc->cfg->device_family);
2417 	}
2418 
2419 	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2420 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2421 	calib = (const uint16_t *)
2422 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2423 	regulatory = sc->cfg->nvm_type == IWM_NVM_SDP ?
2424 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data :
2425 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2426 	mac_override = (const uint16_t *)
2427 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2428 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2429 
2430 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2431 	    phy_sku, regulatory);
2432 }
2433 
2434 static int
2435 iwm_nvm_init(struct iwm_softc *sc)
2436 {
2437 	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2438 	int i, ret, section;
2439 	uint32_t size_read = 0;
2440 	uint8_t *nvm_buffer, *temp;
2441 	uint16_t len;
2442 
2443 	memset(nvm_sections, 0, sizeof(nvm_sections));
2444 
2445 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2446 		return EINVAL;
2447 
2448 	/* Load NVM values from the NIC: read each section via the firmware. */
2450 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2451 
2452 	nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF, M_WAITOK | M_ZERO);
2453 	if (!nvm_buffer)
2454 		return ENOMEM;
2455 	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2456 		/* we override the constness for initial read */
2457 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2458 					   &len, size_read);
2459 		if (ret)
2460 			continue;
2461 		size_read += len;
2462 		temp = kmalloc(len, M_DEVBUF, M_WAITOK);
2463 		if (!temp) {
2464 			ret = ENOMEM;
2465 			break;
2466 		}
2467 		memcpy(temp, nvm_buffer, len);
2468 
2469 		nvm_sections[section].data = temp;
2470 		nvm_sections[section].length = len;
2471 	}
2472 	if (!size_read)
2473 		device_printf(sc->sc_dev, "OTP is blank\n");
2474 	kfree(nvm_buffer, M_DEVBUF);
2475 
2476 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2477 	if (!sc->nvm_data)
2478 		return EINVAL;
2479 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2480 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2481 
2482 	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2483 		if (nvm_sections[i].data != NULL)
2484 			kfree(nvm_sections[i].data, M_DEVBUF);
2485 	}
2486 
2487 	return 0;
2488 }
2489 
2490 static int
2491 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2492 	const struct iwm_fw_desc *section)
2493 {
2494 	struct iwm_dma_info *dma = &sc->fw_dma;
2495 	uint8_t *v_addr;
2496 	bus_addr_t p_addr;
2497 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2498 	int ret = 0;
2499 
2500 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2501 		    "%s: [%d] uCode section being loaded...\n",
2502 		    __func__, section_num);
2503 
2504 	v_addr = dma->vaddr;
2505 	p_addr = dma->paddr;
2506 
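	/*
	 * Copy the section to the device in chunks, bouncing each chunk
	 * through the preallocated fw_dma buffer and DMA'ing it into SRAM.
	 */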
2507 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2508 		uint32_t copy_size, dst_addr;
2509 		int extended_addr = FALSE;
2510 
2511 		copy_size = MIN(chunk_sz, section->len - offset);
2512 		dst_addr = section->offset + offset;
2513 
2514 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2515 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2516 			extended_addr = TRUE;
2517 
2518 		if (extended_addr)
2519 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2520 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2521 
2522 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2523 		    copy_size);
2524 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2525 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2526 						   copy_size);
2527 
2528 		if (extended_addr)
2529 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2530 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2531 
2532 		if (ret) {
2533 			device_printf(sc->sc_dev,
2534 			    "%s: Could not load the [%d] uCode section\n",
2535 			    __func__, section_num);
2536 			break;
2537 		}
2538 	}
2539 
2540 	return ret;
2541 }
2542 
2543 /*
2544  * ucode
2545  */
2546 static int
2547 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2548 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2549 {
2550 	sc->sc_fw_chunk_done = 0;
2551 
2552 	if (!iwm_nic_lock(sc))
2553 		return EBUSY;
2554 
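	/*
	 * Program the service channel's DMA engine: pause the channel, set
	 * the SRAM destination and DRAM source addresses, then kick off a
	 * single-TB transfer and re-enable the channel.
	 */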
2555 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2556 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2557 
2558 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2559 	    dst_addr);
2560 
2561 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2562 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2563 
2564 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2565 	    (iwm_get_dma_hi_addr(phy_addr)
2566 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2567 
2568 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2569 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2570 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2571 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2572 
2573 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2574 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2575 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2576 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2577 
2578 	iwm_nic_unlock(sc);
2579 
2580 	/* wait up to 5s for this segment to load */
2581 	lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz * 5);
2582 
2583 	if (!sc->sc_fw_chunk_done) {
2584 		device_printf(sc->sc_dev,
2585 		    "fw chunk addr 0x%x len %d failed to load\n",
2586 		    dst_addr, byte_cnt);
2587 		return ETIMEDOUT;
2588 	}
2589 
2590 	return 0;
2591 }
2592 
2593 static int
2594 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2595 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2596 {
2597 	int shift_param;
2598 	int i, ret = 0, sec_num = 0x1;
2599 	uint32_t val, last_read_idx = 0;
2600 
2601 	if (cpu == 1) {
2602 		shift_param = 0;
2603 		*first_ucode_section = 0;
2604 	} else {
2605 		shift_param = 16;
2606 		(*first_ucode_section)++;
2607 	}
2608 
2609 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2610 		last_read_idx = i;
2611 
2612 		/*
2613 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2614 		 * CPU1 sections from the CPU2 sections.
2615 		 * The PAGING_SEPARATOR_SECTION delimiter separates the CPU2
2616 		 * non-paged sections from the CPU2 paging sections.
2617 		 */
2618 		if (!image->sec[i].data ||
2619 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2620 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2621 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2622 				    "Break since Data not valid or Empty section, sec = %d\n",
2623 				    i);
2624 			break;
2625 		}
2626 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2627 		if (ret)
2628 			return ret;
2629 
2630 		/* Notify the ucode of the loaded section number and status */
2631 		if (iwm_nic_lock(sc)) {
2632 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2633 			val = val | (sec_num << shift_param);
2634 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2635 			sec_num = (sec_num << 1) | 0x1;
2636 			iwm_nic_unlock(sc);
2637 		}
2638 	}
2639 
2640 	*first_ucode_section = last_read_idx;
2641 
2642 	iwm_enable_interrupts(sc);
2643 
2644 	if (iwm_nic_lock(sc)) {
2645 		if (cpu == 1)
2646 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2647 		else
2648 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2649 		iwm_nic_unlock(sc);
2650 	}
2651 
2652 	return 0;
2653 }
2654 
2655 static int
2656 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2657 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2658 {
2659 	int shift_param;
2660 	int i, ret = 0;
2661 	uint32_t last_read_idx = 0;
2662 
2663 	if (cpu == 1) {
2664 		shift_param = 0;
2665 		*first_ucode_section = 0;
2666 	} else {
2667 		shift_param = 16;
2668 		(*first_ucode_section)++;
2669 	}
2670 
2671 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2672 		last_read_idx = i;
2673 
2674 		/*
2675 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2676 		 * CPU1 sections from the CPU2 sections.
2677 		 * The PAGING_SEPARATOR_SECTION delimiter separates the CPU2
2678 		 * non-paged sections from the CPU2 paging sections.
2679 		 */
2680 		if (!image->sec[i].data ||
2681 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2682 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2683 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2684 				    "Break since Data not valid or Empty section, sec = %d\n",
2685 				     i);
2686 			break;
2687 		}
2688 
2689 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2690 		if (ret)
2691 			return ret;
2692 	}
2693 
2694 	*first_ucode_section = last_read_idx;
2695 
2696 	return 0;
2698 }
2699 
2700 static int
2701 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2702 {
2703 	int ret = 0;
2704 	int first_ucode_section;
2705 
2706 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2707 		     image->is_dual_cpus ? "Dual" : "Single");
2708 
2709 	/* load to FW the binary non secured sections of CPU1 */
2710 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2711 	if (ret)
2712 		return ret;
2713 
2714 	if (image->is_dual_cpus) {
2715 		/* set CPU2 header address */
2716 		if (iwm_nic_lock(sc)) {
2717 			iwm_write_prph(sc,
2718 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2719 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2720 			iwm_nic_unlock(sc);
2721 		}
2722 
2723 		/* load to FW the binary sections of CPU2 */
2724 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2725 						 &first_ucode_section);
2726 		if (ret)
2727 			return ret;
2728 	}
2729 
2730 	iwm_enable_interrupts(sc);
2731 
2732 	/* release CPU reset */
2733 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2734 
2735 	return 0;
2736 }
2737 
2738 int
2739 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2740 	const struct iwm_fw_img *image)
2741 {
2742 	int ret = 0;
2743 	int first_ucode_section;
2744 
2745 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2746 		    image->is_dual_cpus ? "Dual" : "Single");
2747 
2748 	/* configure the ucode to be ready to get the secured image */
2749 	/* release CPU reset */
2750 	if (iwm_nic_lock(sc)) {
2751 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2752 		    IWM_RELEASE_CPU_RESET_BIT);
2753 		iwm_nic_unlock(sc);
2754 	}
2755 
2756 	/* load to FW the binary Secured sections of CPU1 */
2757 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2758 	    &first_ucode_section);
2759 	if (ret)
2760 		return ret;
2761 
2762 	/* load to FW the binary sections of CPU2 */
2763 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2764 	    &first_ucode_section);
2765 }
2766 
2767 /* XXX Get rid of this definition */
2768 static inline void
2769 iwm_enable_fw_load_int(struct iwm_softc *sc)
2770 {
2771 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2772 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2773 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2774 }
2775 
2776 /* XXX Add proper rfkill support code */
2777 static int
2778 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2779 {
2780 	int ret;
2781 
2782 	/* This may fail if AMT took ownership of the device */
2783 	if (iwm_prepare_card_hw(sc)) {
2784 		device_printf(sc->sc_dev,
2785 		    "%s: Exit HW not ready\n", __func__);
2786 		ret = EIO;
2787 		goto out;
2788 	}
2789 
2790 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2791 
2792 	iwm_disable_interrupts(sc);
2793 
2794 	/* make sure rfkill handshake bits are cleared */
2795 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2796 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2797 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2798 
2799 	/* Clear interrupts again; they are re-enabled just before the firmware load below. */
2800 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2801 
2802 	ret = iwm_nic_init(sc);
2803 	if (ret) {
2804 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2805 		goto out;
2806 	}
2807 
2808 	/*
2809 	 * Now we load the firmware and don't want to be interrupted, even
2810 	 * by the RF-Kill interrupt (hence mask all interrupts except the
2811 	 * FH_TX interrupt, which is needed to load the firmware). If the
2812 	 * RF-Kill switch is toggled, we will find out after having loaded
2813 	 * the firmware and return the proper value to the caller.
2814 	 */
2815 	iwm_enable_fw_load_int(sc);
2816 
2817 	/* really make sure rfkill handshake bits are cleared */
2818 	/* maybe we should write a few times more?  just to make sure */
2819 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2820 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2821 
2822 	/* Load the given image to the HW */
2823 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
2824 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2825 	else
2826 		ret = iwm_pcie_load_given_ucode(sc, fw);
2827 
2828 	/* XXX re-check RF-Kill state */
2829 
2830 out:
2831 	return ret;
2832 }
2833 
2834 static int
2835 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2836 {
2837 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2838 		.valid = htole32(valid_tx_ant),
2839 	};
2840 
2841 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2842 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2843 }
2844 
2845 /* iwlwifi: mvm/fw.c */
2846 static int
2847 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2848 {
2849 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2850 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2851 
2852 	/* Set parameters */
2853 	phy_cfg_cmd.phy_cfg = htole32(iwm_get_phy_config(sc));
2854 	phy_cfg_cmd.calib_control.event_trigger =
2855 	    sc->sc_default_calib[ucode_type].event_trigger;
2856 	phy_cfg_cmd.calib_control.flow_trigger =
2857 	    sc->sc_default_calib[ucode_type].flow_trigger;
2858 
2859 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2860 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2861 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2862 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2863 }
2864 
2865 static int
2866 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2867 {
2868 	struct iwm_alive_data *alive_data = data;
2869 	struct iwm_alive_resp_v3 *palive3;
2870 	struct iwm_alive_resp *palive;
2871 	struct iwm_umac_alive *umac;
2872 	struct iwm_lmac_alive *lmac1;
2873 	struct iwm_lmac_alive *lmac2 = NULL;
2874 	uint16_t status;
2875 
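	/* Newer firmware (CDB) reports two LMACs; the v3 response has one. */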
2876 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2877 		palive = (void *)pkt->data;
2878 		umac = &palive->umac_data;
2879 		lmac1 = &palive->lmac_data[0];
2880 		lmac2 = &palive->lmac_data[1];
2881 		status = le16toh(palive->status);
2882 	} else {
2883 		palive3 = (void *)pkt->data;
2884 		umac = &palive3->umac_data;
2885 		lmac1 = &palive3->lmac_data;
2886 		status = le16toh(palive3->status);
2887 	}
2888 
2889 	sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
2890 	if (lmac2)
2891 		sc->error_event_table[1] =
2892 			le32toh(lmac2->error_event_table_ptr);
2893 	sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
2894 	sc->umac_error_event_table = le32toh(umac->error_info_addr);
2895 	alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
2896 	alive_data->valid = status == IWM_ALIVE_STATUS_OK;
2897 	if (sc->umac_error_event_table)
2898 		sc->support_umac_log = TRUE;
2899 
2900 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2901 		    "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
2902 		    status, lmac1->ver_type, lmac1->ver_subtype);
2903 
2904 	if (lmac2)
2905 		IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");
2906 
2907 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2908 		    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2909 		    le32toh(umac->umac_major),
2910 		    le32toh(umac->umac_minor));
2911 
2912 	return TRUE;
2913 }
2914 
2915 static int
2916 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2917 	struct iwm_rx_packet *pkt, void *data)
2918 {
2919 	struct iwm_phy_db *phy_db = data;
2920 
2921 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2922 		if (pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2923 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2924 			    __func__, pkt->hdr.code);
2925 		}
2926 		return TRUE;
2927 	}
2928 
2929 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2930 		device_printf(sc->sc_dev,
2931 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2932 	}
2933 
2934 	return FALSE;
2935 }
2936 
2937 static int
2938 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
2939 	enum iwm_ucode_type ucode_type)
2940 {
2941 	struct iwm_notification_wait alive_wait;
2942 	struct iwm_alive_data alive_data;
2943 	const struct iwm_fw_img *fw;
2944 	enum iwm_ucode_type old_type = sc->cur_ucode;
2945 	int error;
2946 	static const uint16_t alive_cmd[] = { IWM_ALIVE };
2947 
2948 	fw = &sc->sc_fw.img[ucode_type];
2949 	sc->cur_ucode = ucode_type;
2950 	sc->ucode_loaded = FALSE;
2951 
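	/*
	 * Register for the ALIVE notification before starting the firmware,
	 * so that the notification cannot be missed.
	 */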
2952 	memset(&alive_data, 0, sizeof(alive_data));
2953 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2954 				   alive_cmd, nitems(alive_cmd),
2955 				   iwm_alive_fn, &alive_data);
2956 
2957 	error = iwm_start_fw(sc, fw);
2958 	if (error) {
2959 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2960 		sc->cur_ucode = old_type;
2961 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2962 		return error;
2963 	}
2964 
2965 	/*
2966 	 * Some things may run in the background now, but we
2967 	 * just wait for the ALIVE notification here.
2968 	 */
2969 	IWM_UNLOCK(sc);
2970 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2971 				      IWM_UCODE_ALIVE_TIMEOUT);
2972 	IWM_LOCK(sc);
2973 	if (error) {
2974 		if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2975 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2976 			if (iwm_nic_lock(sc)) {
2977 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2978 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2979 				iwm_nic_unlock(sc);
2980 			}
2981 			device_printf(sc->sc_dev,
2982 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2983 			    a, b);
2984 		}
2985 		sc->cur_ucode = old_type;
2986 		return error;
2987 	}
2988 
2989 	if (!alive_data.valid) {
2990 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2991 		    __func__);
2992 		sc->cur_ucode = old_type;
2993 		return EIO;
2994 	}
2995 
2996 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2997 
2998 	/*
2999 	 * Configure and operate the firmware paging mechanism.
3000 	 * The driver configures the paging flow only once; the CPU2 paging
3001 	 * image is included in the IWM_UCODE_INIT image.
3002 	 */
3003 	if (fw->paging_mem_size) {
3004 		error = iwm_save_fw_paging(sc, fw);
3005 		if (error) {
3006 			device_printf(sc->sc_dev,
3007 			    "%s: failed to save the FW paging image\n",
3008 			    __func__);
3009 			return error;
3010 		}
3011 
3012 		error = iwm_send_paging_cmd(sc, fw);
3013 		if (error) {
3014 			device_printf(sc->sc_dev,
3015 			    "%s: failed to send the paging cmd\n", __func__);
3016 			iwm_free_fw_paging(sc);
3017 			return error;
3018 		}
3019 	}
3020 
3021 	if (!error)
3022 		sc->ucode_loaded = TRUE;
3023 	return error;
3024 }
3025 
3026 /*
3027  * mvm misc bits
3028  */
3029 
3030 /*
3031  * follows iwlwifi/fw.c
3032  */
3033 static int
3034 iwm_run_init_ucode(struct iwm_softc *sc, int justnvm)
3035 {
3036 	struct iwm_notification_wait calib_wait;
3037 	static const uint16_t init_complete[] = {
3038 		IWM_INIT_COMPLETE_NOTIF,
3039 		IWM_CALIB_RES_NOTIF_PHY_DB
3040 	};
3041 	int ret;
3042 
3043 	/* do not operate with rfkill switch turned on */
3044 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3045 		device_printf(sc->sc_dev,
3046 		    "radio is disabled by hardware switch\n");
3047 		return EPERM;
3048 	}
3049 
3050 	iwm_init_notification_wait(sc->sc_notif_wait,
3051 				   &calib_wait,
3052 				   init_complete,
3053 				   nitems(init_complete),
3054 				   iwm_wait_phy_db_entry,
3055 				   sc->sc_phy_db);
3056 
3057 	/* Will also start the device */
3058 	ret = iwm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
3059 	if (ret) {
3060 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
3061 		    ret);
3062 		goto error;
3063 	}
3064 
3065 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
3066 		ret = iwm_send_bt_init_conf(sc);
3067 		if (ret) {
3068 			device_printf(sc->sc_dev,
3069 			    "failed to send bt coex configuration: %d\n", ret);
3070 			goto error;
3071 		}
3072 	}
3073 
3074 	if (justnvm) {
3075 		/* Read nvm */
3076 		ret = iwm_nvm_init(sc);
3077 		if (ret) {
3078 			device_printf(sc->sc_dev, "failed to read nvm\n");
3079 			goto error;
3080 		}
3081 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
3082 		goto error;	/* not an error: just remove the notification waiter */
3083 	}
3084 
3085 	/* Send TX valid antennas before triggering calibrations */
3086 	ret = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
3087 	if (ret) {
3088 		device_printf(sc->sc_dev,
3089 		    "failed to send antennas before calibration: %d\n", ret);
3090 		goto error;
3091 	}
3092 
3093 	/*
3094 	 * Send the PHY configuration command to the init uCode to start
3095 	 * the internal calibrations of the 16.0 uCode init image.
3096 	 */
3097 	ret = iwm_send_phy_cfg_cmd(sc);
3098 	if (ret) {
3099 		device_printf(sc->sc_dev,
3100 		    "%s: Failed to run INIT calibrations: %d\n",
3101 		    __func__, ret);
3102 		goto error;
3103 	}
3104 
3105 	/*
3106 	 * Nothing to do but wait for the init complete notification
3107 	 * from the firmware.
3108 	 */
3109 	IWM_UNLOCK(sc);
3110 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3111 	    IWM_UCODE_CALIB_TIMEOUT);
3112 	IWM_LOCK(sc);
3113 
3115 	goto out;
3116 
3117 error:
3118 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3119 out:
3120 	return ret;
3121 }
3122 
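/*
 * Tell the firmware to enable PCIe Latency Tolerance Reporting (LTR),
 * but only if LTR was found to be enabled for this device
 * (sc_ltr_enabled).
 */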
3123 static int
3124 iwm_config_ltr(struct iwm_softc *sc)
3125 {
3126 	struct iwm_ltr_config_cmd cmd = {
3127 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
3128 	};
3129 
3130 	if (!sc->sc_ltr_enabled)
3131 		return 0;
3132 
3133 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3134 }
3135 
3136 /*
3137  * receive side
3138  */
3139 
3140 /* (re)stock rx ring, called at init-time and at runtime */
3141 static int
3142 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3143 {
3144 	struct iwm_rx_ring *ring = &sc->rxq;
3145 	struct iwm_rx_data *data = &ring->data[idx];
3146 	struct mbuf *m;
3147 	bus_dmamap_t dmamap;
3148 	bus_dma_segment_t seg;
3149 	int nsegs, error;
3150 
3151 	m = m_getjcl(M_WAITOK, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3152 	if (m == NULL)
3153 		return ENOBUFS;
3154 
3155 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3156 #if defined(__DragonFly__)
3157 	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
3158 	    m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
3159 #else
3160 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3161 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3162 #endif
3163 	if (error != 0) {
3164 		device_printf(sc->sc_dev,
3165 		    "%s: can't map mbuf, error %d\n", __func__, error);
3166 		m_freem(m);
3167 		return error;
3168 	}
3169 
3170 	if (data->m != NULL)
3171 		bus_dmamap_unload(ring->data_dmat, data->map);
3172 
3173 	/* Swap ring->spare_map with data->map */
3174 	dmamap = data->map;
3175 	data->map = ring->spare_map;
3176 	ring->spare_map = dmamap;
3177 
3178 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3179 	data->m = m;
3180 
3181 	/* Update RX descriptor. */
3182 	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
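	/*
	 * MQ hardware takes the full 64-bit DMA address; legacy hardware
	 * takes the address shifted right by 8, which is why the buffer
	 * must be 256-byte aligned.
	 */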
3183 	if (sc->cfg->mqrx_supported)
3184 		((uint64_t *)ring->desc)[idx] = htole64(seg.ds_addr);
3185 	else
3186 		((uint32_t *)ring->desc)[idx] = htole32(seg.ds_addr >> 8);
3187 	bus_dmamap_sync(ring->free_desc_dma.tag, ring->free_desc_dma.map,
3188 	    BUS_DMASYNC_PREWRITE);
3189 
3190 	return 0;
3191 }
3192 
3193 static void
3194 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3195 {
3196 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3197 
3198 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3199 
3200 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3201 }
3202 
3203 /*
3204  * Retrieve the average noise (in dBm) among receivers.
3205  */
3206 static int
3207 iwm_get_noise(struct iwm_softc *sc,
3208     const struct iwm_statistics_rx_non_phy *stats)
3209 {
3210 	int i, total, nbant, noise;
3211 
3212 	total = nbant = noise = 0;
3213 	for (i = 0; i < 3; i++) {
3214 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3215 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3216 		    __func__,
3217 		    i,
3218 		    noise);
3219 
3220 		if (noise) {
3221 			total += noise;
3222 			nbant++;
3223 		}
3224 	}
3225 
3226 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3227 	    __func__, nbant, total);
3228 #if 0
3229 	/* There should be at least one antenna but check anyway. */
3230 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3231 #else
3232 	/* For now, just hard-code it to -96 to be safe */
3233 	return (-96);
3234 #endif
3235 }
3236 
3237 static void
3238 iwm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3239 {
3240 	struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3241 
3242 	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3243 	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3244 }
3245 
3246 /* iwlwifi: mvm/rx.c */
3247 /*
3248  * iwm_rx_get_signal_strength - use the new rx PHY INFO API.
3249  * Values are reported by the fw as positive and need to be negated
3250  * to obtain their dBm value.  Account for missing antennas by replacing
3251  * 0 values by -256 dBm: practically 0 power and a non-feasible 8-bit value.
3252  */
3253 static int
3254 iwm_rx_get_signal_strength(struct iwm_softc *sc,
3255     struct iwm_rx_phy_info *phy_info)
3256 {
3257 	int energy_a, energy_b, energy_c, max_energy;
3258 	uint32_t val;
3259 
3260 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3261 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3262 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3263 	energy_a = energy_a ? -energy_a : -256;
3264 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3265 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3266 	energy_b = energy_b ? -energy_b : -256;
3267 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3268 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3269 	energy_c = energy_c ? -energy_c : -256;
3270 	max_energy = MAX(energy_a, energy_b);
3271 	max_energy = MAX(max_energy, energy_c);
3272 
3273 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3274 	    "energy In A %d B %d C %d , and max %d\n",
3275 	    energy_a, energy_b, energy_c, max_energy);
3276 
3277 	return max_energy;
3278 }
3279 
3280 static int
3281 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
3282     struct iwm_rx_mpdu_desc *desc)
3283 {
3284 	int energy_a, energy_b;
3285 
3286 	energy_a = desc->v1.energy_a;
3287 	energy_b = desc->v1.energy_b;
3288 	energy_a = energy_a ? -energy_a : -256;
3289 	energy_b = energy_b ? -energy_b : -256;
3290 	return MAX(energy_a, energy_b);
3291 }
3292 
3293 /*
3294  * iwm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3295  *
3296  * Handles the actual data of the Rx packet from the fw
3297  */
3298 static bool
3299 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3300     bool stolen)
3301 {
3302 	struct ieee80211com *ic = &sc->sc_ic;
3303 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3304 	struct ieee80211_frame *wh;
3305 	struct ieee80211_rx_stats rxs;
3306 	struct iwm_rx_phy_info *phy_info;
3307 	struct iwm_rx_mpdu_res_start *rx_res;
3308 	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3309 	uint32_t len;
3310 	uint32_t rx_pkt_status;
3311 	int rssi;
3312 
3313 	phy_info = &sc->sc_last_phy_info;
3314 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3315 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3316 	len = le16toh(rx_res->byte_count);
3317 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3318 
3319 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3320 		device_printf(sc->sc_dev,
3321 		    "dsp size out of range [0,20]: %d\n",
3322 		    phy_info->cfg_phy_cnt);
3323 		return false;
3324 	}
3325 
3326 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3327 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3328 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3329 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3330 		return false;
3331 	}
3332 
3333 	rssi = iwm_rx_get_signal_strength(sc, phy_info);
3334 
3335 	/* Map it to relative value */
3336 	rssi = rssi - sc->sc_noise;
3337 
3338 	/* replenish ring for the buffer we're going to feed to the sharks */
3339 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3340 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3341 		    __func__);
3342 		return false;
3343 	}
3344 
3345 	m->m_data = pkt->data + sizeof(*rx_res);
3346 	m->m_pkthdr.len = m->m_len = len;
3347 
3348 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3349 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3350 
3351 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3352 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3353 	    __func__,
3354 	    le16toh(phy_info->channel),
3355 	    le16toh(phy_info->phy_flags));
3356 
3357 	/*
3358 	 * Populate an RX state struct with the provided information.
3359 	 */
3360 	bzero(&rxs, sizeof(rxs));
3361 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3362 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3363 	rxs.c_ieee = le16toh(phy_info->channel);
3364 	if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3365 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3366 	} else {
3367 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3368 	}
3369 
3370 	/* rssi is in 1/2db units */
3371 #if !defined(__DragonFly__)
3372 	rxs.c_rssi = rssi * 2;
3373 	rxs.c_nf = sc->sc_noise;
3374 #else
3375 	/* old DFly ieee80211 ABI does not have c_rssi */
3376 #endif
3377 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3378 		return false;
3379 
3380 	if (ieee80211_radiotap_active_vap(vap)) {
3381 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3382 
3383 		tap->wr_flags = 0;
3384 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3385 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3386 		tap->wr_chan_freq = htole16(rxs.c_freq);
3387 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3388 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3389 		tap->wr_dbm_antsignal = (int8_t)rssi;
3390 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3391 		tap->wr_tsft = phy_info->system_timestamp;
3392 		switch (phy_info->rate) {
3393 		/* CCK rates. */
3394 		case  10: tap->wr_rate =   2; break;
3395 		case  20: tap->wr_rate =   4; break;
3396 		case  55: tap->wr_rate =  11; break;
3397 		case 110: tap->wr_rate =  22; break;
3398 		/* OFDM rates. */
3399 		case 0xd: tap->wr_rate =  12; break;
3400 		case 0xf: tap->wr_rate =  18; break;
3401 		case 0x5: tap->wr_rate =  24; break;
3402 		case 0x7: tap->wr_rate =  36; break;
3403 		case 0x9: tap->wr_rate =  48; break;
3404 		case 0xb: tap->wr_rate =  72; break;
3405 		case 0x1: tap->wr_rate =  96; break;
3406 		case 0x3: tap->wr_rate = 108; break;
3407 		/* Unknown rate: should not happen. */
3408 		default:  tap->wr_rate =   0;
3409 		}
3410 	}
3411 
3412 	return true;
3413 }
3414 
3415 static bool
3416 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3417     bool stolen)
3418 {
3419 	struct ieee80211com *ic = &sc->sc_ic;
3420 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3421 	struct ieee80211_frame *wh;
3422 	struct ieee80211_rx_stats rxs;
3423 	struct iwm_rx_mpdu_desc *desc;
3424 	struct iwm_rx_packet *pkt;
3425 	int rssi;
3426 	uint32_t hdrlen, len, rate_n_flags;
3427 	uint16_t phy_info;
3428 	uint8_t channel;
3429 
3430 	pkt = mtodo(m, offset);
3431 	desc = (void *)pkt->data;
3432 
3433 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
3434 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
3435 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3436 		    "Bad CRC or FIFO: 0x%04X.\n", le16toh(desc->status));
3437 		return false;
3438 	}
3439 
3440 	channel = desc->v1.channel;
3441 	len = le16toh(desc->mpdu_len);
3442 	phy_info = le16toh(desc->phy_info);
3443 	rate_n_flags = le32toh(desc->v1.rate_n_flags);
3444 
3445 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*desc));
3446 	m->m_data = pkt->data + sizeof(*desc);
3447 	m->m_pkthdr.len = m->m_len = len;
3449 
3450 	/* Account for padding following the frame header. */
3451 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD) {
3452 		hdrlen = ieee80211_anyhdrsize(wh);
3453 		memmove(mtodo(m, 2), mtodo(m, 0), hdrlen);
3454 		m->m_data = mtodo(m, 2);
3455 		wh = mtod(m, struct ieee80211_frame *);
3456 	}
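	/*
	 * Illustration of the pad handling above: the firmware inserts a
	 * 2-byte pad after the 802.11 header, so for a QoS data frame with
	 * hdrlen = 26 the buffer holds the header at [0, 26) and the pad at
	 * [26, 28).  Copying the header up by two bytes and advancing m_data
	 * leaves the header at [2, 28), directly followed by the payload.
	 */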
3457 
3458 	/* Map it to relative value */
3459 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
3460 	rssi = rssi - sc->sc_noise;
3461 
3462 	/* replenish ring for the buffer we're going to feed to the sharks */
3463 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3464 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3465 		    __func__);
3466 		return false;
3467 	}
3468 
3469 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3470 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3471 
3472 	/*
3473 	 * Populate an RX state struct with the provided information.
3474 	 */
3475 	bzero(&rxs, sizeof(rxs));
3476 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3477 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3478 	rxs.c_ieee = channel;
3479 	rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
3480 	    channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
3481 
3482 	/* rssi is in 1/2db units */
3483 #if !defined(__DragonFly__)
3484 	rxs.c_rssi = rssi * 2;
3485 	rxs.c_nf = sc->sc_noise;
3486 #else
3487 	/* old DFly ieee80211 ABI does not have c_rssi */
3488 #endif
3489 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3490 		return false;
3491 
3492 	if (ieee80211_radiotap_active_vap(vap)) {
3493 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3494 
3495 		tap->wr_flags = 0;
3496 		if ((phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE) != 0)
3497 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3498 		tap->wr_chan_freq = htole16(rxs.c_freq);
3499 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3500 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3501 		tap->wr_dbm_antsignal = (int8_t)rssi;
3502 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3503 		tap->wr_tsft = desc->v1.gp2_on_air_rise;
3504 		switch ((rate_n_flags & 0xff)) {
3505 		/* CCK rates. */
3506 		case  10: tap->wr_rate =   2; break;
3507 		case  20: tap->wr_rate =   4; break;
3508 		case  55: tap->wr_rate =  11; break;
3509 		case 110: tap->wr_rate =  22; break;
3510 		/* OFDM rates. */
3511 		case 0xd: tap->wr_rate =  12; break;
3512 		case 0xf: tap->wr_rate =  18; break;
3513 		case 0x5: tap->wr_rate =  24; break;
3514 		case 0x7: tap->wr_rate =  36; break;
3515 		case 0x9: tap->wr_rate =  48; break;
3516 		case 0xb: tap->wr_rate =  72; break;
3517 		case 0x1: tap->wr_rate =  96; break;
3518 		case 0x3: tap->wr_rate = 108; break;
3519 		/* Unknown rate: should not happen. */
3520 		default:  tap->wr_rate =   0;
3521 		}
3522 	}
3523 
3524 	return true;
3525 }
3526 
3527 static bool
3528 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3529     bool stolen)
3530 {
3531 	struct ieee80211com *ic;
3532 	struct ieee80211_frame *wh;
3533 	struct ieee80211_node *ni;
3534 	bool ret;
3535 
3536 	ic = &sc->sc_ic;
3537 
3538 	ret = sc->cfg->mqrx_supported ?
3539 	    iwm_rx_mpdu_mq(sc, m, offset, stolen) :
3540 	    iwm_rx_rx_mpdu(sc, m, offset, stolen);
3541 	if (!ret) {
3542 #if !defined(__DragonFly__)
3543 		counter_u64_add(ic->ic_ierrors, 1);
3544 #else
3545 		++sc->sc_ic.ic_ierrors;
3546 #endif
3547 		return (ret);
3548 	}
3549 
3550 	wh = mtod(m, struct ieee80211_frame *);
3551 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3552 
3553 	IWM_UNLOCK(sc);
3554 	if (ni != NULL) {
3555 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3556 #if !defined(__DragonFly__)
3557 		ieee80211_input_mimo(ni, m);
3558 #else
3559 		ieee80211_input_mimo(ni, m, NULL);
3560 #endif
3561 		ieee80211_free_node(ni);
3562 	} else {
3563 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3564 #if !defined(__DragonFly__)
3565 		ieee80211_input_mimo_all(ic, m);
3566 #else
3567 		ieee80211_input_mimo_all(ic, m, NULL);
3568 #endif
3569 	}
3570 	IWM_LOCK(sc);
3571 
3572 	return true;
3573 }
3574 
3575 static int
3576 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3577 	struct iwm_node *in)
3578 {
3579 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3580 #if !defined(__DragonFly__)
3581 	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3582 #endif
3583 	struct ieee80211_node *ni = &in->in_ni;
3584 	struct ieee80211vap *vap = ni->ni_vap;
3585 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3586 	int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3587 	boolean_t rate_matched;
3588 	uint8_t tx_resp_rate;
3589 
3590 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3591 
3592 	/* Update rate control statistics. */
3593 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3594 	    __func__,
3595 	    (int) le16toh(tx_resp->status.status),
3596 	    (int) le16toh(tx_resp->status.sequence),
3597 	    tx_resp->frame_count,
3598 	    tx_resp->bt_kill_count,
3599 	    tx_resp->failure_rts,
3600 	    tx_resp->failure_frame,
3601 	    le32toh(tx_resp->initial_rate),
3602 	    (int) le16toh(tx_resp->wireless_media_time));
3603 
3604 	tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3605 
3606 	/* For rate control, ignore frames sent at a different initial rate. */
3607 	rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3608 
3609 	if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3610 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3611 		    "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3612 		    "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3613 	}
3614 
3615 #if !defined(__DragonFly__)
3616 	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3617 		     IEEE80211_RATECTL_STATUS_LONG_RETRY;
3618 	txs->short_retries = tx_resp->failure_rts;
3619 	txs->long_retries = tx_resp->failure_frame;
3620 	if (status != IWM_TX_STATUS_SUCCESS &&
3621 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3622 		switch (status) {
3623 		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3624 			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3625 			break;
3626 		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3627 			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3628 			break;
3629 		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3630 			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3631 			break;
3632 		default:
3633 			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3634 			break;
3635 		}
3636 	} else {
3637 		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3638 	}
3639 
3640 	if (rate_matched) {
3641 		ieee80211_ratectl_tx_complete(ni, txs);
3642 
3643 		int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3644 		new_rate = vap->iv_bss->ni_txrate;
3645 		if (new_rate != 0 && new_rate != cur_rate) {
3646 			struct iwm_node *in = IWM_NODE(vap->iv_bss);
3647 			iwm_setrates(sc, in, rix);
3648 			iwm_send_lq_cmd(sc, &in->in_lq, FALSE);
3649 		}
3650 	}
3651 
3652 	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3653 #else
3654 	/*
3655 	 * XXX use the old ieee80211 ratectl ABI; the new one hasn't been
3656 	 * incorporated into our ieee80211 yet.
3657 	 */
3658 	int failack = tx_resp->failure_frame;
3659 	int ret;
3660 
3661 	if (status != IWM_TX_STATUS_SUCCESS &&
3662 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3663 		if (rate_matched) {
3664 			ieee80211_ratectl_tx_complete(vap, ni,
3665 			    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3666 		}
3667 		ret = 1;
3668 	} else {
3669 		if (rate_matched) {
3670 			ieee80211_ratectl_tx_complete(vap, ni,
3671 			    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3672 		}
3673 		ret = 0;
3674 	}
3675 
3676 	if (rate_matched) {
3677 		int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3678 		new_rate = vap->iv_bss->ni_txrate;
3679 		if (new_rate != 0 && new_rate != cur_rate) {
3680 			struct iwm_node *in = IWM_NODE(vap->iv_bss);
3681 			iwm_setrates(sc, in, rix);
3682 		}
3683 	}
3684 
3685 	return ret;
3686 
3687 #endif
3688 }
3689 
3690 static void
3691 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3692 {
3693 	struct iwm_cmd_header *cmd_hdr;
3694 	struct iwm_tx_ring *ring;
3695 	struct iwm_tx_data *txd;
3696 	struct iwm_node *in;
3697 	struct mbuf *m;
3698 	int idx, qid, qmsk, status;
3699 
3700 	cmd_hdr = &pkt->hdr;
3701 	idx = cmd_hdr->idx;
3702 	qid = cmd_hdr->qid;
3703 
3704 	ring = &sc->txq[qid];
3705 	txd = &ring->data[idx];
3706 	in = txd->in;
3707 	m = txd->m;
3708 
3709 	KASSERT(txd->done == 0, ("txd not done"));
3710 	KASSERT(txd->in != NULL, ("txd without node"));
3711 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3712 
3713 	sc->sc_tx_timer = 0;
3714 
3715 	status = iwm_rx_tx_cmd_single(sc, pkt, in);
3716 
3717 	/* Unmap and free mbuf. */
3718 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3719 	bus_dmamap_unload(ring->data_dmat, txd->map);
3720 
3721 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3722 	    "free txd %p, in %p\n", txd, txd->in);
3723 	txd->done = 1;
3724 	txd->m = NULL;
3725 	txd->in = NULL;
3726 
3727 	ieee80211_tx_complete(&in->in_ni, m, status);
3728 
3729 	qmsk = 1 << qid;
3730 	if (--ring->queued < IWM_TX_RING_LOMARK && (sc->qfullmsk & qmsk) != 0) {
3731 		sc->qfullmsk &= ~qmsk;
3732 		if (sc->qfullmsk == 0)
3733 			iwm_start(sc);
3734 	}
3735 }
3736 
3737 /*
3738  * transmit side
3739  */
3740 
3741 /*
3742  * Process a "command done" firmware notification.  This is where we wakeup
3743  * processes waiting for a synchronous command completion.
3744  * from if_iwn
3745  */
3746 static void
3747 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3748 {
3749 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3750 	struct iwm_tx_data *data;
3751 
3752 	if (pkt->hdr.qid != IWM_CMD_QUEUE) {
3753 		return;	/* Not a command ack. */
3754 	}
3755 
3756 	/* XXX wide commands? */
3757 	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3758 	    "cmd notification type 0x%x qid %d idx %d\n",
3759 	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3760 
3761 	data = &ring->data[pkt->hdr.idx];
3762 
3763 	/* If the command was mapped in an mbuf, free it. */
3764 	if (data->m != NULL) {
3765 		bus_dmamap_sync(ring->data_dmat, data->map,
3766 		    BUS_DMASYNC_POSTWRITE);
3767 		bus_dmamap_unload(ring->data_dmat, data->map);
3768 		m_freem(data->m);
3769 		data->m = NULL;
3770 	}
3771 	wakeup(&ring->desc[pkt->hdr.idx]);
3772 
3773 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3774 		device_printf(sc->sc_dev,
3775 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3776 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3777 		/* XXX call iwm_force_nmi() */
3778 	}
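	/*
	 * Why the check above works: ring->cur is the next free slot, so
	 * with ring->queued commands outstanding the oldest in-flight
	 * command occupies slot (cur - queued) mod IWM_TX_RING_COUNT.
	 * An in-order completion for slot idx therefore satisfies
	 * (idx + queued) % IWM_TX_RING_COUNT == cur; e.g. cur = 5 with
	 * queued = 2 expects idx = 3.
	 */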
3779 
3780 	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3781 	ring->queued--;
3782 	if (ring->queued == 0)
3783 		iwm_pcie_clear_cmd_in_flight(sc);
3784 }
3785 
3786 #if 0
3787 /*
3788  * necessary only for block ack mode
3789  */
3790 void
3791 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3792 	uint16_t len)
3793 {
3794 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3795 	uint16_t w_val;
3796 
3797 	scd_bc_tbl = sc->sched_dma.vaddr;
3798 
3799 	len += 8; /* magic numbers came naturally from paris */
3800 	len = roundup(len, 4) / 4;
3801 
3802 	w_val = htole16(sta_id << 12 | len);
3803 
3804 	/* Update TX scheduler. */
3805 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3806 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3807 	    BUS_DMASYNC_PREWRITE);
3808 
3809 	/* Apparently the HW reads past the ring end on wrap; duplicate the entry. */
3810 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3811 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3812 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3813 		    BUS_DMASYNC_PREWRITE);
3814 	}
3815 }
3816 #endif
3817 
3818 static int
3819 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3820 {
3821 	int i;
3822 
3823 	for (i = 0; i < nitems(iwm_rates); i++) {
3824 		if (iwm_rates[i].rate == rate)
3825 			return (i);
3826 	}
3827 	/* XXX error? */
3828 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3829 	    "%s: couldn't find an entry for rate=%d\n",
3830 	    __func__,
3831 	    rate);
3832 	return (0);
3833 }
3834 
3835 /*
3836  * Fill in the rate related information for a transmit command.
3837  */
3838 static const struct iwm_rate *
3839 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3840 	struct mbuf *m, struct iwm_tx_cmd *tx)
3841 {
3842 	struct ieee80211_node *ni = &in->in_ni;
3843 	struct ieee80211_frame *wh;
3844 	const struct ieee80211_txparam *tp = ni->ni_txparms;
3845 	const struct iwm_rate *rinfo;
3846 	int type;
3847 	int ridx, rate_flags;
3848 
3849 	wh = mtod(m, struct ieee80211_frame *);
3850 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3851 
3852 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3853 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3854 
3855 	if (type == IEEE80211_FC0_TYPE_MGT ||
3856 	    type == IEEE80211_FC0_TYPE_CTL ||
3857 	    (m->m_flags & M_EAPOL) != 0) {
3858 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3859 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3860 		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3861 	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3862 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3863 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3864 		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3865 	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3866 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3867 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3868 		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3869 	} else {
3870 		/* for data frames, use RS table */
3871 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3872 		ridx = iwm_rate2ridx(sc, ni->ni_txrate);
3873 		if (ridx == -1)
3874 			ridx = 0;
3875 
3876 		/* This is the index into the programmed table */
3877 		tx->initial_rate_index = 0;
3878 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3879 	}
3880 
3881 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3882 	    "%s: frame type=%d txrate %d\n",
3883 	        __func__, type, iwm_rates[ridx].rate);
3884 
3885 	rinfo = &iwm_rates[ridx];
3886 
3887 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3888 	    __func__, ridx,
3889 	    rinfo->rate,
3890 	    !! (IWM_RIDX_IS_CCK(ridx))
3891 	    );
3892 
3893 	/* XXX TODO: hard-coded TX antenna? */
3894 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_9000)
3895 		rate_flags = IWM_RATE_MCS_ANT_B_MSK;
3896 	else
3897 		rate_flags = IWM_RATE_MCS_ANT_A_MSK;
3898 	if (IWM_RIDX_IS_CCK(ridx))
3899 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3900 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
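	/*
	 * Worked example of the composition above: for CCK 2 Mb/s on a
	 * pre-9000 device, rate_flags is IWM_RATE_MCS_ANT_A_MSK |
	 * IWM_RATE_MCS_CCK_MSK and rinfo->plcp is 20 (the PLCP value the
	 * RX radiotap switches earlier in this file map to 2 Mb/s), giving
	 * rate_n_flags = htole32(IWM_RATE_MCS_ANT_A_MSK |
	 *     IWM_RATE_MCS_CCK_MSK | 20).
	 */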
3901 
3902 	return rinfo;
3903 }
3904 
3905 #define TB0_SIZE 16
3906 static int
3907 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3908 {
3909 	struct ieee80211com *ic = &sc->sc_ic;
3910 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3911 	struct iwm_node *in = IWM_NODE(ni);
3912 	struct iwm_tx_ring *ring;
3913 	struct iwm_tx_data *data;
3914 	struct iwm_tfd *desc;
3915 	struct iwm_device_cmd *cmd;
3916 	struct iwm_tx_cmd *tx;
3917 	struct ieee80211_frame *wh;
3918 	struct ieee80211_key *k = NULL;
3919 	struct mbuf *m1;
3920 	const struct iwm_rate *rinfo;
3921 	uint32_t flags;
3922 	u_int hdrlen;
3923 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3924 	int nsegs;
3925 	uint8_t tid, type;
3926 	int i, totlen, error, pad;
3927 
3928 	wh = mtod(m, struct ieee80211_frame *);
3929 	hdrlen = ieee80211_anyhdrsize(wh);
3930 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3931 	tid = 0;
3932 	ring = &sc->txq[ac];
3933 	desc = &ring->desc[ring->cur];
3934 	data = &ring->data[ring->cur];
3935 
3936 	/* Fill out iwm_tx_cmd to send to the firmware */
3937 	cmd = &ring->cmd[ring->cur];
3938 	cmd->hdr.code = IWM_TX_CMD;
3939 	cmd->hdr.flags = 0;
3940 	cmd->hdr.qid = ring->qid;
3941 	cmd->hdr.idx = ring->cur;
3942 
3943 	tx = (void *)cmd->data;
3944 	memset(tx, 0, sizeof(*tx));
3945 
3946 	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3947 
3948 	/* Encrypt the frame if need be. */
3949 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3950 		/* Retrieve key for TX && do software encryption. */
3951 		k = ieee80211_crypto_encap(ni, m);
3952 		if (k == NULL) {
3953 			m_freem(m);
3954 			return (ENOBUFS);
3955 		}
3956 		/* 802.11 header may have moved. */
3957 		wh = mtod(m, struct ieee80211_frame *);
3958 	}
3959 
3960 	if (ieee80211_radiotap_active_vap(vap)) {
3961 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3962 
3963 		tap->wt_flags = 0;
3964 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3965 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3966 		tap->wt_rate = rinfo->rate;
3967 		if (k != NULL)
3968 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3969 		ieee80211_radiotap_tx(vap, m);
3970 	}
3971 
3972 	flags = 0;
3973 	totlen = m->m_pkthdr.len;
3974 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3975 		flags |= IWM_TX_CMD_FLG_ACK;
3976 	}
3977 
3978 	if (type == IEEE80211_FC0_TYPE_DATA &&
3979 	    totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold &&
3980 	    !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3981 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3982 	}
3983 
3984 	tx->sta_id = IWM_STATION_ID;
3985 
3986 	if (type == IEEE80211_FC0_TYPE_MGT) {
3987 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3988 
3989 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3990 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3991 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3992 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3993 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3994 		} else {
3995 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3996 		}
3997 	} else {
3998 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3999 	}
4000 
4001 	if (hdrlen & 3) {
4002 		/* First segment length must be a multiple of 4. */
4003 		flags |= IWM_TX_CMD_FLG_MH_PAD;
4004 		tx->offload_assist |= htole16(1 << IWM_TX_CMD_OFFLD_PAD);
4005 		pad = 4 - (hdrlen & 3);
4006 	} else {
4007 		tx->offload_assist = 0;
4008 		pad = 0;
4009 	}
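	/*
	 * Worked example: a QoS data header is 26 bytes, so hdrlen & 3 == 2
	 * and pad = 4 - 2 = 2.  The descriptor setup below then sizes TB1 to
	 * span the command header, the TX command, and the padded
	 * 26 + 2 = 28 header bytes, minus the 16 bytes already in TB0.
	 */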
4010 
4011 	tx->len = htole16(totlen);
4012 	tx->tid_tspec = tid;
4013 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
4014 
4015 	/* Set physical address of "scratch area". */
4016 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
4017 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
4018 
4019 	/* Copy 802.11 header in TX command. */
4020 	memcpy((uint8_t *)tx + sizeof(*tx), wh, hdrlen);
4021 
4022 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
4023 
4024 	tx->sec_ctl = 0;
4025 	tx->tx_flags |= htole32(flags);
4026 
4027 	/* Trim 802.11 header. */
4028 	m_adj(m, hdrlen);
4029 #if !defined(__DragonFly__)
4030 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
4031 	    segs, &nsegs, BUS_DMA_NOWAIT);
4032 #else
4033 	error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
4034 	    segs, IWM_MAX_SCATTER - 2,
4035 	    &nsegs, BUS_DMA_NOWAIT);
4036 #endif
4037 	if (error != 0) {
4038 		if (error != EFBIG) {
4039 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
4040 			    error);
4041 			m_freem(m);
4042 			return error;
4043 		}
4044 		/* Too many DMA segments, linearize mbuf. */
4045 #if !defined(__DragonFly__)
4046 		m1 = m_collapse(m, M_WAITOK, IWM_MAX_SCATTER - 2);
4047 #else
4048 		m1 = m_defrag(m, M_NOWAIT);
4049 #endif
4050 		if (m1 == NULL) {
4051 			device_printf(sc->sc_dev,
4052 			    "%s: could not defrag mbuf\n", __func__);
4053 			m_freem(m);
4054 			return (ENOBUFS);
4055 		}
4056 		m = m1;
4057 
4058 #if !defined(__DragonFly__)
4059 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
4060 		    segs, &nsegs, BUS_DMA_NOWAIT);
4061 #else
4062 		error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map,
4063 		    &m, segs, IWM_MAX_SCATTER - 2, &nsegs, BUS_DMA_NOWAIT);
4064 #endif
4065 		if (error != 0) {
4066 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
4067 			    error);
4068 			m_freem(m);
4069 			return error;
4070 		}
4071 	}
4072 	data->m = m;
4073 	data->in = in;
4074 	data->done = 0;
4075 
4076 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4077 	    "sending txd %p, in %p\n", data, data->in);
4078 	KASSERT(data->in != NULL, ("node is NULL"));
4079 
4080 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4081 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
4082 	    ring->qid, ring->cur, totlen, nsegs,
4083 	    le32toh(tx->tx_flags),
4084 	    le32toh(tx->rate_n_flags),
4085 	    tx->initial_rate_index
4086 	    );
4087 
4088 	/* Fill TX descriptor. */
4089 	memset(desc, 0, sizeof(*desc));
4090 	desc->num_tbs = 2 + nsegs;
4091 
4092 	desc->tbs[0].lo = htole32(data->cmd_paddr);
4093 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
4094 	    (TB0_SIZE << 4));
4095 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
4096 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
4097 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx) +
4098 	    hdrlen + pad - TB0_SIZE) << 4));
4099 
4100 	/* Other DMA segments are for data payload. */
4101 	for (i = 0; i < nsegs; i++) {
4102 		seg = &segs[i];
4103 		desc->tbs[i + 2].lo = htole32(seg->ds_addr);
4104 		desc->tbs[i + 2].hi_n_len =
4105 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr) |
4106 		    (seg->ds_len << 4));
4107 	}
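	/*
	 * Note on the tbs[] encoding used above: hi_n_len packs the upper
	 * bits of the DMA address (the hardware uses 36-bit addressing)
	 * into its low nibble and the buffer length into the upper 12 bits,
	 * hence the << 4.  E.g. a 100-byte segment below 4 GB encodes as
	 * htole16(100 << 4).
	 */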
4108 
4109 	bus_dmamap_sync(ring->data_dmat, data->map,
4110 	    BUS_DMASYNC_PREWRITE);
4111 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
4112 	    BUS_DMASYNC_PREWRITE);
4113 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4114 	    BUS_DMASYNC_PREWRITE);
4115 
4116 #if 0
4117 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
4118 #endif
4119 
4120 	/* Kick TX ring. */
4121 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4122 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4123 
4124 	/* Mark TX ring as full if we reach a certain threshold. */
4125 	if (++ring->queued > IWM_TX_RING_HIMARK) {
4126 		sc->qfullmsk |= 1 << ring->qid;
4127 	}
4128 
4129 	return 0;
4130 }
4131 
4132 static int
4133 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
4134     const struct ieee80211_bpf_params *params)
4135 {
4136 	struct ieee80211com *ic = ni->ni_ic;
4137 	struct iwm_softc *sc = ic->ic_softc;
4138 	int error = 0;
4139 
4140 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4141 	    "->%s begin\n", __func__);
4142 
4143 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4144 		m_freem(m);
4145 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4146 		    "<-%s not RUNNING\n", __func__);
4147 		return (ENETDOWN);
4148 	}
4149 
4150 	IWM_LOCK(sc);
4151 	/* XXX fix this */
4152 	if (params == NULL) {
4153 		error = iwm_tx(sc, m, ni, 0);
4154 	} else {
4155 		error = iwm_tx(sc, m, ni, 0);
4156 	}
4157 	if (sc->sc_tx_timer == 0)
4158 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4159 	sc->sc_tx_timer = 5;
4160 	IWM_UNLOCK(sc);
4161 
4162 	return (error);
4163 }
4164 
4165 /*
4166  * mvm/tx.c
4167  */
4168 
4169 /*
4170  * Note that there are transports that buffer frames before they reach
4171  * the firmware. This means that after flush_tx_path is called, the
4172  * queue might not be empty. The race-free way to handle this is to:
4173  * 1) set the station as draining
4174  * 2) flush the Tx path
4175  * 3) wait for the transport queues to be empty
 * A sketch of this sequence follows the function below.
4176  */
4177 int
4178 iwm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
4179 {
4180 	int ret;
4181 	struct iwm_tx_path_flush_cmd flush_cmd = {
4182 		.queues_ctl = htole32(tfd_msk),
4183 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4184 	};
4185 
4186 	ret = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
4187 	    sizeof(flush_cmd), &flush_cmd);
4188 	if (ret)
4189 		device_printf(sc->sc_dev,
4190 		    "Flushing tx queue failed: %d\n", ret);
4191 	return ret;
4192 }
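
/*
 * Illustrative sketch (not compiled) of the race-free sequence described
 * above: iwm_drain_sta() and iwm_wait_tx_queues_empty() are hypothetical
 * helpers, not part of this driver; iwm_bring_down_firmware() approximates
 * the same idea by draining the net80211 transmit queue instead.
 */
#if 0
static int
iwm_drain_and_flush(struct iwm_softc *sc, uint32_t tfd_msk)
{
	int error;

	/* 1) set the station as draining (hypothetical helper) */
	if ((error = iwm_drain_sta(sc, TRUE)) != 0)
		return error;
	/* 2) flush the Tx path */
	if ((error = iwm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC)) != 0)
		return error;
	/* 3) wait for the transport queues to be empty (hypothetical) */
	return iwm_wait_tx_queues_empty(sc, tfd_msk);
}
#endif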
4193 
4194 /*
4195  * BEGIN mvm/quota.c
4196  */
4197 
4198 static int
4199 iwm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
4200 {
4201 	struct iwm_time_quota_cmd cmd;
4202 	int i, idx, ret, num_active_macs, quota, quota_rem;
4203 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4204 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
4205 	uint16_t id;
4206 
4207 	memset(&cmd, 0, sizeof(cmd));
4208 
4209 	/* currently, PHY ID == binding ID */
4210 	if (ivp) {
4211 		id = ivp->phy_ctxt->id;
4212 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4213 		colors[id] = ivp->phy_ctxt->color;
4214 
4215 		n_ifs[id] = 1;
4217 	}
4218 
4219 	/*
4220 	 * The FW's scheduling session consists of
4221 	 * IWM_MAX_QUOTA fragments. Divide these fragments
4222 	 * equally between all the bindings that require quota.
4223 	 */
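	/*
	 * Worked example, assuming IWM_MAX_QUOTA is 128 (its value in
	 * if_iwmreg.h): with three active MACs each binding is granted
	 * 128 / 3 = 42 fragments, and the remainder of 2 goes to the
	 * first binding below.
	 */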
4224 	num_active_macs = 0;
4225 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4226 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4227 		num_active_macs += n_ifs[i];
4228 	}
4229 
4230 	quota = 0;
4231 	quota_rem = 0;
4232 	if (num_active_macs) {
4233 		quota = IWM_MAX_QUOTA / num_active_macs;
4234 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
4235 	}
4236 
4237 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4238 		if (colors[i] < 0)
4239 			continue;
4240 
4241 		cmd.quotas[idx].id_and_color =
4242 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4243 
4244 		if (n_ifs[i] <= 0) {
4245 			cmd.quotas[idx].quota = htole32(0);
4246 			cmd.quotas[idx].max_duration = htole32(0);
4247 		} else {
4248 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4249 			cmd.quotas[idx].max_duration = htole32(0);
4250 		}
4251 		idx++;
4252 	}
4253 
4254 	/* Give the remainder of the session to the first binding */
4255 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4256 
4257 	ret = iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4258 	    sizeof(cmd), &cmd);
4259 	if (ret)
4260 		device_printf(sc->sc_dev,
4261 		    "%s: Failed to send quota: %d\n", __func__, ret);
4262 	return ret;
4263 }
4264 
4265 /*
4266  * END mvm/quota.c
4267  */
4268 
4269 /*
4270  * ieee80211 routines
4271  */
4272 
4273 /*
4274  * Change to AUTH state in 80211 state machine.  Roughly matches what
4275  * Linux does in bss_info_changed().
4276  */
4277 static int
4278 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4279 {
4280 	struct ieee80211_node *ni;
4281 	struct iwm_node *in;
4282 	struct iwm_vap *iv = IWM_VAP(vap);
4283 	uint32_t duration;
4284 	int error;
4285 
4286 	/*
4287 	 * XXX I have a feeling that the vap node is being
4288 	 * freed from underneath us. Grr.
4289 	 */
4290 	ni = ieee80211_ref_node(vap->iv_bss);
4291 	in = IWM_NODE(ni);
4292 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4293 	    "%s: called; vap=%p, bss ni=%p\n",
4294 	    __func__,
4295 	    vap,
4296 	    ni);
4297 	IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
4298 	    __func__, ether_sprintf(ni->ni_bssid));
4299 
4300 	in->in_assoc = 0;
4301 	iv->iv_auth = 1;
4302 
4303 	/*
4304 	 * Firmware bug - it'll crash if the beacon interval is less
4305 	 * than 16. We can't avoid connecting at all, so refuse the
4306 	 * station state change; this will cause net80211 to abandon
4307 	 * attempts to connect to this AP, and eventually wpa_s will
4308 	 * blacklist the AP...
4309 	 */
4310 	if (ni->ni_intval < 16) {
4311 		device_printf(sc->sc_dev,
4312 		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
4313 		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
4314 		error = EINVAL;
4315 		goto out;
4316 	}
4317 
4318 	error = iwm_allow_mcast(vap, sc);
4319 	if (error) {
4320 		device_printf(sc->sc_dev,
4321 		    "%s: failed to set multicast\n", __func__);
4322 		goto out;
4323 	}
4324 
4325 	/*
4326 	 * This is where it deviates from what Linux does.
4327 	 *
4328 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4329 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4330 	 * and always does a mac_ctx_changed().
4331 	 *
4332 	 * The openbsd port doesn't attempt to do that - it resets things
4333 	 * at odd states and does the add here.
4334 	 *
4335 	 * So, until the state handling is fixed (i.e., we never reset
4336 	 * the NIC except for a firmware failure, which should drag
4337 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4338 	 * contexts that are required), let's do a dirty hack here.
4339 	 */
4340 	if (iv->is_uploaded) {
4341 		if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
4342 			device_printf(sc->sc_dev,
4343 			    "%s: failed to update MAC\n", __func__);
4344 			goto out;
4345 		}
4346 	} else {
4347 		if ((error = iwm_mac_ctxt_add(sc, vap)) != 0) {
4348 			device_printf(sc->sc_dev,
4349 			    "%s: failed to add MAC\n", __func__);
4350 			goto out;
4351 		}
4352 	}
4353 	sc->sc_firmware_state = 1;
4354 
4355 	if ((error = iwm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4356 	    in->in_ni.ni_chan, 1, 1)) != 0) {
4357 		device_printf(sc->sc_dev,
4358 		    "%s: failed update phy ctxt\n", __func__);
4359 		goto out;
4360 	}
4361 	iv->phy_ctxt = &sc->sc_phyctxt[0];
4362 
4363 	if ((error = iwm_binding_add_vif(sc, iv)) != 0) {
4364 		device_printf(sc->sc_dev,
4365 		    "%s: binding update cmd\n", __func__);
4366 		goto out;
4367 	}
4368 	sc->sc_firmware_state = 2;
4369 	/*
4370 	 * Authentication becomes unreliable when powersaving is left enabled
4371 	 * here. Powersaving will be activated again when association has
4372 	 * finished or is aborted.
4373 	 */
4374 	iv->ps_disabled = TRUE;
4375 	error = iwm_power_update_mac(sc);
4376 	iv->ps_disabled = FALSE;
4377 	if (error != 0) {
4378 		device_printf(sc->sc_dev,
4379 		    "%s: failed to update power management\n",
4380 		    __func__);
4381 		goto out;
4382 	}
4383 	if ((error = iwm_add_sta(sc, in)) != 0) {
4384 		device_printf(sc->sc_dev,
4385 		    "%s: failed to add sta\n", __func__);
4386 		goto out;
4387 	}
4388 	sc->sc_firmware_state = 3;
4389 
4390 	/*
4391 	 * Prevent the FW from wandering off channel during association
4392 	 * by "protecting" the session with a time event.
4393 	 */
4394 	/* XXX duration is in units of TU, not MS */
4395 	duration = IWM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4396 	iwm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);
4397 
4398 	error = 0;
4399 out:
4400 	if (error != 0)
4401 		iv->iv_auth = 0;
4402 	ieee80211_free_node(ni);
4403 	return (error);
4404 }
4405 
4406 static struct ieee80211_node *
4407 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4408 {
4409 	return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4410 	    M_WAITOK | M_ZERO);
4411 }
4412 
4413 static uint8_t
4414 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4415 {
4416 	uint8_t plcp = rate_n_flags & 0xff;
4417 	int i;
4418 
4419 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4420 		if (iwm_rates[i].plcp == plcp)
4421 			return iwm_rates[i].rate;
4422 	}
4423 	return 0;
4424 }
4425 
4426 uint8_t
4427 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4428 {
4429 	int i;
4430 	uint8_t rval;
4431 
4432 	for (i = 0; i < rs->rs_nrates; i++) {
4433 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4434 		if (rval == iwm_rates[ridx].rate)
4435 			return rs->rs_rates[i];
4436 	}
4437 
4438 	return 0;
4439 }
4440 
4441 static int
4442 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4443 {
4444 	int i;
4445 
4446 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4447 		if (iwm_rates[i].rate == rate)
4448 			return i;
4449 	}
4450 
4451 	device_printf(sc->sc_dev,
4452 	    "%s: WARNING: device rate for %u not found!\n",
4453 	    __func__, rate);
4454 
4455 	return -1;
4456 }
4457 
4458 
4459 static void
4460 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4461 {
4462 	struct ieee80211_node *ni = &in->in_ni;
4463 	struct iwm_lq_cmd *lq = &in->in_lq;
4464 	struct ieee80211_rateset *rs = &ni->ni_rates;
4465 	int nrates = rs->rs_nrates;
4466 	int i, ridx, tab = 0;
4467 //	int txant = 0;
4468 
4469 	KASSERT(rix >= 0 && rix < nrates, ("invalid rix"));
4470 
4471 	if (nrates > nitems(lq->rs_table)) {
4472 		device_printf(sc->sc_dev,
4473 		    "%s: node supports %d rates, driver handles "
4474 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4475 		return;
4476 	}
4477 	if (nrates == 0) {
4478 		device_printf(sc->sc_dev,
4479 		    "%s: node supports 0 rates, odd!\n", __func__);
4480 		return;
4481 	}
4482 	nrates = imin(rix + 1, nrates);
4483 
4484 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4485 	    "%s: nrates=%d\n", __func__, nrates);
4486 
4487 	/* then construct a lq_cmd based on those */
4488 	memset(lq, 0, sizeof(*lq));
4489 	lq->sta_id = IWM_STATION_ID;
4490 
4491 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4492 	if (ni->ni_flags & IEEE80211_NODE_HT)
4493 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4494 
4495 	/*
4496 	 * Are these used? (We don't do SISO or MIMO.) We need to set
4497 	 * them to non-zero, though, or we get an error.
4498 	 */
4499 	lq->single_stream_ant_msk = 1;
4500 	lq->dual_stream_ant_msk = 1;
4501 
4502 	/*
4503 	 * Build the actual rate selection table.
4504 	 * The lowest bits are the rates.  Additionally,
4505 	 * CCK needs bit 9 to be set.  The rest of the bits
4506 	 * we add to the table select the tx antenna.
4507 	 * Note that we add the rates highest-first
4508 	 * (opposite of the ni_rates ordering).
4509 	 */
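	/*
	 * Worked example of the encoding described above, assuming PLCP 110
	 * for CCK 11 Mb/s, antenna A as mask 1 at IWM_RATE_MCS_ANT_POS (14),
	 * and IWM_RATE_MCS_CCK_MSK as bit 9:
	 * tab = 110 | (1 << 14) | (1 << 9) = 0x426e.
	 */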
4510 	for (i = 0; i < nrates; i++) {
4511 		int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4512 		int nextant;
4513 
4514 		/* Map 802.11 rate to HW rate index. */
4515 		ridx = iwm_rate2ridx(sc, rate);
4516 		if (ridx == -1)
4517 			continue;
4518 
4519 #if 0
4520 		if (txant == 0)
4521 			txant = iwm_get_valid_tx_ant(sc);
4522 		nextant = 1<<(ffs(txant)-1);
4523 		txant &= ~nextant;
4524 #else
4525 		nextant = iwm_get_valid_tx_ant(sc);
4526 #endif
4527 		tab = iwm_rates[ridx].plcp;
4528 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4529 		if (IWM_RIDX_IS_CCK(ridx))
4530 			tab |= IWM_RATE_MCS_CCK_MSK;
4531 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4532 		    "station rate i=%d, rate=%d, hw=%x\n",
4533 		    i, iwm_rates[ridx].rate, tab);
4534 		lq->rs_table[i] = htole32(tab);
4535 	}
4536 	/* then fill the rest with the lowest possible rate */
4537 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4538 		KASSERT(tab != 0, ("invalid tab"));
4539 		lq->rs_table[i] = htole32(tab);
4540 	}
4541 }
4542 
4543 static int
4544 iwm_media_change(struct ifnet *ifp)
4545 {
4546 	struct ieee80211vap *vap = ifp->if_softc;
4547 	struct ieee80211com *ic = vap->iv_ic;
4548 	struct iwm_softc *sc = ic->ic_softc;
4549 	int error;
4550 
4551 	error = ieee80211_media_change(ifp);
4552 	if (error != ENETRESET)
4553 		return error;
4554 
4555 	IWM_LOCK(sc);
4556 	if (ic->ic_nrunning > 0) {
4557 		iwm_stop(sc);
4558 		iwm_init(sc);
4559 	}
4560 	IWM_UNLOCK(sc);
4561 	return error;
4562 }
4563 
4564 static void
4565 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
4566 {
4567 	struct iwm_vap *ivp = IWM_VAP(vap);
4568 	int error;
4569 
4570 	/* Avoid Tx watchdog triggering, when transfers get dropped here. */
4571 	sc->sc_tx_timer = 0;
4572 
4573 	ivp->iv_auth = 0;
4574 	if (sc->sc_firmware_state == 3) {
4575 		iwm_xmit_queue_drain(sc);
4576 //		iwm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
4577 		error = iwm_rm_sta(sc, vap, TRUE);
4578 		if (error) {
4579 			device_printf(sc->sc_dev,
4580 			    "%s: Failed to remove station: %d\n",
4581 			    __func__, error);
4582 		}
4583 	}
4584 	if (sc->sc_firmware_state == 3) {
4585 		error = iwm_mac_ctxt_changed(sc, vap);
4586 		if (error) {
4587 			device_printf(sc->sc_dev,
4588 			    "%s: Failed to change mac context: %d\n",
4589 			    __func__, error);
4590 		}
4591 	}
4592 	if (sc->sc_firmware_state == 3) {
4593 		error = iwm_sf_update(sc, vap, FALSE);
4594 		if (error) {
4595 			device_printf(sc->sc_dev,
4596 			    "%s: Failed to update smart FIFO: %d\n",
4597 			    __func__, error);
4598 		}
4599 	}
4600 	if (sc->sc_firmware_state == 3) {
4601 		error = iwm_rm_sta_id(sc, vap);
4602 		if (error) {
4603 			device_printf(sc->sc_dev,
4604 			    "%s: Failed to remove station id: %d\n",
4605 			    __func__, error);
4606 		}
4607 	}
4608 	if (sc->sc_firmware_state == 3) {
4609 		error = iwm_update_quotas(sc, NULL);
4610 		if (error) {
4611 			device_printf(sc->sc_dev,
4612 			    "%s: Failed to update PHY quota: %d\n",
4613 			    __func__, error);
4614 		}
4615 	}
4616 	if (sc->sc_firmware_state == 3) {
4617 		/* XXX Might need to specify bssid correctly. */
4618 		error = iwm_mac_ctxt_changed(sc, vap);
4619 		if (error) {
4620 			device_printf(sc->sc_dev,
4621 			    "%s: Failed to change mac context: %d\n",
4622 			    __func__, error);
4623 		}
4624 	}
4625 	if (sc->sc_firmware_state == 3) {
4626 		sc->sc_firmware_state = 2;
4627 	}
4628 	if (sc->sc_firmware_state > 1) {
4629 		error = iwm_binding_remove_vif(sc, ivp);
4630 		if (error) {
4631 			device_printf(sc->sc_dev,
4632 			    "%s: Failed to remove channel ctx: %d\n",
4633 			    __func__, error);
4634 		}
4635 	}
4636 	if (sc->sc_firmware_state > 1) {
4637 		sc->sc_firmware_state = 1;
4638 	}
4639 	ivp->phy_ctxt = NULL;
4640 	if (sc->sc_firmware_state > 0) {
4641 		error = iwm_mac_ctxt_changed(sc, vap);
4642 		if (error) {
4643 			device_printf(sc->sc_dev,
4644 			    "%s: Failed to change mac context: %d\n",
4645 			    __func__, error);
4646 		}
4647 	}
4648 	if (sc->sc_firmware_state > 0) {
4649 		error = iwm_power_update_mac(sc);
4650 		if (error != 0) {
4651 			device_printf(sc->sc_dev,
4652 			    "%s: failed to update power management\n",
4653 			    __func__);
4654 		}
4655 	}
4656 	sc->sc_firmware_state = 0;
4657 }
4658 
4659 static int
4660 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4661 {
4662 	struct iwm_vap *ivp = IWM_VAP(vap);
4663 	struct ieee80211com *ic = vap->iv_ic;
4664 	struct iwm_softc *sc = ic->ic_softc;
4665 	struct iwm_node *in;
4666 	int error;
4667 
4668 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4669 	    "switching state %s -> %s arg=0x%x\n",
4670 	    ieee80211_state_name[vap->iv_state],
4671 	    ieee80211_state_name[nstate],
4672 	    arg);
4673 
4674 	IEEE80211_UNLOCK(ic);
4675 	IWM_LOCK(sc);
4676 
4677 	if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
4678 	    (nstate == IEEE80211_S_AUTH ||
4679 	     nstate == IEEE80211_S_ASSOC ||
4680 	     nstate == IEEE80211_S_RUN)) {
4681 		/* Stop the scan LED blinking when we start authenticating. */
4682 		iwm_led_blink_stop(sc);
4683 	}
4684 
4685 	if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
4686 		iwm_led_disable(sc);
4687 		/* disable beacon filtering if we're hopping out of RUN */
4688 		iwm_disable_beacon_filter(sc);
4689 		if ((in = IWM_NODE(vap->iv_bss)) != NULL)
4690 			in->in_assoc = 0;
4691 	}
4692 
4693 	if ((vap->iv_state == IEEE80211_S_AUTH ||
4694 	     vap->iv_state == IEEE80211_S_ASSOC ||
4695 	     vap->iv_state == IEEE80211_S_RUN) &&
4696 	    (nstate == IEEE80211_S_INIT ||
4697 	     nstate == IEEE80211_S_SCAN ||
4698 	     nstate == IEEE80211_S_AUTH)) {
4699 		iwm_stop_session_protection(sc, ivp);
4700 	}
4701 
4702 	if ((vap->iv_state == IEEE80211_S_RUN ||
4703 	     vap->iv_state == IEEE80211_S_ASSOC) &&
4704 	    nstate == IEEE80211_S_INIT) {
4705 		/*
4706 		 * In this case, iv_newstate() wants to send an 80211 frame on
4707 		 * the network that we are leaving. So we need to call it,
4708 		 * before tearing down all the firmware state.
4709 		 */
4710 		IWM_UNLOCK(sc);
4711 		IEEE80211_LOCK(ic);
4712 		ivp->iv_newstate(vap, nstate, arg);
4713 		IEEE80211_UNLOCK(ic);
4714 		IWM_LOCK(sc);
4715 		iwm_bring_down_firmware(sc, vap);
4716 		IWM_UNLOCK(sc);
4717 		IEEE80211_LOCK(ic);
4718 		return 0;
4719 	}
4720 
4721 	switch (nstate) {
4722 	case IEEE80211_S_INIT:
4723 	case IEEE80211_S_SCAN:
4724 		break;
4725 
4726 	case IEEE80211_S_AUTH:
4727 		iwm_bring_down_firmware(sc, vap);
4728 		if ((error = iwm_auth(vap, sc)) != 0) {
4729 			device_printf(sc->sc_dev,
4730 			    "%s: could not move to auth state: %d\n",
4731 			    __func__, error);
4732 			iwm_bring_down_firmware(sc, vap);
4733 			IWM_UNLOCK(sc);
4734 			IEEE80211_LOCK(ic);
4735 			return 1;
4736 		}
4737 		break;
4738 
4739 	case IEEE80211_S_ASSOC:
4740 		/*
4741 		 * EBS may be disabled due to previous failures reported by FW.
4742 		 * Reset the EBS status here, assuming the environment has changed.
4743 		 */
4744 		sc->last_ebs_successful = TRUE;
4745 		break;
4746 
4747 	case IEEE80211_S_RUN:
4748 		in = IWM_NODE(vap->iv_bss);
4749 		/* Update the association state, now that we have it all */
4750 		/* (e.g., the associd comes in at this point). */
4751 		error = iwm_update_sta(sc, in);
4752 		if (error != 0) {
4753 			device_printf(sc->sc_dev,
4754 			    "%s: failed to update STA\n", __func__);
4755 			IWM_UNLOCK(sc);
4756 			IEEE80211_LOCK(ic);
4757 			return error;
4758 		}
4759 		in->in_assoc = 1;
4760 		error = iwm_mac_ctxt_changed(sc, vap);
4761 		if (error != 0) {
4762 			device_printf(sc->sc_dev,
4763 			    "%s: failed to update MAC: %d\n", __func__, error);
4764 		}
4765 
4766 		iwm_sf_update(sc, vap, FALSE);
4767 		iwm_enable_beacon_filter(sc, ivp);
4768 		iwm_power_update_mac(sc);
4769 		iwm_update_quotas(sc, ivp);
4770 		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4771 		iwm_setrates(sc, in, rix);
4772 
4773 		if ((error = iwm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4774 			device_printf(sc->sc_dev,
4775 			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4776 		}
4777 
4778 		iwm_led_enable(sc);
4779 		break;
4780 
4781 	default:
4782 		break;
4783 	}
4784 	IWM_UNLOCK(sc);
4785 	IEEE80211_LOCK(ic);
4786 
4787 	return (ivp->iv_newstate(vap, nstate, arg));
4788 }
4789 
4790 void
4791 iwm_endscan_cb(void *arg, int pending)
4792 {
4793 	struct iwm_softc *sc = arg;
4794 	struct ieee80211com *ic = &sc->sc_ic;
4795 
4796 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4797 	    "%s: scan ended\n",
4798 	    __func__);
4799 
4800 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4801 }
4802 
4803 static int
4804 iwm_send_bt_init_conf(struct iwm_softc *sc)
4805 {
4806 	struct iwm_bt_coex_cmd bt_cmd;
4807 
4808 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4809 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4810 
4811 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4812 	    &bt_cmd);
4813 }
4814 
4815 static boolean_t
4816 iwm_is_lar_supported(struct iwm_softc *sc)
4817 {
4818 	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4819 	boolean_t tlv_lar = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4820 
4821 	if (iwm_lar_disable)
4822 		return FALSE;
4823 
4824 	/*
4825 	 * Enable LAR only if it is supported by the FW (TLV) and
4826 	 * enabled in the NVM.
4827 	 */
4828 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
4829 		return nvm_lar && tlv_lar;
4830 	else
4831 		return tlv_lar;
4832 }
4833 
4834 static boolean_t
4835 iwm_is_wifi_mcc_supported(struct iwm_softc *sc)
4836 {
4837 	return iwm_fw_has_api(sc, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4838 	    iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4839 }
4840 
4841 static int
4842 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4843 {
4844 	struct iwm_mcc_update_cmd mcc_cmd;
4845 	struct iwm_host_cmd hcmd = {
4846 		.id = IWM_MCC_UPDATE_CMD,
4847 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4848 		.data = { &mcc_cmd },
4849 	};
4850 	int ret;
4851 #ifdef IWM_DEBUG
4852 	struct iwm_rx_packet *pkt;
4853 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4854 	struct iwm_mcc_update_resp *mcc_resp;
4855 	int n_channels;
4856 	uint16_t mcc;
4857 #endif
4858 	int resp_v2 = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4859 
4860 	if (!iwm_is_lar_supported(sc)) {
4861 		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4862 		    __func__);
4863 		return 0;
4864 	}
4865 
4866 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4867 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
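	/* E.g. the "ZZ" argument passed from iwm_init_hw() encodes as 0x5a5a. */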
4868 	if (iwm_is_wifi_mcc_supported(sc))
4869 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4870 	else
4871 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4872 
4873 	if (resp_v2)
4874 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4875 	else
4876 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4877 
4878 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4879 	    "send MCC update to FW with '%c%c' src = %d\n",
4880 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4881 
4882 	ret = iwm_send_cmd(sc, &hcmd);
4883 	if (ret)
4884 		return ret;
4885 
4886 #ifdef IWM_DEBUG
4887 	pkt = hcmd.resp_pkt;
4888 
4889 	/* Extract MCC response */
4890 	if (resp_v2) {
4891 		mcc_resp = (void *)pkt->data;
4892 		mcc = mcc_resp->mcc;
4893 		n_channels =  le32toh(mcc_resp->n_channels);
4894 	} else {
4895 		mcc_resp_v1 = (void *)pkt->data;
4896 		mcc = mcc_resp_v1->mcc;
4897 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4898 	}
4899 
4900 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4901 	if (mcc == 0)
4902 		mcc = 0x3030;  /* "00" - world */
4903 
4904 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4905 	    "regulatory domain '%c%c' (%d channels available)\n",
4906 	    mcc >> 8, mcc & 0xff, n_channels);
4907 #endif
4908 	iwm_free_resp(sc, &hcmd);
4909 
4910 	return 0;
4911 }
4912 
4913 static void
4914 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4915 {
4916 	struct iwm_host_cmd cmd = {
4917 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4918 		.len = { sizeof(uint32_t), },
4919 		.data = { &backoff, },
4920 	};
4921 
4922 	if (iwm_send_cmd(sc, &cmd) != 0) {
4923 		device_printf(sc->sc_dev,
4924 		    "failed to change thermal tx backoff\n");
4925 	}
4926 }
4927 
4928 static int
4929 iwm_init_hw(struct iwm_softc *sc)
4930 {
4931 	struct ieee80211com *ic = &sc->sc_ic;
4932 	int error, i, ac;
4933 
4934 	sc->sf_state = IWM_SF_UNINIT;
4935 
4936 	if ((error = iwm_start_hw(sc)) != 0) {
4937 		kprintf("iwm_start_hw: failed %d\n", error);
4938 		return error;
4939 	}
4940 
4941 	if ((error = iwm_run_init_ucode(sc, 0)) != 0) {
4942 		kprintf("iwm_run_init_ucode: failed %d\n", error);
4943 		return error;
4944 	}
4945 
4946 	/*
4947 	 * We should stop and restart the HW since the INIT
4948 	 * image has just been loaded.
4949 	 */
4950 	iwm_stop_device(sc);
4951 	sc->sc_ps_disabled = FALSE;
4952 	if ((error = iwm_start_hw(sc)) != 0) {
4953 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4954 		return error;
4955 	}
4956 
4957 	/* Restart, this time with the regular firmware. */
4958 	error = iwm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4959 	if (error) {
4960 		device_printf(sc->sc_dev, "could not load firmware\n");
4961 		goto error;
4962 	}
4963 
4964 	error = iwm_sf_update(sc, NULL, FALSE);
4965 	if (error)
4966 		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4967 
4968 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4969 		device_printf(sc->sc_dev, "bt init conf failed\n");
4970 		goto error;
4971 	}
4972 
4973 	error = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
4974 	if (error != 0) {
4975 		device_printf(sc->sc_dev, "antenna config failed\n");
4976 		goto error;
4977 	}
4978 
4979 	/* Send phy db control command and then phy db calibration */
4980 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4981 		goto error;
4982 
4983 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4984 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4985 		goto error;
4986 	}
4987 
4988 	/* Add auxiliary station for scanning */
4989 	if ((error = iwm_add_aux_sta(sc)) != 0) {
4990 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4991 		goto error;
4992 	}
4993 
4994 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4995 		/*
4996 		 * The channel used here isn't relevant as it's
4997 		 * going to be overwritten in the other flows.
4998 		 * For now use the first channel we have.
4999 		 */
5000 		if ((error = iwm_phy_ctxt_add(sc,
5001 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
5002 			goto error;
5003 	}
5004 
5005 	/* Initialize tx backoffs to the minimum. */
5006 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
5007 		iwm_tt_tx_backoff(sc, 0);
5008 
5009 	if (iwm_config_ltr(sc) != 0)
5010 		device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
5011 
5012 	error = iwm_power_update_device(sc);
5013 	if (error)
5014 		goto error;
5015 
5016 	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
5017 		goto error;
5018 
5019 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
5020 		if ((error = iwm_config_umac_scan(sc)) != 0)
5021 			goto error;
5022 	}
5023 
5024 	/* Enable Tx queues. */
5025 	for (ac = 0; ac < WME_NUM_AC; ac++) {
5026 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
5027 		    iwm_ac_to_tx_fifo[ac]);
5028 		if (error)
5029 			goto error;
5030 	}
5031 
5032 	if ((error = iwm_disable_beacon_filter(sc)) != 0) {
5033 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
5034 		goto error;
5035 	}
5036 
5037 	return 0;
5038 
5039  error:
5040 	iwm_stop_device(sc);
5041 	return error;
5042 }
5043 
5044 /* Allow multicast from our BSSID. */
5045 static int
5046 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
5047 {
5048 	struct ieee80211_node *ni = vap->iv_bss;
5049 	struct iwm_mcast_filter_cmd *cmd;
5050 	size_t size;
5051 	int error;
5052 
5053 	size = roundup(sizeof(*cmd), 4);
5054 	cmd = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
5057 	cmd->filter_own = 1;
5058 	cmd->port_id = 0;
5059 	cmd->count = 0;
5060 	cmd->pass_all = 1;
5061 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
5062 
5063 	error = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
5064 	    IWM_CMD_SYNC, size, cmd);
5065 	kfree(cmd, M_DEVBUF);
5066 
5067 	return (error);
5068 }
5069 
5070 /*
5071  * ifnet interfaces
5072  */
5073 
5074 static void
5075 iwm_init(struct iwm_softc *sc)
5076 {
5077 	int error;
5078 
5079 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
5080 		return;
5081 	}
5082 	sc->sc_generation++;
5083 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
5084 
5085 	if ((error = iwm_init_hw(sc)) != 0) {
5086 		kprintf("iwm_init_hw failed %d\n", error);
5087 		iwm_stop(sc);
5088 		return;
5089 	}
5090 
5091 	/*
5092 	 * Ok, firmware loaded and we are jogging
5093 	 */
5094 	sc->sc_flags |= IWM_FLAG_HW_INITED;
5095 }
5096 
5097 static int
5098 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
5099 {
5100 	struct iwm_softc *sc;
5101 	int error;
5102 
5103 	sc = ic->ic_softc;
5104 
5105 	IWM_LOCK(sc);
5106 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
5107 		IWM_UNLOCK(sc);
5108 		return (ENXIO);
5109 	}
5110 	error = mbufq_enqueue(&sc->sc_snd, m);
5111 	if (error) {
5112 		IWM_UNLOCK(sc);
5113 		return (error);
5114 	}
5115 	iwm_start(sc);
5116 	IWM_UNLOCK(sc);
5117 	return (0);
5118 }
5119 
5120 /*
5121  * Dequeue packets from sendq and call send.
5122  */
5123 static void
5124 iwm_start(struct iwm_softc *sc)
5125 {
5126 	struct ieee80211_node *ni;
5127 	struct mbuf *m;
5128 	int ac = 0;
5129 
5130 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5131 	while (sc->qfullmsk == 0 &&
5132 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5133 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5134 		if (iwm_tx(sc, m, ni, ac) != 0) {
5135 			if_inc_counter(ni->ni_vap->iv_ifp,
5136 			    IFCOUNTER_OERRORS, 1);
5137 			ieee80211_free_node(ni);
5138 			continue;
5139 		}
5140 		if (sc->sc_tx_timer == 0) {
5141 			callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
5142 			    sc);
5143 		}
5144 		sc->sc_tx_timer = 15;
5145 	}
5146 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5147 }
5148 
5149 static void
5150 iwm_stop(struct iwm_softc *sc)
5151 {
5152 
5153 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
5154 	sc->sc_flags |= IWM_FLAG_STOPPED;
5155 	sc->sc_generation++;
5156 	iwm_led_blink_stop(sc);
5157 	sc->sc_tx_timer = 0;
5158 	iwm_stop_device(sc);
5159 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5160 }
5161 
5162 static void
5163 iwm_watchdog(void *arg)
5164 {
5165 	struct iwm_softc *sc = arg;
5166 	struct ieee80211com *ic = &sc->sc_ic;
5167 
5168 	if (sc->sc_attached == 0)
5169 		return;
5170 
5171 	if (sc->sc_tx_timer > 0) {
5172 		if (--sc->sc_tx_timer == 0) {
5173 			device_printf(sc->sc_dev, "device timeout\n");
5174 #ifdef IWM_DEBUG
5175 			iwm_nic_error(sc);
5176 #endif
5177 			ieee80211_restart_all(ic);
5178 #if defined(__DragonFly__)
5179 			++sc->sc_ic.ic_oerrors;
5180 #else
5181 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5182 #endif
5183 			return;
5184 		}
5185 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5186 	}
5187 }
5188 
5189 static void
5190 iwm_parent(struct ieee80211com *ic)
5191 {
5192 	struct iwm_softc *sc = ic->ic_softc;
5193 	int startall = 0;
5194 
5195 	IWM_LOCK(sc);
5196 	if (ic->ic_nrunning > 0) {
5197 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5198 			iwm_init(sc);
5199 			startall = 1;
5200 		}
5201 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5202 		iwm_stop(sc);
5203 	IWM_UNLOCK(sc);
5204 	if (startall)
5205 		ieee80211_start_all(ic);
5206 }
5207 
5208 /*
5209  * The interrupt side of things
5210  */
5211 
5212 /*
5213  * error dumping routines are from iwlwifi/mvm/utils.c
5214  */
5215 
5216 /*
5217  * Note: This structure is read from the device with IO accesses,
5218  * and the reading already does the endian conversion. As it is
5219  * read with uint32_t-sized accesses, any members with a different size
5220  * need to be ordered correctly though!
5221  */
5222 struct iwm_error_event_table {
5223 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5224 	uint32_t error_id;		/* type of error */
5225 	uint32_t trm_hw_status0;	/* TRM HW status */
5226 	uint32_t trm_hw_status1;	/* TRM HW status */
5227 	uint32_t blink2;		/* branch link */
5228 	uint32_t ilink1;		/* interrupt link */
5229 	uint32_t ilink2;		/* interrupt link */
5230 	uint32_t data1;		/* error-specific data */
5231 	uint32_t data2;		/* error-specific data */
5232 	uint32_t data3;		/* error-specific data */
5233 	uint32_t bcon_time;		/* beacon timer */
5234 	uint32_t tsf_low;		/* network timestamp function timer */
5235 	uint32_t tsf_hi;		/* network timestamp function timer */
5236 	uint32_t gp1;		/* GP1 timer register */
5237 	uint32_t gp2;		/* GP2 timer register */
5238 	uint32_t fw_rev_type;	/* firmware revision type */
5239 	uint32_t major;		/* uCode version major */
5240 	uint32_t minor;		/* uCode version minor */
5241 	uint32_t hw_ver;		/* HW Silicon version */
5242 	uint32_t brd_ver;		/* HW board version */
5243 	uint32_t log_pc;		/* log program counter */
5244 	uint32_t frame_ptr;		/* frame pointer */
5245 	uint32_t stack_ptr;		/* stack pointer */
5246 	uint32_t hcmd;		/* last host command header */
5247 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
5248 				 * rxtx_flag */
5249 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
5250 				 * host_flag */
5251 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
5252 				 * enc_flag */
5253 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
5254 				 * time_flag */
5255 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
5256 				 * wico interrupt */
5257 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
5258 	uint32_t wait_event;		/* wait event() caller address */
5259 	uint32_t l2p_control;	/* L2pControlField */
5260 	uint32_t l2p_duration;	/* L2pDurationField */
5261 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
5262 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
5263 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
5264 				 * (LMPM_PMG_SEL) */
5265 	uint32_t u_timestamp;	/* date and time of the uCode
5266 				 * compilation */
5267 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
5268 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5269 
5270 /*
5271  * UMAC error struct - relevant starting from family 8000 chip.
5272  * Note: This structure is read from the device with IO accesses,
5273  * and the reading already does the endian conversion. As it is
5274  * read with u32-sized accesses, any members with a different size
5275  * need to be ordered correctly though!
5276  */
5277 struct iwm_umac_error_event_table {
5278 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5279 	uint32_t error_id;	/* type of error */
5280 	uint32_t blink1;	/* branch link */
5281 	uint32_t blink2;	/* branch link */
5282 	uint32_t ilink1;	/* interrupt link */
5283 	uint32_t ilink2;	/* interrupt link */
5284 	uint32_t data1;		/* error-specific data */
5285 	uint32_t data2;		/* error-specific data */
5286 	uint32_t data3;		/* error-specific data */
5287 	uint32_t umac_major;
5288 	uint32_t umac_minor;
5289 	uint32_t frame_pointer;	/* core register 27*/
5290 	uint32_t stack_pointer;	/* core register 28 */
5291 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
5292 	uint32_t nic_isr_pref;	/* ISR status register */
5293 } __packed;
5294 
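/*
 * Error log layout: the first 32-bit word is the 'valid' count and
 * each log element is seven words.  These constants are only used in
 * the sanity checks below.
 */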
5295 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5296 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5297 
5298 #ifdef IWM_DEBUG
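/*
 * Map firmware error IDs to symbolic names.  The final
 * ADVANCED_SYSASSERT entry doubles as the fallback for IDs we do not
 * recognize (see iwm_desc_lookup()).
 */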
5299 struct {
5300 	const char *name;
5301 	uint8_t num;
5302 } advanced_lookup[] = {
5303 	{ "NMI_INTERRUPT_WDG", 0x34 },
5304 	{ "SYSASSERT", 0x35 },
5305 	{ "UCODE_VERSION_MISMATCH", 0x37 },
5306 	{ "BAD_COMMAND", 0x38 },
5307 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5308 	{ "FATAL_ERROR", 0x3D },
5309 	{ "NMI_TRM_HW_ERR", 0x46 },
5310 	{ "NMI_INTERRUPT_TRM", 0x4C },
5311 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5312 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5313 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5314 	{ "NMI_INTERRUPT_HOST", 0x66 },
5315 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5316 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5317 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5318 	{ "ADVANCED_SYSASSERT", 0 },
5319 };
5320 
5321 static const char *
5322 iwm_desc_lookup(uint32_t num)
5323 {
5324 	int i;
5325 
5326 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5327 		if (advanced_lookup[i].num == num)
5328 			return advanced_lookup[i].name;
5329 
5330 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5331 	return advanced_lookup[i].name;
5332 }
5333 
5334 static void
5335 iwm_nic_umac_error(struct iwm_softc *sc)
5336 {
5337 	struct iwm_umac_error_event_table table;
5338 	uint32_t base;
5339 
5340 	base = sc->umac_error_event_table;
5341 
5342 	if (base < 0x800000) {
5343 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5344 		    base);
5345 		return;
5346 	}
5347 
5348 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5349 		device_printf(sc->sc_dev, "reading errlog failed\n");
5350 		return;
5351 	}
5352 
5353 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5354 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5355 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5356 		    sc->sc_flags, table.valid);
5357 	}
5358 
5359 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5360 		iwm_desc_lookup(table.error_id));
5361 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5362 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5363 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5364 	    table.ilink1);
5365 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5366 	    table.ilink2);
5367 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5368 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5369 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5370 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5371 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5372 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5373 	    table.frame_pointer);
5374 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5375 	    table.stack_pointer);
5376 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5377 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5378 	    table.nic_isr_pref);
5379 }
5380 
5381 /*
5382  * Support for dumping the error log seemed like a good idea ...
5383  * but it's mostly hex junk and the only sensible thing is the
5384  * hw/ucode revision (which we know anyway).  Since it's here,
5385  * I'll just leave it in, just in case e.g. the Intel guys want to
5386  * help us decipher some "ADVANCED_SYSASSERT" later.
5387  */
5388 static void
5389 iwm_nic_error(struct iwm_softc *sc)
5390 {
5391 	struct iwm_error_event_table table;
5392 	uint32_t base;
5393 
5394 	device_printf(sc->sc_dev, "dumping device error log\n");
5395 	base = sc->error_event_table[0];
5396 	if (base < 0x800000) {
5397 		device_printf(sc->sc_dev,
5398 		    "Invalid error log pointer 0x%08x\n", base);
5399 		return;
5400 	}
5401 
5402 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5403 		device_printf(sc->sc_dev, "reading errlog failed\n");
5404 		return;
5405 	}
5406 
5407 	if (!table.valid) {
5408 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5409 		return;
5410 	}
5411 
5412 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5413 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5414 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5415 		    sc->sc_flags, table.valid);
5416 	}
5417 
5418 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5419 	    iwm_desc_lookup(table.error_id));
5420 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5421 	    table.trm_hw_status0);
5422 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5423 	    table.trm_hw_status1);
5424 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5425 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5426 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5427 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5428 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5429 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5430 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5431 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5432 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5433 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5434 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5435 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5436 	    table.fw_rev_type);
5437 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5438 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5439 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5440 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5441 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5442 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5443 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5444 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5445 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5446 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5447 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5448 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5449 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5450 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5451 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5452 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5453 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5454 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5455 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5456 
5457 	if (sc->umac_error_event_table)
5458 		iwm_nic_umac_error(sc);
5459 }
5460 #endif
5461 
5462 static void
5463 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5464 {
5465 	struct ieee80211com *ic = &sc->sc_ic;
5466 	struct iwm_cmd_response *cresp;
5467 	struct mbuf *m1;
5468 	uint32_t offset = 0;
5469 	uint32_t maxoff = IWM_RBUF_SIZE;
5470 	uint32_t nextoff;
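	/*
	 * Set once iwm_rx_mpdu() has accepted a copy of this buffer; the
	 * original mbuf must then be freed at the bottom of this function.
	 */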
5471 	boolean_t stolen = FALSE;
5472 
5473 #define HAVEROOM(a)	\
5474     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5475 
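	/*
	 * A single RX buffer can hold several packed packets and
	 * notifications; walk them one at a time, each one aligned to
	 * IWM_FH_RSCSR_FRAME_ALIGN.
	 */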
5476 	while (HAVEROOM(offset)) {
5477 		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5478 		    offset);
5479 		int qid, idx, code, len;
5480 
5481 		qid = pkt->hdr.qid;
5482 		idx = pkt->hdr.idx;
5483 
5484 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5485 
5486 		/*
5487 		 * We randomly get these from the firmware; no idea why.
5488 		 * They at least seem harmless, so just ignore them for now.
5489 		 */
5490 		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5491 		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5492 			break;
5493 		}
5494 
5495 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5496 		    "rx packet qid=%d idx=%d type=%x\n",
5497 		    qid & ~0x80, pkt->hdr.idx, code);
5498 
5499 		len = iwm_rx_packet_len(pkt);
5500 		len += sizeof(uint32_t); /* account for status word */
5501 		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5502 
5503 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5504 
5505 		switch (code) {
5506 		case IWM_REPLY_RX_PHY_CMD:
5507 			iwm_rx_rx_phy_cmd(sc, pkt);
5508 			break;
5509 
5510 		case IWM_REPLY_RX_MPDU_CMD: {
5511 			/*
5512 			 * If this is the last frame in the RX buffer, we
5513 			 * can directly feed the mbuf to the sharks here.
5514 			 */
5515 			struct iwm_rx_packet *nextpkt = mtodoff(m,
5516 			    struct iwm_rx_packet *, nextoff);
5517 			if (!HAVEROOM(nextoff) ||
5518 			    (nextpkt->hdr.code == 0 &&
5519 			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5520 			     nextpkt->hdr.idx == 0) ||
5521 			    (nextpkt->len_n_flags ==
5522 			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5523 				if (iwm_rx_mpdu(sc, m, offset, stolen)) {
5524 					stolen = FALSE;
5525 					/* Make sure we abort the loop */
5526 					nextoff = maxoff;
5527 				}
5528 				break;
5529 			}
5530 
5531 			/*
5532 			 * Use m_copym() instead of m_split(), because that
5533 			 * makes it easier to keep a valid rx buffer in the
5534 			 * ring when iwm_rx_mpdu() fails.
5535 			 *
5536 			 * We need to start m_copym() at offset 0, to get the
5537 			 * M_PKTHDR flag preserved.
5538 			 */
5539 			m1 = m_copym(m, 0, M_COPYALL, M_WAITOK);
5540 			if (m1) {
5541 				if (iwm_rx_mpdu(sc, m1, offset, stolen))
5542 					stolen = TRUE;
5543 				else
5544 					m_freem(m1);
5545 			}
5546 			break;
5547 		}
5548 
5549 		case IWM_TX_CMD:
5550 			iwm_rx_tx_cmd(sc, pkt);
5551 			break;
5552 
5553 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5554 			struct iwm_missed_beacons_notif *resp;
5555 			int missed;
5556 
5557 			/* XXX look at mac_id to determine interface ID */
5558 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5559 
5560 			resp = (void *)pkt->data;
5561 			missed = le32toh(resp->consec_missed_beacons);
5562 
5563 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5564 			    "%s: MISSED_BEACON: mac_id=%d, "
5565 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5566 			    "num_rx=%d\n",
5567 			    __func__,
5568 			    le32toh(resp->mac_id),
5569 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5570 			    le32toh(resp->consec_missed_beacons),
5571 			    le32toh(resp->num_expected_beacons),
5572 			    le32toh(resp->num_recvd_beacons));
5573 
5574 			/* Be paranoid */
5575 			if (vap == NULL)
5576 				break;
5577 
5578 			/* XXX no net80211 locking? */
5579 			if (vap->iv_state == IEEE80211_S_RUN &&
5580 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5581 				if (missed > vap->iv_bmissthreshold) {
5582 					/* XXX bad locking; turn into task */
5583 					IWM_UNLOCK(sc);
5584 					ieee80211_beacon_miss(ic);
5585 					IWM_LOCK(sc);
5586 				}
5587 			}
5588 
5589 			break;
5590 		}
5591 
5592 		case IWM_MFUART_LOAD_NOTIFICATION:
5593 			break;
5594 
5595 		case IWM_ALIVE:
5596 			break;
5597 
5598 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5599 			break;
5600 
5601 		case IWM_STATISTICS_NOTIFICATION:
5602 			iwm_handle_rx_statistics(sc, pkt);
5603 			break;
5604 
5605 		case IWM_NVM_ACCESS_CMD:
5606 		case IWM_MCC_UPDATE_CMD:
5607 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5608 				memcpy(sc->sc_cmd_resp,
5609 				    pkt, sizeof(sc->sc_cmd_resp));
5610 			}
5611 			break;
5612 
5613 		case IWM_MCC_CHUB_UPDATE_CMD: {
5614 			struct iwm_mcc_chub_notif *notif;
5615 			notif = (void *)pkt->data;
5616 
5617 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5618 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5619 			sc->sc_fw_mcc[2] = '\0';
5620 			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5621 			    "fw source %d sent CC '%s'\n",
5622 			    notif->source_id, sc->sc_fw_mcc);
5623 			break;
5624 		}
5625 
5626 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5627 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5628 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5629 			struct iwm_dts_measurement_notif_v1 *notif;
5630 
5631 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5632 				device_printf(sc->sc_dev,
5633 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5634 				break;
5635 			}
5636 			notif = (void *)pkt->data;
5637 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5638 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5639 			    notif->temp);
5640 			break;
5641 		}
5642 
5643 		case IWM_PHY_CONFIGURATION_CMD:
5644 		case IWM_TX_ANT_CONFIGURATION_CMD:
5645 		case IWM_ADD_STA:
5646 		case IWM_MAC_CONTEXT_CMD:
5647 		case IWM_REPLY_SF_CFG_CMD:
5648 		case IWM_POWER_TABLE_CMD:
5649 		case IWM_LTR_CONFIG:
5650 		case IWM_PHY_CONTEXT_CMD:
5651 		case IWM_BINDING_CONTEXT_CMD:
5652 		case IWM_TIME_EVENT_CMD:
5653 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5654 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5655 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5656 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5657 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5658 		case IWM_REPLY_BEACON_FILTERING_CMD:
5659 		case IWM_MAC_PM_POWER_TABLE:
5660 		case IWM_TIME_QUOTA_CMD:
5661 		case IWM_REMOVE_STA:
5662 		case IWM_TXPATH_FLUSH:
5663 		case IWM_LQ_CMD:
5664 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5665 				 IWM_FW_PAGING_BLOCK_CMD):
5666 		case IWM_BT_CONFIG:
5667 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5668 			cresp = (void *)pkt->data;
5669 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5670 				memcpy(sc->sc_cmd_resp,
5671 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5672 			}
5673 			break;
5674 
5675 		/* ignore */
5676 		case IWM_PHY_DB_CMD:
5677 			break;
5678 
5679 		case IWM_INIT_COMPLETE_NOTIF:
5680 			break;
5681 
5682 		case IWM_SCAN_OFFLOAD_COMPLETE:
5683 			iwm_rx_lmac_scan_complete_notif(sc, pkt);
5684 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5685 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5686 				ieee80211_runtask(ic, &sc->sc_es_task);
5687 			}
5688 			break;
5689 
5690 		case IWM_SCAN_ITERATION_COMPLETE: {
5691 			struct iwm_lmac_scan_complete_notif *notif;
5692 			notif = (void *)pkt->data;
5693 			break;
5694 		}
5695 
5696 		case IWM_SCAN_COMPLETE_UMAC:
5697 			iwm_rx_umac_scan_complete_notif(sc, pkt);
5698 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5699 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5700 				ieee80211_runtask(ic, &sc->sc_es_task);
5701 			}
5702 			break;
5703 
5704 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5705 			struct iwm_umac_scan_iter_complete_notif *notif;
5706 			notif = (void *)pkt->data;
5707 
5708 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5709 			    "complete, status=0x%x, %d channels scanned\n",
5710 			    notif->status, notif->scanned_channels);
5711 			break;
5712 		}
5713 
5714 		case IWM_REPLY_ERROR: {
5715 			struct iwm_error_resp *resp;
5716 			resp = (void *)pkt->data;
5717 
5718 			device_printf(sc->sc_dev,
5719 			    "firmware error 0x%x, cmd 0x%x\n",
5720 			    le32toh(resp->error_type),
5721 			    resp->cmd_id);
5722 			break;
5723 		}
5724 
5725 		case IWM_TIME_EVENT_NOTIFICATION:
5726 			iwm_rx_time_event_notif(sc, pkt);
5727 			break;
5728 
5729 		/*
5730 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5731 		 * messages. Just ignore them for now.
5732 		 */
5733 		case IWM_DEBUG_LOG_MSG:
5734 			break;
5735 
5736 		case IWM_MCAST_FILTER_CMD:
5737 			break;
5738 
5739 		case IWM_SCD_QUEUE_CFG: {
5740 			struct iwm_scd_txq_cfg_rsp *rsp;
5741 			rsp = (void *)pkt->data;
5742 
5743 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5744 			    "queue cfg token=0x%x sta_id=%d "
5745 			    "tid=%d scd_queue=%d\n",
5746 			    rsp->token, rsp->sta_id, rsp->tid,
5747 			    rsp->scd_queue);
5748 			break;
5749 		}
5750 
5751 		default:
5752 			device_printf(sc->sc_dev,
5753 			    "frame %d/%d %x UNHANDLED (this should "
5754 			    "not happen)\n", qid & ~0x80, idx,
5755 			    pkt->len_n_flags);
5756 			break;
5757 		}
5758 
5759 		/*
5760 		 * Why test bit 0x80?  The Linux driver:
5761 		 *
5762 		 * There is one exception:  uCode sets bit 15 when it
5763 		 * originates the response/notification, i.e. when the
5764 		 * response/notification is not a direct response to a
5765 		 * command sent by the driver.  For example, uCode issues
5766 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5767 		 * it is not a direct response to any driver command.
5768 		 *
5769 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5770 		 * uses a slightly different format for pkt->hdr, and "qid"
5771 		 * is actually the upper byte of a two-byte field.
5772 		 */
5773 		if (!(qid & (1 << 7)))
5774 			iwm_cmd_done(sc, pkt);
5775 
5776 		offset = nextoff;
5777 	}
5778 	if (stolen)
5779 		m_freem(m);
5780 #undef HAVEROOM
5781 }
5782 
5783 /*
5784  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5785  * Basic structure from if_iwn
5786  */
5787 static void
5788 iwm_notif_intr(struct iwm_softc *sc)
5789 {
5790 	int count;
5791 	uint32_t wreg;
5792 	uint16_t hw;
5793 
5794 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5795 	    BUS_DMASYNC_POSTREAD);
5796 
5797 	if (sc->cfg->mqrx_supported) {
5798 		count = IWM_RX_MQ_RING_COUNT;
5799 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
5800 	} else {
5801 		count = IWM_RX_LEGACY_RING_COUNT;
5802 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
5803 	}
5804 
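	/*
	 * closed_rb_num reports how far the hardware has advanced in the
	 * RX ring; only the low 12 bits are meaningful.
	 */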
5805 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5806 
5807 	/*
5808 	 * Process responses
5809 	 */
5810 	while (sc->rxq.cur != hw) {
5811 		struct iwm_rx_ring *ring = &sc->rxq;
5812 		struct iwm_rx_data *data = &ring->data[ring->cur];
5813 
5814 		bus_dmamap_sync(ring->data_dmat, data->map,
5815 		    BUS_DMASYNC_POSTREAD);
5816 
5817 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5818 		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5819 		iwm_handle_rxb(sc, data->m);
5820 
5821 		ring->cur = (ring->cur + 1) % count;
5822 	}
5823 
5824 	/*
5825 	 * Tell the firmware that it can reuse the ring entries that
5826 	 * we have just processed.
5827 	 * Seems like the hardware gets upset unless we align
5828 	 * the write by 8??
5829 	 */
5830 	hw = (hw == 0) ? count - 1 : hw - 1;
5831 	IWM_WRITE(sc, wreg, rounddown2(hw, 8));
5832 }
5833 
5834 static void
5835 iwm_intr(void *arg)
5836 {
5837 	struct iwm_softc *sc = arg;
5838 	int handled = 0;
5839 	int r1, r2, rv = 0;
5840 	int isperiodic = 0;
5841 
5842 #if defined(__DragonFly__)
5843 	if (sc->sc_mem == NULL) {
5844 		kprintf("iwm_intr: detached\n");
5845 		return;
5846 	}
5847 #endif
5848 
5849 	IWM_LOCK(sc);
5850 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5851 
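	/*
	 * With ICT enabled, the device DMAs interrupt-cause words into a
	 * table in host memory.  Drain every non-zero entry and fold the
	 * result into a CSR-style bitmask (the shifting below).
	 */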
5852 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5853 		uint32_t *ict = sc->ict_dma.vaddr;
5854 		int tmp;
5855 
5856 		tmp = htole32(ict[sc->ict_cur]);
5857 		if (!tmp)
5858 			goto out_ena;
5859 
5860 		/*
5861 		 * ok, there was something.  keep plowing until we have all.
5862 		 */
5863 		r1 = r2 = 0;
5864 		while (tmp) {
5865 			r1 |= tmp;
5866 			ict[sc->ict_cur] = 0;
5867 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5868 			tmp = htole32(ict[sc->ict_cur]);
5869 		}
5870 
5871 		/* this is where the fun begins.  don't ask */
5872 		if (r1 == 0xffffffff)
5873 			r1 = 0;
5874 
5875 		/* i am not expected to understand this */
5876 		if (r1 & 0xc0000)
5877 			r1 |= 0x8000;
5878 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5879 	} else {
5880 		r1 = IWM_READ(sc, IWM_CSR_INT);
5881 		/* "hardware gone" (where, fishing?) */
5882 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5883 			goto out;
5884 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5885 	}
5886 	if (r1 == 0 && r2 == 0) {
5887 		goto out_ena;
5888 	}
5889 
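	/* ACK the interrupt causes we found, plus any we don't care about. */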
5890 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5891 
5892 	/* Safely ignore these bits for debug checks below */
5893 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5894 
5895 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5896 		int i;
5897 		struct ieee80211com *ic = &sc->sc_ic;
5898 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5899 
5900 #ifdef IWM_DEBUG
5901 		iwm_nic_error(sc);
5902 #endif
5903 		/* Dump driver status (TX and RX rings) while we're here. */
5904 		device_printf(sc->sc_dev, "driver status:\n");
5905 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
5906 			struct iwm_tx_ring *ring = &sc->txq[i];
5907 			device_printf(sc->sc_dev,
5908 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5909 			    "queued=%-3d\n",
5910 			    i, ring->qid, ring->cur, ring->queued);
5911 		}
5912 		device_printf(sc->sc_dev,
5913 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5914 		device_printf(sc->sc_dev,
5915 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5916 
5917 		/* Reset our firmware state tracking. */
5918 		sc->sc_firmware_state = 0;
5919 		/* Don't stop the device; just do a VAP restart */
5920 		IWM_UNLOCK(sc);
5921 
5922 		if (vap == NULL) {
5923 			kprintf("%s: null vap\n", __func__);
5924 			return;
5925 		}
5926 
5927 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5928 		    "restarting\n", __func__, vap->iv_state);
5929 
5930 		ieee80211_restart_all(ic);
5931 		return;
5932 	}
5933 
5934 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5935 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5936 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5937 		iwm_stop(sc);
5938 		rv = 1;
5939 		goto out;
5940 	}
5941 
5942 	/* firmware chunk loaded */
5943 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5944 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5945 		handled |= IWM_CSR_INT_BIT_FH_TX;
5946 		sc->sc_fw_chunk_done = 1;
5947 		wakeup(&sc->sc_fw);
5948 	}
5949 
5950 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5951 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5952 		if (iwm_check_rfkill(sc)) {
5953 			device_printf(sc->sc_dev,
5954 			    "%s: rfkill switch, disabling interface\n",
5955 			    __func__);
5956 			iwm_stop(sc);
5957 		}
5958 	}
5959 
5960 	/*
5961 	 * The Linux driver uses periodic interrupts to avoid races.
5962 	 * We cargo-cult like it's going out of fashion.
5963 	 */
5964 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5965 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5966 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5967 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5968 			IWM_WRITE_1(sc,
5969 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5970 		isperiodic = 1;
5971 	}
5972 
5973 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5974 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5975 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5976 
5977 		iwm_notif_intr(sc);
5978 
5979 		/* enable periodic interrupt, see above */
5980 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5981 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5982 			    IWM_CSR_INT_PERIODIC_ENA);
5983 	}
5984 
5985 	if (__predict_false(r1 & ~handled))
5986 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5987 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5988 	rv = 1;
5989 
5990  out_ena:
5991 	iwm_restore_interrupts(sc);
5992  out:
5993 	IWM_UNLOCK(sc);
5994 	return;
5995 }
5996 
5997 /*
5998  * Autoconf glue-sniffing
5999  */
6000 #define	PCI_VENDOR_INTEL		0x8086
6001 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
6002 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
6003 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
6004 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
6005 #define	PCI_PRODUCT_INTEL_WL_3168_1	0x24fb
6006 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
6007 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
6008 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
6009 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
6010 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
6011 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
6012 #define	PCI_PRODUCT_INTEL_WL_8265_1	0x24fd
6013 #define	PCI_PRODUCT_INTEL_WL_9560_1	0x9df0
6014 #define	PCI_PRODUCT_INTEL_WL_9560_2	0xa370
6015 #define	PCI_PRODUCT_INTEL_WL_9260_1	0x2526
6016 
6017 static const struct iwm_devices {
6018 	uint16_t		device;
6019 	const struct iwm_cfg	*cfg;
6020 } iwm_devices[] = {
6021 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
6022 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
6023 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
6024 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
6025 	{ PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg },
6026 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
6027 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
6028 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
6029 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
6030 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
6031 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
6032 	{ PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
6033 	{ PCI_PRODUCT_INTEL_WL_9560_1, &iwm9560_cfg },
6034 	{ PCI_PRODUCT_INTEL_WL_9560_2, &iwm9560_cfg },
6035 	{ PCI_PRODUCT_INTEL_WL_9260_1, &iwm9260_cfg },
6036 };
6037 
6038 static int
6039 iwm_probe(device_t dev)
6040 {
6041 	int i;
6042 
6043 	for (i = 0; i < nitems(iwm_devices); i++) {
6044 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
6045 		    pci_get_device(dev) == iwm_devices[i].device) {
6046 			device_set_desc(dev, iwm_devices[i].cfg->name);
6047 			return (BUS_PROBE_DEFAULT);
6048 		}
6049 	}
6050 
6051 	return (ENXIO);
6052 }
6053 
6054 static int
6055 iwm_dev_check(device_t dev)
6056 {
6057 	struct iwm_softc *sc;
6058 	uint16_t devid;
6059 	int i;
6060 
6061 	sc = device_get_softc(dev);
6062 
6063 	devid = pci_get_device(dev);
6064 	for (i = 0; i < nitems(iwm_devices); i++) {
6065 		if (iwm_devices[i].device == devid) {
6066 			sc->cfg = iwm_devices[i].cfg;
6067 			return (0);
6068 		}
6069 	}
6070 	device_printf(dev, "unknown adapter type\n");
6071 	return ENXIO;
6072 }
6073 
6074 /* PCI registers */
6075 #define PCI_CFG_RETRY_TIMEOUT	0x041
6076 
6077 static int
6078 iwm_pci_attach(device_t dev)
6079 {
6080 	struct iwm_softc *sc;
6081 	int count, error, rid;
6082 	uint16_t reg;
6083 #if defined(__DragonFly__)
6084 	int irq_flags;
6085 #endif
6086 
6087 	sc = device_get_softc(dev);
6088 
6089 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
6090 	 * PCI Tx retries from interfering with C3 CPU state. */
6091 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6092 
6093 	/* Enable bus-mastering and hardware bug workaround. */
6094 	pci_enable_busmaster(dev);
6095 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
6096 	/* if !MSI */
6097 	if (reg & PCIM_STATUS_INTxSTATE) {
6098 		reg &= ~PCIM_STATUS_INTxSTATE;
6099 	}
6100 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
6101 
6102 	rid = PCIR_BAR(0);
6103 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
6104 	    RF_ACTIVE);
6105 	if (sc->sc_mem == NULL) {
6106 		device_printf(sc->sc_dev, "can't map mem space\n");
6107 		return (ENXIO);
6108 	}
6109 	sc->sc_st = rman_get_bustag(sc->sc_mem);
6110 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
6111 
6112 	/* Install interrupt handler. */
6113 	count = 1;
6114 	rid = 0;
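	/* rid 0 selects legacy INTx; a successful MSI allocation uses rid 1. */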
6115 #if defined(__DragonFly__)
6116 	pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
6117 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
6118 #else
6119 	if (pci_alloc_msi(dev, &count) == 0)
6120 		rid = 1;
6121 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
6122 	    (rid != 0 ? 0 : RF_SHAREABLE));
6123 #endif
6124 	if (sc->sc_irq == NULL) {
6125 		device_printf(dev, "can't map interrupt\n");
6126 		return (ENXIO);
6127 	}
6128 #if defined(__DragonFly__)
6129 	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
6130 			       iwm_intr, sc, &sc->sc_ih,
6131 			       &wlan_global_serializer);
6132 #else
6133 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
6134 	    NULL, iwm_intr, sc, &sc->sc_ih);
6135 #endif
6136 	if (sc->sc_ih == NULL) {
6137 		device_printf(dev, "can't establish interrupt\n");
6138 #if defined(__DragonFly__)
6139 		pci_release_msi(dev);
6140 #endif
6141 		return (ENXIO);
6142 	}
6143 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
6144 
6145 	return (0);
6146 }
6147 
6148 static void
6149 iwm_pci_detach(device_t dev)
6150 {
6151 	struct iwm_softc *sc = device_get_softc(dev);
6152 
6153 	if (sc->sc_irq != NULL) {
6154 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
6155 		bus_release_resource(dev, SYS_RES_IRQ,
6156 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
6157 		pci_release_msi(dev);
6158 #if defined(__DragonFly__)
6159 		sc->sc_irq = NULL;
6160 #endif
6161 	}
6162 	if (sc->sc_mem != NULL) {
6163 		bus_release_resource(dev, SYS_RES_MEMORY,
6164 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
6165 #if defined(__DragonFly__)
6166 		sc->sc_mem = NULL;
6167 #endif
6168 	}
6169 }
6170 
6171 static int
6172 iwm_attach(device_t dev)
6173 {
6174 	struct iwm_softc *sc = device_get_softc(dev);
6175 	struct ieee80211com *ic = &sc->sc_ic;
6176 	int error;
6177 	int txq_i, i;
6178 
6179 	sc->sc_dev = dev;
6180 	sc->sc_attached = 1;
6181 	IWM_LOCK_INIT(sc);
6182 	mbufq_init(&sc->sc_snd, ifqmaxlen);
6183 	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
6184 	callout_init_lk(&sc->sc_led_blink_to, &sc->sc_lk);
6185 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
6186 
6187 	error = iwm_dev_check(dev);
6188 	if (error != 0)
6189 		goto fail;
6190 
6191 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
6192 	if (sc->sc_notif_wait == NULL) {
6193 		device_printf(dev, "failed to init notification wait struct\n");
6194 		goto fail;
6195 	}
6196 
6197 	sc->sf_state = IWM_SF_UNINIT;
6198 
6199 	/* Init phy db */
6200 	sc->sc_phy_db = iwm_phy_db_init(sc);
6201 	if (!sc->sc_phy_db) {
6202 		device_printf(dev, "Cannot init phy_db\n");
6203 		goto fail;
6204 	}
6205 
6206 	/* Set EBS as successful as long as not stated otherwise by the FW. */
6207 	sc->last_ebs_successful = TRUE;
6208 
6209 	/* PCI attach */
6210 	error = iwm_pci_attach(dev);
6211 	if (error != 0)
6212 		goto fail;
6213 
6214 	sc->sc_wantresp = -1;
6215 
6216 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6217 	/*
6218 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
6219 	 * changed, and the revision step now also includes bits 0-1 (no more
6220 	 * "dash" value). To keep hw_rev backwards compatible, we'll store it
6221 	 * in the old format.
6222 	 */
6223 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
6224 		int ret;
6225 		uint32_t hw_step;
6226 
6227 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
6228 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
6229 
6230 		if (iwm_prepare_card_hw(sc) != 0) {
6231 			device_printf(dev, "could not initialize hardware\n");
6232 			goto fail;
6233 		}
6234 
6235 		/*
6236 		 * In order to recognize C step the driver should read the
6237 		 * chip version id located at the AUX bus MISC address.
6238 		 */
6239 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6240 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6241 		DELAY(2);
6242 
6243 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6244 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6245 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6246 				   25000);
6247 		if (!ret) {
6248 			device_printf(sc->sc_dev,
6249 			    "Failed to wake up the nic\n");
6250 			goto fail;
6251 		}
6252 
6253 		if (iwm_nic_lock(sc)) {
6254 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6255 			hw_step |= IWM_ENABLE_WFPM;
6256 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6257 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6258 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6259 			if (hw_step == 0x3)
6260 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6261 						(IWM_SILICON_C_STEP << 2);
6262 			iwm_nic_unlock(sc);
6263 		} else {
6264 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
6265 			goto fail;
6266 		}
6267 	}
6268 
6269 	/* Special-case 7265D; it has the same PCI IDs as the 7265. */
6270 	if (sc->cfg == &iwm7265_cfg &&
6271 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6272 		sc->cfg = &iwm7265d_cfg;
6273 	}
6274 
6275 	/* Allocate DMA memory for firmware transfers. */
6276 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
6277 		device_printf(dev, "could not allocate memory for firmware\n");
6278 		goto fail;
6279 	}
6280 
6281 	/* Allocate "Keep Warm" page. */
6282 	if ((error = iwm_alloc_kw(sc)) != 0) {
6283 		device_printf(dev, "could not allocate keep warm page\n");
6284 		goto fail;
6285 	}
6286 
6287 	/* We use ICT interrupts */
6288 	if ((error = iwm_alloc_ict(sc)) != 0) {
6289 		device_printf(dev, "could not allocate ICT table\n");
6290 		goto fail;
6291 	}
6292 
6293 	/* Allocate TX scheduler "rings". */
6294 	if ((error = iwm_alloc_sched(sc)) != 0) {
6295 		device_printf(dev, "could not allocate TX scheduler rings\n");
6296 		goto fail;
6297 	}
6298 
6299 	/* Allocate TX rings */
6300 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6301 		if ((error = iwm_alloc_tx_ring(sc,
6302 		    &sc->txq[txq_i], txq_i)) != 0) {
6303 			device_printf(dev,
6304 			    "could not allocate TX ring %d\n",
6305 			    txq_i);
6306 			goto fail;
6307 		}
6308 	}
6309 
6310 	/* Allocate RX ring. */
6311 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6312 		device_printf(dev, "could not allocate RX ring\n");
6313 		goto fail;
6314 	}
6315 
6316 	/* Clear pending interrupts. */
6317 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6318 
6319 	ic->ic_softc = sc;
6320 	ic->ic_name = device_get_nameunit(sc->sc_dev);
6321 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
6322 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
6323 
6324 	/* Set device capabilities. */
6325 	ic->ic_caps =
6326 	    IEEE80211_C_STA |
6327 	    IEEE80211_C_WPA |		/* WPA/RSN */
6328 	    IEEE80211_C_WME |
6329 	    IEEE80211_C_PMGT |
6330 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
6331 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
6332 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
6333 	    ;
6334 	/* Advertise full-offload scanning */
6335 	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6336 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6337 		sc->sc_phyctxt[i].id = i;
6338 		sc->sc_phyctxt[i].color = 0;
6339 		sc->sc_phyctxt[i].ref = 0;
6340 		sc->sc_phyctxt[i].channel = NULL;
6341 	}
6342 
6343 	/* Default noise floor */
6344 	sc->sc_noise = -96;
6345 
6346 	/* Max RSSI */
6347 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6348 
6349 #ifdef IWM_DEBUG
6350 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6351 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6352 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6353 #endif
6354 
6355 	error = iwm_read_firmware(sc);
6356 	if (error) {
6357 		goto fail;
6358 	} else if (sc->sc_fw.fw_fp == NULL) {
6359 		/*
6360 		 * XXX Add a solution for properly deferring firmware load
6361 		 *     during bootup.
6362 		 */
6363 		goto fail;
6364 	} else {
6365 		sc->sc_preinit_hook.ich_func = iwm_preinit;
6366 		sc->sc_preinit_hook.ich_arg = sc;
6367 		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6368 			device_printf(dev,
6369 			    "config_intrhook_establish failed\n");
6370 			goto fail;
6371 		}
6372 	}
6373 
6374 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6375 	    "<-%s\n", __func__);
6376 
6377 	return 0;
6378 
6379 	/* Free allocated memory if something failed during attachment. */
6380 fail:
6381 	iwm_detach_local(sc, 0);
6382 
6383 	return ENXIO;
6384 }
6385 
6386 static int
6387 iwm_is_valid_ether_addr(uint8_t *addr)
6388 {
6389 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6390 
6391 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
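	/* Reject group/multicast addresses and the all-zeroes address. */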
6392 		return (FALSE);
6393 
6394 	return (TRUE);
6395 }
6396 
6397 static int
6398 iwm_wme_update(struct ieee80211com *ic)
6399 {
6400 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6401 	struct iwm_softc *sc = ic->ic_softc;
6402 #if !defined(__DragonFly__)
6403 	struct chanAccParams chp;
6404 #endif
6405 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6406 	struct iwm_vap *ivp = IWM_VAP(vap);
6407 	struct iwm_node *in;
6408 	struct wmeParams tmp[WME_NUM_AC];
6409 	int aci, error;
6410 
6411 	if (vap == NULL)
6412 		return (0);
6413 
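	/*
	 * Snapshot the WME parameters under the net80211 lock, then apply
	 * them to the firmware state under the driver lock.
	 */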
6414 #if !defined(__DragonFly__)
6415 	ieee80211_wme_ic_getparams(ic, &chp);
6416 
6417 	IEEE80211_LOCK(ic);
6418 	for (aci = 0; aci < WME_NUM_AC; aci++)
6419 		tmp[aci] = chp.cap_wmeParams[aci];
6420 	IEEE80211_UNLOCK(ic);
6421 #else
6422 	IEEE80211_LOCK(ic);
6423 	for (aci = 0; aci < WME_NUM_AC; aci++)
6424 		tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6425 	IEEE80211_UNLOCK(ic);
6426 #endif
6427 
6428 	IWM_LOCK(sc);
6429 	for (aci = 0; aci < WME_NUM_AC; aci++) {
6430 		const struct wmeParams *ac = &tmp[aci];
6431 		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6432 		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6433 		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6434 		ivp->queue_params[aci].edca_txop =
6435 		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6436 	}
6437 	ivp->have_wme = TRUE;
6438 	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6439 		in = IWM_NODE(vap->iv_bss);
6440 		if (in->in_assoc) {
6441 			if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
6442 				device_printf(sc->sc_dev,
6443 				    "%s: failed to update MAC\n", __func__);
6444 			}
6445 		}
6446 	}
6447 	IWM_UNLOCK(sc);
6448 
6449 	return (0);
6450 #undef IWM_EXP2
6451 }
6452 
6453 static void
6454 iwm_preinit(void *arg)
6455 {
6456 	struct iwm_softc *sc = arg;
6457 	device_t dev = sc->sc_dev;
6458 	struct ieee80211com *ic = &sc->sc_ic;
6459 	int error;
6460 
6461 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6462 	    "->%s\n", __func__);
6463 
6464 	IWM_LOCK(sc);
6465 	if ((error = iwm_start_hw(sc)) != 0) {
6466 		device_printf(dev, "could not initialize hardware\n");
6467 		IWM_UNLOCK(sc);
6468 		goto fail;
6469 	}
6470 
6471 	error = iwm_run_init_ucode(sc, 1);
6472 	iwm_stop_device(sc);
6473 	if (error) {
6474 		IWM_UNLOCK(sc);
6475 		goto fail;
6476 	}
6477 	device_printf(dev,
6478 	    "hw rev 0x%x, fw ver %s, address %s\n",
6479 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6480 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6481 
6482 	/* not all hardware can do 5GHz band */
6483 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6484 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6485 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6486 	IWM_UNLOCK(sc);
6487 
6488 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6489 	    ic->ic_channels);
6490 
6491 	/*
6492 	 * At this point we've committed - if we fail to do setup,
6493 	 * we now also have to tear down the net80211 state.
6494 	 */
6495 	ieee80211_ifattach(ic);
6496 	ic->ic_vap_create = iwm_vap_create;
6497 	ic->ic_vap_delete = iwm_vap_delete;
6498 	ic->ic_raw_xmit = iwm_raw_xmit;
6499 	ic->ic_node_alloc = iwm_node_alloc;
6500 	ic->ic_scan_start = iwm_scan_start;
6501 	ic->ic_scan_end = iwm_scan_end;
6502 	ic->ic_update_mcast = iwm_update_mcast;
6503 	ic->ic_getradiocaps = iwm_init_channel_map;
6504 	ic->ic_set_channel = iwm_set_channel;
6505 	ic->ic_scan_curchan = iwm_scan_curchan;
6506 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6507 	ic->ic_wme.wme_update = iwm_wme_update;
6508 	ic->ic_parent = iwm_parent;
6509 	ic->ic_transmit = iwm_transmit;
6510 	iwm_radiotap_attach(sc);
6511 	if (bootverbose)
6512 		ieee80211_announce(ic);
6513 
6514 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6515 	    "<-%s\n", __func__);
6516 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6517 
6518 	return;
6519 fail:
6520 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6521 	iwm_detach_local(sc, 0);
6522 }
6523 
6524 /*
6525  * Attach the interface to 802.11 radiotap.
6526  */
6527 static void
6528 iwm_radiotap_attach(struct iwm_softc *sc)
6529 {
6530 	struct ieee80211com *ic = &sc->sc_ic;
6531 
6532 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6533 	    "->%s begin\n", __func__);
6534 	ieee80211_radiotap_attach(ic,
6535 	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6536 	    IWM_TX_RADIOTAP_PRESENT,
6537 	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6538 	    IWM_RX_RADIOTAP_PRESENT);
6539 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6540 	    "<-%s end\n", __func__);
6541 }
6542 
6543 static struct ieee80211vap *
6544 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6545     enum ieee80211_opmode opmode, int flags,
6546     const uint8_t bssid[IEEE80211_ADDR_LEN],
6547     const uint8_t mac[IEEE80211_ADDR_LEN])
6548 {
6549 	struct iwm_vap *ivp;
6550 	struct ieee80211vap *vap;
6551 
6552 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6553 		return NULL;
6554 	ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6555 	vap = &ivp->iv_vap;
6556 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6557 	vap->iv_bmissthreshold = 10;            /* override default */
6558 	/* Override with driver methods. */
6559 	ivp->iv_newstate = vap->iv_newstate;
6560 	vap->iv_newstate = iwm_newstate;
6561 
6562 	ivp->id = IWM_DEFAULT_MACID;
6563 	ivp->color = IWM_DEFAULT_COLOR;
6564 
6565 	ivp->have_wme = FALSE;
6566 	ivp->ps_disabled = FALSE;
6567 
6568 	ieee80211_ratectl_init(vap);
6569 	/* Complete setup. */
6570 	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6571 	    mac);
6572 	ic->ic_opmode = opmode;
6573 
6574 	return vap;
6575 }
6576 
6577 static void
6578 iwm_vap_delete(struct ieee80211vap *vap)
6579 {
6580 	struct iwm_vap *ivp = IWM_VAP(vap);
6581 
6582 	ieee80211_ratectl_deinit(vap);
6583 	ieee80211_vap_detach(vap);
6584 	kfree(ivp, M_80211_VAP);
6585 }
6586 
6587 static void
6588 iwm_xmit_queue_drain(struct iwm_softc *sc)
6589 {
6590 	struct mbuf *m;
6591 	struct ieee80211_node *ni;
6592 
6593 	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6594 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6595 		ieee80211_free_node(ni);
6596 		m_freem(m);
6597 	}
6598 }
6599 
6600 static void
6601 iwm_scan_start(struct ieee80211com *ic)
6602 {
6603 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6604 	struct iwm_softc *sc = ic->ic_softc;
6605 	int error;
6606 
6607 	IWM_LOCK(sc);
6608 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6609 		/* This should not be possible */
6610 		device_printf(sc->sc_dev,
6611 		    "%s: Previous scan not completed yet\n", __func__);
6612 	}
6613 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6614 		error = iwm_umac_scan(sc);
6615 	else
6616 		error = iwm_lmac_scan(sc);
6617 	if (error != 0) {
6618 		device_printf(sc->sc_dev, "could not initiate scan\n");
6619 		IWM_UNLOCK(sc);
6620 		ieee80211_cancel_scan(vap);
6621 	} else {
6622 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6623 		iwm_led_blink_start(sc);
6624 		IWM_UNLOCK(sc);
6625 	}
6626 }
6627 
6628 static void
6629 iwm_scan_end(struct ieee80211com *ic)
6630 {
6631 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6632 	struct iwm_softc *sc = ic->ic_softc;
6633 
6634 	IWM_LOCK(sc);
6635 	iwm_led_blink_stop(sc);
6636 	if (vap->iv_state == IEEE80211_S_RUN)
6637 		iwm_led_enable(sc);
6638 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6639 		/*
6640 		 * Removing IWM_FLAG_SCAN_RUNNING now is fine because
6641 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6642 		 * taskqueue.
6643 		 */
6644 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6645 		iwm_scan_stop_wait(sc);
6646 	}
6647 	IWM_UNLOCK(sc);
6648 
6649 	/*
6650 	 * Make sure we don't race if sc_es_task is still enqueued here:
6651 	 * it must not call ieee80211_scan_done when we have already
6652 	 * started the next scan.
6653 	 */
6654 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6655 }
6656 
6657 static void
6658 iwm_update_mcast(struct ieee80211com *ic)
6659 {
6660 }
6661 
6662 static void
6663 iwm_set_channel(struct ieee80211com *ic)
6664 {
6665 }
6666 
6667 static void
6668 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6669 {
6670 }
6671 
6672 static void
6673 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6674 {
6675 }
6676 
6677 void
6678 iwm_init_task(void *arg1)
6679 {
6680 	struct iwm_softc *sc = arg1;
6681 
6682 	IWM_LOCK(sc);
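	/* Serialize init/stop: wait until no one else holds IWM_FLAG_BUSY. */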
6683 	while (sc->sc_flags & IWM_FLAG_BUSY)
6684 		lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6685 	sc->sc_flags |= IWM_FLAG_BUSY;
6686 	iwm_stop(sc);
6687 	if (sc->sc_ic.ic_nrunning > 0)
6688 		iwm_init(sc);
6689 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6690 	wakeup(&sc->sc_flags);
6691 	IWM_UNLOCK(sc);
6692 }
6693 
6694 static int
6695 iwm_resume(device_t dev)
6696 {
6697 	struct iwm_softc *sc = device_get_softc(dev);
6698 	int do_reinit = 0;
6699 
6700 	/*
6701 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6702 	 * PCI Tx retries from interfering with C3 CPU state.
6703 	 */
6704 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6705 
6706 	if (!sc->sc_attached)
6707 		return 0;
6708 
6709 	iwm_init_task(device_get_softc(dev));
6710 
6711 	IWM_LOCK(sc);
6712 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6713 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6714 		do_reinit = 1;
6715 	}
6716 	IWM_UNLOCK(sc);
6717 
6718 	if (do_reinit)
6719 		ieee80211_resume_all(&sc->sc_ic);
6720 
6721 	return 0;
6722 }
6723 
6724 static int
6725 iwm_suspend(device_t dev)
6726 {
6727 	int do_stop = 0;
6728 	struct iwm_softc *sc = device_get_softc(dev);
6729 
6730 	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6731 
6732 	if (!sc->sc_attached)
6733 		return (0);
6734 
6735 	ieee80211_suspend_all(&sc->sc_ic);
6736 
6737 	if (do_stop) {
6738 		IWM_LOCK(sc);
6739 		iwm_stop(sc);
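		/*
		 * Reuse IWM_FLAG_SCANNING as a "reinit on resume" marker;
		 * iwm_resume() checks and clears it.
		 */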
6740 		sc->sc_flags |= IWM_FLAG_SCANNING;
6741 		IWM_UNLOCK(sc);
6742 	}
6743 
6744 	return (0);
6745 }
6746 
6747 static int
6748 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6749 {
6750 	struct iwm_fw_info *fw = &sc->sc_fw;
6751 	device_t dev = sc->sc_dev;
6752 	int i;
6753 
6754 	if (!sc->sc_attached)
6755 		return 0;
6756 	sc->sc_attached = 0;
6757 	if (do_net80211) {
6758 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6759 	}
6760 	iwm_stop_device(sc);
6761 	if (do_net80211) {
6762 		IWM_LOCK(sc);
6763 		iwm_xmit_queue_drain(sc);
6764 		IWM_UNLOCK(sc);
6765 		ieee80211_ifdetach(&sc->sc_ic);
6766 	}
6767 	callout_drain(&sc->sc_led_blink_to);
6768 	callout_drain(&sc->sc_watchdog_to);
6769 
6770 	iwm_phy_db_free(sc->sc_phy_db);
6771 	sc->sc_phy_db = NULL;
6772 
6773 	iwm_free_nvm_data(sc->nvm_data);
6774 
6775 	/* Free descriptor rings */
6776 	iwm_free_rx_ring(sc, &sc->rxq);
6777 	for (i = 0; i < nitems(sc->txq); i++)
6778 		iwm_free_tx_ring(sc, &sc->txq[i]);
6779 
6780 	/* Free firmware */
6781 	if (fw->fw_fp != NULL)
6782 		iwm_fw_info_free(fw);
6783 
6784 	/* Free scheduler */
6785 	iwm_dma_contig_free(&sc->sched_dma);
6786 	iwm_dma_contig_free(&sc->ict_dma);
6787 	iwm_dma_contig_free(&sc->kw_dma);
6788 	iwm_dma_contig_free(&sc->fw_dma);
6789 
6790 	iwm_free_fw_paging(sc);
6791 
6792 	/* Finished with the hardware - detach things */
6793 	iwm_pci_detach(dev);
6794 
6795 	if (sc->sc_notif_wait != NULL) {
6796 		iwm_notification_wait_free(sc->sc_notif_wait);
6797 		sc->sc_notif_wait = NULL;
6798 	}
6799 
6800 	IWM_LOCK_DESTROY(sc);
6801 
6802 	return (0);
6803 }
6804 
6805 static int
6806 iwm_detach(device_t dev)
6807 {
6808 	struct iwm_softc *sc = device_get_softc(dev);
6809 
6810 	return (iwm_detach_local(sc, 1));
6811 }
6812 
6813 static device_method_t iwm_pci_methods[] = {
6814         /* Device interface */
6815         DEVMETHOD(device_probe,         iwm_probe),
6816         DEVMETHOD(device_attach,        iwm_attach),
6817         DEVMETHOD(device_detach,        iwm_detach),
6818         DEVMETHOD(device_suspend,       iwm_suspend),
6819         DEVMETHOD(device_resume,        iwm_resume),
6820 
6821         DEVMETHOD_END
6822 };
6823 
6824 static driver_t iwm_pci_driver = {
6825         "iwm",
6826         iwm_pci_methods,
6827         sizeof (struct iwm_softc)
6828 };
6829 
6830 static devclass_t iwm_devclass;
6831 
6832 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6833 #if !defined(__DragonFly__)
6834 MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver,
6835     iwm_devices, nitems(iwm_devices));
6836 #endif
6837 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6838 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6839 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6840