xref: /dragonfly/sys/dev/netif/iwm/if_iwm.c (revision 795e3215)
1 /*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89 
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 /*
106  *				DragonFly work
107  *
108  * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109  *	 changes to remove per-device network interface (DragonFly has not
110  *	 caught up to that yet on the WLAN side).
111  *
112  * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113  *	malloc -> kmalloc	(in particular, changing improper M_NOWAIT
114  *				specifications to M_INTWAIT.  We still don't
115  *				understand why FreeBSD uses M_NOWAIT for
116  *				critical must-not-fail kmalloc()s).
117  *	free -> kfree
118  *	printf -> kprintf
119  *	(bug fix) memset in iwm_reset_rx_ring.
120  *	(debug)   added several kprintf()s on error
121  *
122  *	header file paths (DFly allows localized path specifications).
123  *	minor header file differences.
124  *
125  * Comprehensive list of adjustments for DragonFly #ifdef'd:
126  *	(safety)  added register read-back serialization in iwm_reset_rx_ring().
127  *	packet counters
128  *	msleep -> iwmsleep (handle deadlocks due to dfly interrupt serializer)
129  *	mtx -> lk  (mtx functions -> lockmgr functions)
130  *	callout differences
131  *	taskqueue differences
132  *	MSI differences
133  *	bus_setup_intr() differences
134  *	minor PCI config register naming differences
135  */
136 #include <sys/cdefs.h>
137 __FBSDID("$FreeBSD$");
138 
139 #include <sys/param.h>
140 #include <sys/bus.h>
141 #include <sys/endian.h>
142 #include <sys/firmware.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/mbuf.h>
146 #include <sys/mutex.h>
147 #include <sys/module.h>
148 #include <sys/proc.h>
149 #include <sys/rman.h>
150 #include <sys/socket.h>
151 #include <sys/sockio.h>
152 #include <sys/sysctl.h>
153 #include <sys/linker.h>
154 
155 #include <machine/endian.h>
156 
157 #include <bus/pci/pcivar.h>
158 #include <bus/pci/pcireg.h>
159 
160 #include <net/bpf.h>
161 
162 #include <net/if.h>
163 #include <net/if_var.h>
164 #include <net/if_arp.h>
165 #include <net/if_dl.h>
166 #include <net/if_media.h>
167 #include <net/if_types.h>
168 
169 #include <netinet/in.h>
170 #include <netinet/in_systm.h>
171 #include <netinet/if_ether.h>
172 #include <netinet/ip.h>
173 
174 #include <netproto/802_11/ieee80211_var.h>
175 #include <netproto/802_11/ieee80211_regdomain.h>
176 #include <netproto/802_11/ieee80211_ratectl.h>
177 #include <netproto/802_11/ieee80211_radiotap.h>
178 
179 #include "if_iwmreg.h"
180 #include "if_iwmvar.h"
181 #include "if_iwm_debug.h"
182 #include "if_iwm_util.h"
183 #include "if_iwm_binding.h"
184 #include "if_iwm_phy_db.h"
185 #include "if_iwm_mac_ctxt.h"
186 #include "if_iwm_phy_ctxt.h"
187 #include "if_iwm_time_event.h"
188 #include "if_iwm_power.h"
189 #include "if_iwm_scan.h"
190 #include "if_iwm_pcie_trans.h"
191 #include "if_iwm_led.h"
192 
/*
 * Channel numbers the NVM may enable, listed in NVM bitmap order:
 * the 2.4 GHz channels first (IWM_NUM_2GHZ_CHANNELS of them),
 * followed by the 5 GHz channels.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
#define IWM_NUM_2GHZ_CHANNELS	14

/* The NVM bitmap must fit every channel listed above. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
205 
/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;	/* rate in 500 kbps units (2 = 1 Mbps ... 108 = 54 Mbps,
			 * matching the *M in the paired PLCP constant) */
	uint8_t plcp;	/* PLCP signal value the firmware expects for this rate */
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
/* Table layout: indices 0..3 are the CCK rates, 4 and up are OFDM. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
232 
233 static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
234 static int	iwm_firmware_store_section(struct iwm_softc *,
235                                            enum iwm_ucode_type,
236                                            const uint8_t *, size_t);
237 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
238 static void	iwm_fw_info_free(struct iwm_fw_info *);
239 static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
240 static void	iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
241 static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
242                                      bus_size_t, bus_size_t);
243 static void	iwm_dma_contig_free(struct iwm_dma_info *);
244 static int	iwm_alloc_fwmem(struct iwm_softc *);
245 static void	iwm_free_fwmem(struct iwm_softc *);
246 static int	iwm_alloc_sched(struct iwm_softc *);
247 static void	iwm_free_sched(struct iwm_softc *);
248 static int	iwm_alloc_kw(struct iwm_softc *);
249 static void	iwm_free_kw(struct iwm_softc *);
250 static int	iwm_alloc_ict(struct iwm_softc *);
251 static void	iwm_free_ict(struct iwm_softc *);
252 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
253 static void	iwm_disable_rx_dma(struct iwm_softc *);
254 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
255 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
256 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
257                                   int);
258 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
259 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
260 static void	iwm_enable_interrupts(struct iwm_softc *);
261 static void	iwm_restore_interrupts(struct iwm_softc *);
262 static void	iwm_disable_interrupts(struct iwm_softc *);
263 static void	iwm_ict_reset(struct iwm_softc *);
264 static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
265 static void	iwm_stop_device(struct iwm_softc *);
266 static void	iwm_mvm_nic_config(struct iwm_softc *);
267 static int	iwm_nic_rx_init(struct iwm_softc *);
268 static int	iwm_nic_tx_init(struct iwm_softc *);
269 static int	iwm_nic_init(struct iwm_softc *);
270 static void	iwm_enable_txq(struct iwm_softc *, int, int);
271 static int	iwm_post_alive(struct iwm_softc *);
272 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
273                                    uint16_t, uint8_t *, uint16_t *);
274 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
275 				     uint16_t *);
276 static uint32_t	iwm_eeprom_channel_flags(uint16_t);
277 static void	iwm_add_channel_band(struct iwm_softc *,
278 		    struct ieee80211_channel[], int, int *, int, int,
279 		    const uint8_t[]);
280 static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
281 		    struct ieee80211_channel[]);
282 static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
283 			           const uint16_t *, const uint16_t *, uint8_t,
284 				   uint8_t);
285 struct iwm_nvm_section;
286 static int	iwm_parse_nvm_sections(struct iwm_softc *,
287                                        struct iwm_nvm_section *);
288 static int	iwm_nvm_init(struct iwm_softc *);
289 static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
290                                         const uint8_t *, uint32_t);
291 static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
292 static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
293 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
294 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
295 static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
296                                               enum iwm_ucode_type);
297 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
298 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
299 static int	iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
300 static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
301 					    struct iwm_rx_phy_info *);
302 static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
303                                       struct iwm_rx_packet *,
304                                       struct iwm_rx_data *);
305 static int	iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
306 static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
307                                    struct iwm_rx_data *);
308 static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
309                                          struct iwm_rx_packet *,
310 				         struct iwm_node *);
311 static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
312                                   struct iwm_rx_data *);
313 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
314 #if 0
315 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
316                                  uint16_t);
317 #endif
318 static const struct iwm_rate *
319 	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
320 			struct ieee80211_frame *, struct iwm_tx_cmd *);
321 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
322                        struct ieee80211_node *, int);
323 static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
324 			     const struct ieee80211_bpf_params *);
325 static void	iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
326 					     struct iwm_mvm_add_sta_cmd_v5 *);
327 static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
328 					        struct iwm_mvm_add_sta_cmd_v6 *,
329                                                 int *);
330 static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
331                                        int);
332 static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
333 static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
334 static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
335                                            struct iwm_int_sta *,
336 				           const uint8_t *, uint16_t, uint16_t);
337 static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
338 static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
339 static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
340 static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
341 static int	iwm_release(struct iwm_softc *, struct iwm_node *);
342 static struct ieee80211_node *
343 		iwm_node_alloc(struct ieee80211vap *,
344 		               const uint8_t[IEEE80211_ADDR_LEN]);
345 static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
346 static int	iwm_media_change(struct ifnet *);
347 static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
348 static void	iwm_endscan_cb(void *, int);
349 static int	iwm_init_hw(struct iwm_softc *);
350 static void	iwm_init(struct iwm_softc *);
351 static void	iwm_start(struct iwm_softc *);
352 static void	iwm_stop(struct iwm_softc *);
353 static void	iwm_watchdog(void *);
354 static void	iwm_parent(struct ieee80211com *);
355 #ifdef IWM_DEBUG
356 static const char *
357 		iwm_desc_lookup(uint32_t);
358 static void	iwm_nic_error(struct iwm_softc *);
359 #endif
360 static void	iwm_notif_intr(struct iwm_softc *);
361 static void	iwm_intr(void *);
362 static int	iwm_attach(device_t);
363 static void	iwm_preinit(void *);
364 static int	iwm_detach_local(struct iwm_softc *sc, int);
365 static void	iwm_init_task(void *);
366 static void	iwm_radiotap_attach(struct iwm_softc *);
367 static struct ieee80211vap *
368 		iwm_vap_create(struct ieee80211com *,
369 		               const char [IFNAMSIZ], int,
370 		               enum ieee80211_opmode, int,
371 		               const uint8_t [IEEE80211_ADDR_LEN],
372 		               const uint8_t [IEEE80211_ADDR_LEN]);
373 static void	iwm_vap_delete(struct ieee80211vap *);
374 static void	iwm_scan_start(struct ieee80211com *);
375 static void	iwm_scan_end(struct ieee80211com *);
376 static void	iwm_update_mcast(struct ieee80211com *);
377 static void	iwm_set_channel(struct ieee80211com *);
378 static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
379 static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
380 static int	iwm_detach(device_t);
381 
382 #if defined(__DragonFly__)
/* MSI is used by default; can be disabled via the hw.iwm.msi.enable tunable. */
static int	iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
386 
387 /*
388  * This is a hack due to the wlan_serializer deadlocking sleepers.
389  */
390 int iwmsleep(void *chan, struct lock *lk, int flags, const char *wmesg, int to);
391 
/*
 * DragonFly replacement for msleep(): sleep on 'chan' with lockmgr lock
 * 'lk' held, returning the lksleep() error code.
 *
 * If the caller entered via the wlan serializer we must drop the
 * serializer before blocking, or other wlan threads would deadlock
 * against the sleeping thread (see the comment above the prototype).
 * On wakeup, lksleep() has reacquired 'lk'; we release it, re-enter the
 * serializer, then take 'lk' again -- presumably to preserve a
 * serializer-before-lock acquisition order.  NOTE(review): confirm that
 * the brief window where 'lk' is dropped after wakeup is safe for all
 * callers.
 */
int
iwmsleep(void *chan, struct lock *lk, int flags, const char *wmesg, int to)
{
	int error;

	if (wlan_is_serialized()) {
		wlan_serialize_exit();
		kprintf("%s: have to release serializer for sleeping\n",
		    __func__);
		error = lksleep(chan, lk, flags, wmesg, to);
		lockmgr(lk, LK_RELEASE);
		wlan_serialize_enter();
		lockmgr(lk, LK_EXCLUSIVE);
	} else {
		error = lksleep(chan, lk, flags, wmesg, to);
	}
	return error;
}
410 
411 #endif
412 
413 /*
414  * Firmware parser.
415  */
416 
417 static int
418 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
419 {
420 	const struct iwm_fw_cscheme_list *l = (const void *)data;
421 
422 	if (dlen < sizeof(*l) ||
423 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
424 		return EINVAL;
425 
426 	/* we don't actually store anything for now, always use s/w crypto */
427 
428 	return 0;
429 }
430 
431 static int
432 iwm_firmware_store_section(struct iwm_softc *sc,
433     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
434 {
435 	struct iwm_fw_sects *fws;
436 	struct iwm_fw_onesect *fwone;
437 
438 	if (type >= IWM_UCODE_TYPE_MAX)
439 		return EINVAL;
440 	if (dlen < sizeof(uint32_t))
441 		return EINVAL;
442 
443 	fws = &sc->sc_fw.fw_sects[type];
444 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
445 		return EINVAL;
446 
447 	fwone = &fws->fw_sect[fws->fw_count];
448 
449 	/* first 32bit are device load offset */
450 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
451 
452 	/* rest is data */
453 	fwone->fws_data = data + sizeof(uint32_t);
454 	fwone->fws_len = dlen - sizeof(uint32_t);
455 
456 	fws->fw_count++;
457 	fws->fw_totlen += fwone->fws_len;
458 
459 	return 0;
460 }
461 
/*
 * On-file layout of an IWM_UCODE_TLV_DEF_CALIB payload: the ucode image
 * type the default calibration applies to, followed by the calibration
 * control words.  Fields are little-endian, straight from the firmware
 * file (see iwm_set_default_calib()).
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;
466 
467 static int
468 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
469 {
470 	const struct iwm_tlv_calib_data *def_calib = data;
471 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
472 
473 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
474 		device_printf(sc->sc_dev,
475 		    "Wrong ucode_type %u for default "
476 		    "calibration.\n", ucode_type);
477 		return EINVAL;
478 	}
479 
480 	sc->sc_default_calib[ucode_type].flow_trigger =
481 	    def_calib->calib.flow_trigger;
482 	sc->sc_default_calib[ucode_type].event_trigger =
483 	    def_calib->calib.event_trigger;
484 
485 	return 0;
486 }
487 
/*
 * Drop the firmware(9) image reference and forget all parsed sections.
 * fw_status is deliberately left alone: the NONE/INPROGRESS/DONE state
 * machine is owned by iwm_read_firmware().
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
496 
497 static int
498 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
499 {
500 	struct iwm_fw_info *fw = &sc->sc_fw;
501 	const struct iwm_tlv_ucode_header *uhdr;
502 	struct iwm_ucode_tlv tlv;
503 	enum iwm_ucode_tlv_type tlv_type;
504 	const struct firmware *fwp;
505 	const uint8_t *data;
506 	int error = 0;
507 	size_t len;
508 
509 	if (fw->fw_status == IWM_FW_STATUS_DONE &&
510 	    ucode_type != IWM_UCODE_TYPE_INIT)
511 		return 0;
512 
513 	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
514 #if defined(__DragonFly__)
515 		iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
516 #else
517 		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
518 #endif
519 	}
520 	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
521 
522 	if (fw->fw_fp != NULL)
523 		iwm_fw_info_free(fw);
524 
525 	/*
526 	 * Load firmware into driver memory.
527 	 * fw_fp will be set.
528 	 */
529 	IWM_UNLOCK(sc);
530 	fwp = firmware_get(sc->sc_fwname);
531 	IWM_LOCK(sc);
532 	if (fwp == NULL) {
533 		device_printf(sc->sc_dev,
534 		    "could not read firmware %s (error %d)\n",
535 		    sc->sc_fwname, error);
536 		goto out;
537 	}
538 	fw->fw_fp = fwp;
539 
540 	/*
541 	 * Parse firmware contents
542 	 */
543 
544 	uhdr = (const void *)fw->fw_fp->data;
545 	if (*(const uint32_t *)fw->fw_fp->data != 0
546 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
547 		device_printf(sc->sc_dev, "invalid firmware %s\n",
548 		    sc->sc_fwname);
549 		error = EINVAL;
550 		goto out;
551 	}
552 
553 	sc->sc_fwver = le32toh(uhdr->ver);
554 	data = uhdr->data;
555 	len = fw->fw_fp->datasize - sizeof(*uhdr);
556 
557 	while (len >= sizeof(tlv)) {
558 		size_t tlv_len;
559 		const void *tlv_data;
560 
561 		memcpy(&tlv, data, sizeof(tlv));
562 		tlv_len = le32toh(tlv.length);
563 		tlv_type = le32toh(tlv.type);
564 
565 		len -= sizeof(tlv);
566 		data += sizeof(tlv);
567 		tlv_data = data;
568 
569 		if (len < tlv_len) {
570 			device_printf(sc->sc_dev,
571 			    "firmware too short: %zu bytes\n",
572 			    len);
573 			error = EINVAL;
574 			goto parse_out;
575 		}
576 
577 		switch ((int)tlv_type) {
578 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
579 			if (tlv_len < sizeof(uint32_t)) {
580 				device_printf(sc->sc_dev,
581 				    "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
582 				    __func__,
583 				    (int) tlv_len);
584 				error = EINVAL;
585 				goto parse_out;
586 			}
587 			sc->sc_capa_max_probe_len
588 			    = le32toh(*(const uint32_t *)tlv_data);
589 			/* limit it to something sensible */
590 			if (sc->sc_capa_max_probe_len > (1<<16)) {
591 				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
592 				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
593 				    "ridiculous\n", __func__);
594 				error = EINVAL;
595 				goto parse_out;
596 			}
597 			break;
598 		case IWM_UCODE_TLV_PAN:
599 			if (tlv_len) {
600 				device_printf(sc->sc_dev,
601 				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
602 				    __func__,
603 				    (int) tlv_len);
604 				error = EINVAL;
605 				goto parse_out;
606 			}
607 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
608 			break;
609 		case IWM_UCODE_TLV_FLAGS:
610 			if (tlv_len < sizeof(uint32_t)) {
611 				device_printf(sc->sc_dev,
612 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
613 				    __func__,
614 				    (int) tlv_len);
615 				error = EINVAL;
616 				goto parse_out;
617 			}
618 			/*
619 			 * Apparently there can be many flags, but Linux driver
620 			 * parses only the first one, and so do we.
621 			 *
622 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
623 			 * Intentional or a bug?  Observations from
624 			 * current firmware file:
625 			 *  1) TLV_PAN is parsed first
626 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
627 			 * ==> this resets TLV_PAN to itself... hnnnk
628 			 */
629 			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
630 			break;
631 		case IWM_UCODE_TLV_CSCHEME:
632 			if ((error = iwm_store_cscheme(sc,
633 			    tlv_data, tlv_len)) != 0) {
634 				device_printf(sc->sc_dev,
635 				    "%s: iwm_store_cscheme(): returned %d\n",
636 				    __func__,
637 				    error);
638 				goto parse_out;
639 			}
640 			break;
641 		case IWM_UCODE_TLV_NUM_OF_CPU:
642 			if (tlv_len != sizeof(uint32_t)) {
643 				device_printf(sc->sc_dev,
644 				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
645 				    __func__,
646 				    (int) tlv_len);
647 				error = EINVAL;
648 				goto parse_out;
649 			}
650 			if (le32toh(*(const uint32_t*)tlv_data) != 1) {
651 				device_printf(sc->sc_dev,
652 				    "%s: driver supports "
653 				    "only TLV_NUM_OF_CPU == 1",
654 				    __func__);
655 				error = EINVAL;
656 				goto parse_out;
657 			}
658 			break;
659 		case IWM_UCODE_TLV_SEC_RT:
660 			if ((error = iwm_firmware_store_section(sc,
661 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
662 				device_printf(sc->sc_dev,
663 				    "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
664 				    __func__,
665 				    error);
666 				goto parse_out;
667 			}
668 			break;
669 		case IWM_UCODE_TLV_SEC_INIT:
670 			if ((error = iwm_firmware_store_section(sc,
671 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
672 				device_printf(sc->sc_dev,
673 				    "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
674 				    __func__,
675 				    error);
676 				goto parse_out;
677 			}
678 			break;
679 		case IWM_UCODE_TLV_SEC_WOWLAN:
680 			if ((error = iwm_firmware_store_section(sc,
681 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
682 				device_printf(sc->sc_dev,
683 				    "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
684 				    __func__,
685 				    error);
686 				goto parse_out;
687 			}
688 			break;
689 		case IWM_UCODE_TLV_DEF_CALIB:
690 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
691 				device_printf(sc->sc_dev,
692 				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
693 				    __func__,
694 				    (int) tlv_len,
695 				    (int) sizeof(struct iwm_tlv_calib_data));
696 				error = EINVAL;
697 				goto parse_out;
698 			}
699 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
700 				device_printf(sc->sc_dev,
701 				    "%s: iwm_set_default_calib() failed: %d\n",
702 				    __func__,
703 				    error);
704 				goto parse_out;
705 			}
706 			break;
707 		case IWM_UCODE_TLV_PHY_SKU:
708 			if (tlv_len != sizeof(uint32_t)) {
709 				error = EINVAL;
710 				device_printf(sc->sc_dev,
711 				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
712 				    __func__,
713 				    (int) tlv_len);
714 				goto parse_out;
715 			}
716 			sc->sc_fw_phy_config =
717 			    le32toh(*(const uint32_t *)tlv_data);
718 			break;
719 
720 		case IWM_UCODE_TLV_API_CHANGES_SET:
721 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
722 			/* ignore, not used by current driver */
723 			break;
724 
725 		default:
726 			device_printf(sc->sc_dev,
727 			    "%s: unknown firmware section %d, abort\n",
728 			    __func__, tlv_type);
729 			error = EINVAL;
730 			goto parse_out;
731 		}
732 
733 		len -= roundup(tlv_len, 4);
734 		data += roundup(tlv_len, 4);
735 	}
736 
737 	KASSERT(error == 0, ("unhandled error"));
738 
739  parse_out:
740 	if (error) {
741 		device_printf(sc->sc_dev, "firmware parse error %d, "
742 		    "section type %d\n", error, tlv_type);
743 	}
744 
745 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
746 		device_printf(sc->sc_dev,
747 		    "device uses unsupported power ops\n");
748 		error = ENOTSUP;
749 	}
750 
751  out:
752 	if (error) {
753 		fw->fw_status = IWM_FW_STATUS_NONE;
754 		if (fw->fw_fp != NULL)
755 			iwm_fw_info_free(fw);
756 	} else
757 		fw->fw_status = IWM_FW_STATUS_DONE;
758 	wakeup(&sc->sc_fw);
759 
760 	return error;
761 }
762 
763 /*
764  * DMA resource routines
765  */
766 
767 static void
768 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
769 {
770         if (error != 0)
771                 return;
772 	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
773 	*(bus_addr_t *)arg = segs[0].ds_addr;
774 }
775 
/*
 * Allocate a physically contiguous, coherent DMA area of 'size' bytes
 * with the given alignment, below 4GB (32-bit device addresses), and
 * record tag/map/vaddr/paddr in 'dma'.  Returns 0 or a bus_dma error;
 * on failure any partially constructed state is torn down.
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	dma->tag = NULL;
	dma->size = size;

#if defined(__DragonFly__)
	error = bus_dma_tag_create(tag, alignment,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   size, 1, size,
				   BUS_DMA_NOWAIT, &dma->tag);
#else
	error = bus_dma_tag_create(tag, alignment,
            0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
            1, size, 0, NULL, NULL, &dma->tag);
#endif
        if (error != 0)
                goto fail;

	/* Zeroed, coherent backing memory for the single segment. */
        error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
        if (error != 0)
                goto fail;

	/* iwm_dma_map_addr() stores the bus address into dma->paddr. */
        error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
            iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
        if (error != 0)
                goto fail;

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	/* Safe on partial construction; see iwm_dma_contig_free(). */
	iwm_dma_contig_free(dma);

	return error;
}
820 
/*
 * Tear down a DMA area in reverse order of iwm_dma_contig_alloc():
 * sync, unload, free memory, destroy map, destroy tag.  The NULL
 * checks make this safe to call on a partially constructed 'dma'.
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}

}
841 
/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
}
850 
/* Release the firmware-load DMA area allocated by iwm_alloc_fwmem(). */
static void
iwm_free_fwmem(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->fw_dma);
}
856 
857 /* tx scheduler rings.  not used? */
858 static int
859 iwm_alloc_sched(struct iwm_softc *sc)
860 {
861 	int rv;
862 
863 	/* TX scheduler rings must be aligned on a 1KB boundary. */
864 	rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
865 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
866 	return rv;
867 }
868 
/* Release the TX scheduler DMA area allocated by iwm_alloc_sched(). */
static void
iwm_free_sched(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->sched_dma);
}
874 
/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	/* One page, page-aligned. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
881 
/* Release the keep-warm page allocated by iwm_alloc_kw(). */
static void
iwm_free_kw(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->kw_dma);
}
887 
/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	/*
	 * Alignment must match IWM_ICT_PADDR_SHIFT: iwm_ict_reset()
	 * programs the table base as paddr >> IWM_ICT_PADDR_SHIFT.
	 */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
895 
/* Release the interrupt cause table. */
static void
iwm_free_ict(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->ict_dma);
}
901 
902 static int
903 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
904 {
905 	bus_size_t size;
906 	int i, error;
907 
908 	ring->cur = 0;
909 
910 	/* Allocate RX descriptors (256-byte aligned). */
911 	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
912 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
913 	if (error != 0) {
914 		device_printf(sc->sc_dev,
915 		    "could not allocate RX ring DMA memory\n");
916 		goto fail;
917 	}
918 	ring->desc = ring->desc_dma.vaddr;
919 
920 	/* Allocate RX status area (16-byte aligned). */
921 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
922 	    sizeof(*ring->stat), 16);
923 	if (error != 0) {
924 		device_printf(sc->sc_dev,
925 		    "could not allocate RX status DMA memory\n");
926 		goto fail;
927 	}
928 	ring->stat = ring->stat_dma.vaddr;
929 
930         /* Create RX buffer DMA tag. */
931 #if defined(__DragonFly__)
932         error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
933 				   0,
934 				   BUS_SPACE_MAXADDR_32BIT,
935 				   BUS_SPACE_MAXADDR,
936 				   NULL, NULL,
937 				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
938 				   BUS_DMA_NOWAIT, &ring->data_dmat);
939 #else
940         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
941             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
942             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
943 #endif
944         if (error != 0) {
945                 device_printf(sc->sc_dev,
946                     "%s: could not create RX buf DMA tag, error %d\n",
947                     __func__, error);
948                 goto fail;
949         }
950 
951 	/*
952 	 * Allocate and map RX buffers.
953 	 */
954 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
955 		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
956 			goto fail;
957 		}
958 	}
959 	return 0;
960 
961 fail:	iwm_free_rx_ring(sc, ring);
962 	return error;
963 }
964 
/*
 * Stop the RX DMA engine.  Requires the NIC lock; if the lock cannot
 * be taken the stop is silently skipped (see XXX notes below).
 */
static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	/* XXX conditional nic locks are stupid */
	/* XXX print out if we can't lock the NIC? */
	if (iwm_nic_lock(sc)) {
		/* XXX handle if RX stop doesn't finish? */
		(void) iwm_pcie_rx_stop(sc);
		iwm_nic_unlock(sc);
	}
}
976 
977 static void
978 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
979 {
980 	/* Reset the ring state */
981 	ring->cur = 0;
982 
983 	/*
984 	 * The hw rx ring index in shared memory must also be cleared,
985 	 * otherwise the discrepancy can cause reprocessing chaos.
986 	 */
987 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
988 }
989 
990 static void
991 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
992 {
993 	int i;
994 
995 	iwm_dma_contig_free(&ring->desc_dma);
996 	iwm_dma_contig_free(&ring->stat_dma);
997 
998 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
999 		struct iwm_rx_data *data = &ring->data[i];
1000 
1001 		if (data->m != NULL) {
1002 			bus_dmamap_sync(ring->data_dmat, data->map,
1003 			    BUS_DMASYNC_POSTREAD);
1004 			bus_dmamap_unload(ring->data_dmat, data->map);
1005 			m_freem(data->m);
1006 			data->m = NULL;
1007 		}
1008 		if (data->map != NULL) {
1009 			bus_dmamap_destroy(ring->data_dmat, data->map);
1010 			data->map = NULL;
1011 		}
1012 	}
1013 	if (ring->data_dmat != NULL) {
1014 		bus_dma_tag_destroy(ring->data_dmat);
1015 		ring->data_dmat = NULL;
1016 	}
1017 }
1018 
/*
 * Allocate the DMA resources for one TX ring: the TFD descriptor
 * array, (for the command queue and below) the device-command buffer
 * area, plus one DMA map per ring slot.  On failure everything built
 * so far is released through iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * Only queues up to and including IWM_MVM_CMD_QUEUE carry device
	 * commands, so higher-numbered rings skip the command buffers
	 * and the per-slot maps below.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, IWM_MAX_SCATTER - 2, MCLBYTES,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
            IWM_MAX_SCATTER - 2, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/*
	 * Precompute each slot's command and scratch physical addresses
	 * inside the contiguous command area, and create its DMA map.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* Sanity: we must have walked exactly the command area. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1097 
1098 static void
1099 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1100 {
1101 	int i;
1102 
1103 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1104 		struct iwm_tx_data *data = &ring->data[i];
1105 
1106 		if (data->m != NULL) {
1107 			bus_dmamap_sync(ring->data_dmat, data->map,
1108 			    BUS_DMASYNC_POSTWRITE);
1109 			bus_dmamap_unload(ring->data_dmat, data->map);
1110 			m_freem(data->m);
1111 			data->m = NULL;
1112 		}
1113 	}
1114 	/* Clear TX descriptors. */
1115 	memset(ring->desc, 0, ring->desc_dma.size);
1116 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1117 	    BUS_DMASYNC_PREWRITE);
1118 	sc->qfullmsk &= ~(1 << ring->qid);
1119 	ring->queued = 0;
1120 	ring->cur = 0;
1121 }
1122 
1123 static void
1124 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1125 {
1126 	int i;
1127 
1128 	iwm_dma_contig_free(&ring->desc_dma);
1129 	iwm_dma_contig_free(&ring->cmd_dma);
1130 
1131 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1132 		struct iwm_tx_data *data = &ring->data[i];
1133 
1134 		if (data->m != NULL) {
1135 			bus_dmamap_sync(ring->data_dmat, data->map,
1136 			    BUS_DMASYNC_POSTWRITE);
1137 			bus_dmamap_unload(ring->data_dmat, data->map);
1138 			m_freem(data->m);
1139 			data->m = NULL;
1140 		}
1141 		if (data->map != NULL) {
1142 			bus_dmamap_destroy(ring->data_dmat, data->map);
1143 			data->map = NULL;
1144 		}
1145 	}
1146 	if (ring->data_dmat != NULL) {
1147 		bus_dma_tag_destroy(ring->data_dmat);
1148 		ring->data_dmat = NULL;
1149 	}
1150 }
1151 
1152 /*
1153  * High-level hardware frobbing routines
1154  */
1155 
/*
 * Enable the standard set of interrupts and remember the mask so
 * iwm_restore_interrupts() can re-apply it later.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1162 
/* Re-apply the last interrupt mask stashed in sc_intmask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1168 
/*
 * Mask all interrupt sources and acknowledge anything pending in both
 * the CSR and flow-handler status registers.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1179 
/*
 * Reset and re-enable the interrupt cause table: interrupts are
 * masked, the table is zeroed and its base programmed, the driver is
 * switched into ICT mode, then interrupts are re-enabled.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1202 
1203 /*
1204  * Since this .. hard-resets things, it's time to actually
1205  * mark the first vap (if any) as having no mac context.
1206  * It's annoying, but since the driver is potentially being
1207  * stop/start'ed whilst active (thanks openbsd port!) we
1208  * have to correctly track this.
1209  */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, ntries;
	int qid;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Poll up to ~4ms (200 * 20us) for channel idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1291 
1292 static void
1293 iwm_mvm_nic_config(struct iwm_softc *sc)
1294 {
1295 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1296 	uint32_t reg_val = 0;
1297 
1298 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1299 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1300 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1301 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1302 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1303 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1304 
1305 	/* SKU control */
1306 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1307 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1308 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1309 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1310 
1311 	/* radio configuration */
1312 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1313 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1314 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1315 
1316 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1317 
1318 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1319 	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1320 	    radio_cfg_step, radio_cfg_dash);
1321 
1322 	/*
1323 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
1324 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
1325 	 * to lose ownership and not being able to obtain it back.
1326 	 */
1327 	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1328 	    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1329 	    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1330 }
1331 
/*
 * Program the RX DMA engine: stop it, point it at the descriptor ring
 * and status area, then enable it and prime the write pointer.
 * Returns EBUSY if the NIC lock cannot be taken, otherwise 0.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	iwm_disable_rx_dma(sc);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable RX. */
	/*
	 * Note: Linux driver also sets this:
	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	 *
	 * It causes weird behavior.  YMMV.
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1395 
/*
 * Program the TX side: deactivate the scheduler, point the hardware
 * at the keep-warm page and each ring's descriptor array.
 * Returns EBUSY if the NIC lock cannot be taken, otherwise 0.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}
	iwm_nic_unlock(sc);

	return 0;
}
1427 
1428 static int
1429 iwm_nic_init(struct iwm_softc *sc)
1430 {
1431 	int error;
1432 
1433 	iwm_apm_init(sc);
1434 	iwm_set_pwr(sc);
1435 
1436 	iwm_mvm_nic_config(sc);
1437 
1438 	if ((error = iwm_nic_rx_init(sc)) != 0)
1439 		return error;
1440 
1441 	/*
1442 	 * Ditto for TX, from iwn
1443 	 */
1444 	if ((error = iwm_nic_tx_init(sc)) != 0)
1445 		return error;
1446 
1447 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1448 	    "%s: shadow registers enabled\n", __func__);
1449 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1450 
1451 	return 0;
1452 }
1453 
/*
 * Map from an access-category index to the hardware TX FIFO.
 * NOTE(review): presumably indexed with WME AC values in
 * VO/VI/BE/BK order -- confirm against the users of this table.
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
1460 
/*
 * Activate one TX queue on the given FIFO: deactivate it, configure
 * chaining/aggregation, reset read/write pointers, clear the queue's
 * scheduler context and set its window size and frame limit, then
 * mark it active.  Silently returns if the NIC lock cannot be taken.
 */
static void
iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return; /* XXX return EBUSY */
	}

	/* unactivate before configuration */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

	/* Chain all queues except the command queue. */
	if (qid != IWM_MVM_CMD_QUEUE) {
		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
	}

	/* No aggregation on this queue. */
	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

	/* Reset write and read pointers to slot 0. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
	/* Set scheduler window size and frame limit. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
	    sizeof(uint32_t),
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWM_SCD_QUEUE_STTS_REG_MSK);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);
}
1508 
/*
 * Finish bring-up after the firmware reports "alive": verify the
 * scheduler SRAM base, reset the ICT table, clear the scheduler
 * context area, enable the command queue and the FH TX DMA channels.
 * Returns 0, EBUSY (NIC lock unavailable), EINVAL (scheduler base
 * mismatch), or an errno from iwm_write_mem().
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int error, chnl;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Firmware's idea of the scheduler base must match ours. */
	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch",
		    __func__);
		error = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	error = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (error)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* enable command channel */
	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
	iwm_nic_unlock(sc);
	return error;
}
1566 
1567 /*
1568  * NVM read access and content parsing.  We do not support
1569  * external NVM or writing NVM.
1570  * iwlwifi/mvm/nvm.c
1571  */
1572 
/*
 * List of NVM sections we are allowed/need to read; iwm_nvm_init()
 * walks this table and fetches each section from the firmware.
 */
const int nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
};
1580 
1581 /* Default NVM size to read */
1582 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
1583 #define IWM_MAX_NVM_SECTION_SIZE 7000
1584 
1585 #define IWM_NVM_WRITE_OPCODE 1
1586 #define IWM_NVM_READ_OPCODE 0
1587 
1588 static int
1589 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1590 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1591 {
1592 	offset = 0;
1593 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1594 		.offset = htole16(offset),
1595 		.length = htole16(length),
1596 		.type = htole16(section),
1597 		.op_code = IWM_NVM_READ_OPCODE,
1598 	};
1599 	struct iwm_nvm_access_resp *nvm_resp;
1600 	struct iwm_rx_packet *pkt;
1601 	struct iwm_host_cmd cmd = {
1602 		.id = IWM_NVM_ACCESS_CMD,
1603 		.flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1604 		    IWM_CMD_SEND_IN_RFKILL,
1605 		.data = { &nvm_access_cmd, },
1606 	};
1607 	int ret, bytes_read, offset_read;
1608 	uint8_t *resp_data;
1609 
1610 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1611 
1612 	ret = iwm_send_cmd(sc, &cmd);
1613 	if (ret)
1614 		return ret;
1615 
1616 	pkt = cmd.resp_pkt;
1617 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1618 		device_printf(sc->sc_dev,
1619 		    "%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
1620 		    __func__, pkt->hdr.flags);
1621 		ret = EIO;
1622 		goto exit;
1623 	}
1624 
1625 	/* Extract NVM response */
1626 	nvm_resp = (void *)pkt->data;
1627 
1628 	ret = le16toh(nvm_resp->status);
1629 	bytes_read = le16toh(nvm_resp->length);
1630 	offset_read = le16toh(nvm_resp->offset);
1631 	resp_data = nvm_resp->data;
1632 	if (ret) {
1633 		device_printf(sc->sc_dev,
1634 		    "%s: NVM access command failed with status %d\n",
1635 		    __func__, ret);
1636 		ret = EINVAL;
1637 		goto exit;
1638 	}
1639 
1640 	if (offset_read != offset) {
1641 		device_printf(sc->sc_dev,
1642 		    "%s: NVM ACCESS response with invalid offset %d\n",
1643 		    __func__, offset_read);
1644 		ret = EINVAL;
1645 		goto exit;
1646 	}
1647 
1648 	memcpy(data + offset, resp_data, bytes_read);
1649 	*len = bytes_read;
1650 
1651  exit:
1652 	iwm_free_resp(sc, &cmd);
1653 	return ret;
1654 }
1655 
1656 /*
1657  * Reads an NVM section completely.
1658  * NICs prior to 7000 family doesn't have a real NVM, but just read
1659  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1660  * by uCode, we need to manually check in this case that we don't
1661  * overflow and try to read more than the EEPROM size.
1662  * For 7000 family NICs, we supply the maximal size we can read, and
1663  * the uCode fills the response with as much data as we can,
1664  * without overflowing, so no check is needed.
1665  */
1666 static int
1667 iwm_nvm_read_section(struct iwm_softc *sc,
1668 	uint16_t section, uint8_t *data, uint16_t *len)
1669 {
1670 	uint16_t length, seglen;
1671 	int error;
1672 
1673 	/* Set nvm section read length */
1674 	length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1675 	*len = 0;
1676 
1677 	/* Read the NVM until exhausted (reading less than requested) */
1678 	while (seglen == length) {
1679 		error = iwm_nvm_read_chunk(sc,
1680 		    section, *len, length, data, &seglen);
1681 		if (error) {
1682 			device_printf(sc->sc_dev,
1683 			    "Cannot read NVM from section "
1684 			    "%d offset %d, length %d\n",
1685 			    section, *len, length);
1686 			return error;
1687 		}
1688 		*len += seglen;
1689 	}
1690 
1691 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1692 	    "NVM section %d read completed\n", section);
1693 	return 0;
1694 }
1695 
1696 /*
1697  * BEGIN IWM_NVM_PARSE
1698  */
1699 
1700 /* NVM offsets (in words) definitions */
enum wkp_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

	/*
	 * NVM SW-Section offset (in words) definitions.  All but
	 * IWM_NVM_SW_SECTION itself are relative to the start of the
	 * SW section (they index the nvm_sw pointer directly).
	 */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1717 
1718 /* SKU Capabilities (actual values from NVM definition) */
1719 enum nvm_sku_bits {
1720 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1721 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1722 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1723 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1724 };
1725 
/*
 * radio config bits (actual values from NVM definition)
 * Arguments are fully parenthesized so the macros stay correct for
 * compound expressions (e.g. `a | b`) -- CERT PRE01-C.
 */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   ((x) & 0x3)          /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   (((x) >> 2)  & 0x3)  /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   (((x) >> 4)  & 0x3)  /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   (((x) >> 6)  & 0x3)  /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) (((x) >> 8)  & 0xF)  /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) (((x) >> 12) & 0xF)  /* bits 12-15 */

#define DEFAULT_MAX_TX_POWER 16
1735 
/**
 * enum iwm_nvm_channel_flags - channel flags in NVM
 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
 * @IWM_NVM_CHANNEL_RADAR: radar detection required
 * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
 *
 * These are translated to net80211 channel flags by
 * iwm_eeprom_channel_flags().
 */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
1760 
1761 /*
1762  * Translate EEPROM flags to net80211.
1763  */
1764 static uint32_t
1765 iwm_eeprom_channel_flags(uint16_t ch_flags)
1766 {
1767 	uint32_t nflags;
1768 
1769 	nflags = 0;
1770 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1771 		nflags |= IEEE80211_CHAN_PASSIVE;
1772 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1773 		nflags |= IEEE80211_CHAN_NOADHOC;
1774 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1775 		nflags |= IEEE80211_CHAN_DFS;
1776 		/* Just in case. */
1777 		nflags |= IEEE80211_CHAN_NOADHOC;
1778 	}
1779 
1780 	return (nflags);
1781 }
1782 
/*
 * Register the channels in nvm_ch_flags[ch_idx..ch_num) with net80211
 * for the given modulation bands.  Channels without the VALID flag
 * are skipped; the loop stops early if ieee80211_add_channel() fails
 * (e.g. the chans[] array is full).
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, int ch_num, const uint8_t bands[])
{
	const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
	uint32_t nflags;
	uint16_t ch_flags;
	uint8_t ieee;
	int error;

	for (; ch_idx < ch_num; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		/* IEEE channel number for this NVM slot. */
		ieee = iwm_nvm_channels[ch_idx];

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
			    ieee, ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4");
			continue;
		}

		nflags = iwm_eeprom_channel_flags(ch_flags);
		error = ieee80211_add_channel(chans, maxchans, nchans,
		    ieee, 0, 0, nflags, bands);
		if (error != 0)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
		    "Ch. %d Flags %x [%sGHz] - Added\n",
		    ieee, ch_flags,
		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
		    "5.2" : "2.4");
	}
}
1819 
/*
 * net80211 callback: build the device channel list from the parsed
 * NVM data.  2GHz channels are added as 11b/g (channel 14 as 11b
 * only); 5GHz channels are added only if the SKU enables that band.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];

	memset(bands, 0, sizeof(bands));
	/* 1-13: 11b/g channels. */
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
	    IWM_NUM_2GHZ_CHANNELS - 1, bands);

	/* 14: 11b channel only. */
	clrbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans,
	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

	if (data->sku_cap_band_52GHz_enable) {
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		iwm_add_channel_band(sc, chans, maxchans, nchans,
		    IWM_NUM_2GHZ_CHANNELS, nitems(iwm_nvm_channels), bands);
	}
}
1847 
/*
 * Parse the raw HW/SW/calibration NVM sections into sc->sc_nvm.
 * nvm_hw/nvm_sw/nvm_calib point to little-endian 16-bit word arrays
 * and all offsets are in words (see enum wkp_nvm_offsets).
 * tx_chains/rx_chains are currently unused here.  Always returns 0.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc,
	const uint16_t *nvm_hw, const uint16_t *nvm_sw,
	const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[IEEE80211_ADDR_LEN];
	uint16_t radio_cfg, sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	/* Decode the radio configuration word into its subfields. */
	radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);

	sku = le16_to_cpup(nvm_sw + IWM_SKU);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n deliberately disabled regardless of the SKU bit. */
	data->sku_cap_11n_enable = 0;

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	/*
	 * NOTE(review): copied without le16 conversion, unlike the other
	 * fields -- confirm the consumer expects raw NVM byte order.
	 */
	data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
	data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);

	/* The byte order is little endian 16 bit, meaning 214365 */
	IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
	data->hw_addr[0] = hw_addr[1];
	data->hw_addr[1] = hw_addr[0];
	data->hw_addr[2] = hw_addr[3];
	data->hw_addr[3] = hw_addr[2];
	data->hw_addr[4] = hw_addr[5];
	data->hw_addr[5] = hw_addr[4];

	memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
	    sizeof(data->nvm_ch_flags));
	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
1894 
1895 /*
1896  * END NVM PARSE
1897  */
1898 
/*
 * One NVM section as read from the device.  The data buffer is
 * kmalloc'ed by iwm_nvm_init() when the section is read.
 */
struct iwm_nvm_section {
	uint16_t length;		/* section length in bytes */
	const uint8_t *data;		/* section contents (or NULL if unread) */
};
1903 
1904 static int
1905 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
1906 {
1907 	const uint16_t *hw, *sw, *calib;
1908 
1909 	/* Checking for required sections */
1910 	if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
1911 	    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
1912 		device_printf(sc->sc_dev,
1913 		    "%s: Can't parse empty NVM sections\n",
1914 		    __func__);
1915 		return ENOENT;
1916 	}
1917 
1918 	hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
1919 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
1920 	calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
1921 	return iwm_parse_nvm_data(sc, hw, sw, calib,
1922 	    IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
1923 }
1924 
1925 static int
1926 iwm_nvm_init(struct iwm_softc *sc)
1927 {
1928 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
1929 	int i, section, error;
1930 	uint16_t len;
1931 	uint8_t *nvm_buffer, *temp;
1932 
1933 	/* Read From FW NVM */
1934 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1935 	    "%s: Read NVM\n",
1936 	    __func__);
1937 
1938 	/* TODO: find correct NVM max size for a section */
1939 	nvm_buffer = kmalloc(IWM_OTP_LOW_IMAGE_SIZE, M_DEVBUF, M_INTWAIT);
1940 	if (nvm_buffer == NULL)
1941 		return (ENOMEM);
1942 	for (i = 0; i < nitems(nvm_to_read); i++) {
1943 		section = nvm_to_read[i];
1944 		KASSERT(section <= nitems(nvm_sections),
1945 		    ("too many sections"));
1946 
1947 		error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
1948 		if (error)
1949 			break;
1950 
1951 		temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
1952 		if (temp == NULL) {
1953 			error = ENOMEM;
1954 			break;
1955 		}
1956 		memcpy(temp, nvm_buffer, len);
1957 		nvm_sections[section].data = temp;
1958 		nvm_sections[section].length = len;
1959 	}
1960 	kfree(nvm_buffer, M_DEVBUF);
1961 	if (error)
1962 		return error;
1963 
1964 	return iwm_parse_nvm_sections(sc, nvm_sections);
1965 }
1966 
1967 /*
1968  * Firmware loading gunk.  This is kind of a weird hybrid between the
1969  * iwn driver and the Linux iwlwifi driver.
1970  */
1971 
/*
 * Upload one firmware chunk of 'byte_cnt' bytes to device SRAM at
 * 'dst_addr' using the service DMA channel, then sleep until the
 * "chunk done" condition is signalled (presumably the interrupt
 * handler sets sc_fw_chunk_done and wakes &sc->sc_fw -- the handler
 * is not visible here) or the 1 second sleep times out.
 *
 * Returns 0 on success, EBUSY if the NIC cannot be locked, or the
 * errno from the sleep (e.g. EWOULDBLOCK on timeout).
 */
static int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
	const uint8_t *section, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	int error;

	/*
	 * Copy firmware section into pre-allocated DMA-safe memory.
	 * NOTE(review): byte_cnt is not validated against the fw_dma
	 * buffer size -- assumes callers never pass an oversized chunk;
	 * confirm against the firmware parser.
	 */
	memcpy(dma->vaddr, section, byte_cnt);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	sc->sc_fw_chunk_done = 0;

	/* Pause the DMA channel while the transfer is programmed. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	/* Destination address in device SRAM. */
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	/* Source address in host DRAM: low bits ... */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	/* ... then high bits combined with the byte count. */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	/* One transfer buffer in the queue; mark it valid. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	/* Kick off the transfer; interrupt the host at end of TFD. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait 1s for this segment to load */
	error = 0;
	while (!sc->sc_fw_chunk_done) {
#if defined(__DragonFly__)
		error = iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz);
#else
		error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
#endif
		if (error)
			break;
	}

	return error;
}
2022 
/*
 * Upload every section of the given ucode image to the device, take
 * the device out of reset, and wait (in up-to-ten 100ms sleeps) for
 * the firmware "alive" indication (sc_uc.uc_intr, woken on &sc->sc_uc).
 *
 * Returns 0 on success or an errno from the chunk upload / the sleep.
 */
static int
iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_sects *fws;
	int error, i, w;
	const void *data;
	uint32_t dlen;
	uint32_t offset;

	sc->sc_uc.uc_intr = 0;

	/* Upload each firmware section to its device-side offset. */
	fws = &sc->sc_fw.fw_sects[ucode_type];
	for (i = 0; i < fws->fw_count; i++) {
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;
		IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
		    "LOAD FIRMWARE type %d offset %u len %d\n",
		    ucode_type, offset, dlen);
		error = iwm_firmware_load_chunk(sc, offset, data, dlen);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: chunk %u of %u returned error %02d\n",
			    __func__, i, fws->fw_count, error);
			return error;
		}
	}

	/* wait for the firmware to load */
	/* Releasing the CPU from reset starts the loaded image. */
	IWM_WRITE(sc, IWM_CSR_RESET, 0);

	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
#if defined(__DragonFly__)
		error = iwmsleep(&sc->sc_uc, &sc->sc_lk, 0, "iwmuc", hz/10);
#else
		error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
#endif
	}

	return error;
}
2064 
/*
 * Initialize the NIC, clear the RF-kill handshake bits, enable host
 * interrupts, and upload the requested ucode image to the hardware.
 * Returns 0 on success or an errno from init/load.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error;

	/* Ack/clear any pending interrupts before (re)initializing. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	if ((error = iwm_nic_init(sc)) != 0) {
		device_printf(sc->sc_dev, "unable to init nic\n");
		return error;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwm_load_firmware(sc, ucode_type);
}
2094 
2095 static int
2096 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2097 {
2098 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2099 		.valid = htole32(valid_tx_ant),
2100 	};
2101 
2102 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2103 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2104 }
2105 
2106 static int
2107 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2108 {
2109 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2110 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2111 
2112 	/* Set parameters */
2113 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2114 	phy_cfg_cmd.calib_control.event_trigger =
2115 	    sc->sc_default_calib[ucode_type].event_trigger;
2116 	phy_cfg_cmd.calib_control.flow_trigger =
2117 	    sc->sc_default_calib[ucode_type].flow_trigger;
2118 
2119 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2120 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2121 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2122 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2123 }
2124 
2125 static int
2126 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2127 	enum iwm_ucode_type ucode_type)
2128 {
2129 	enum iwm_ucode_type old_type = sc->sc_uc_current;
2130 	int error;
2131 
2132 	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2133 		kprintf("iwm_read_firmweare: failed %d\n",
2134 			error);
2135 		return error;
2136 	}
2137 
2138 	sc->sc_uc_current = ucode_type;
2139 	error = iwm_start_fw(sc, ucode_type);
2140 	if (error) {
2141 		kprintf("iwm_start_fw: failed %d\n", error);
2142 		sc->sc_uc_current = old_type;
2143 		return error;
2144 	}
2145 
2146 	error = iwm_post_alive(sc);
2147 	if (error) {
2148 		kprintf("iwm_fw_alive: failed %d\n", error);
2149 	}
2150 	return error;
2151 }
2152 
2153 /*
2154  * mvm misc bits
2155  */
2156 
2157 static int
2158 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2159 {
2160 	int error;
2161 
2162 	/* do not operate with rfkill switch turned on */
2163 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2164 		device_printf(sc->sc_dev,
2165 		    "radio is disabled by hardware switch\n");
2166 		return EPERM;
2167 	}
2168 
2169 	sc->sc_init_complete = 0;
2170 	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2171 	    IWM_UCODE_TYPE_INIT)) != 0) {
2172 		device_printf(sc->sc_dev, "failed to load init firmware\n");
2173 		return error;
2174 	}
2175 
2176 	if (justnvm) {
2177 		if ((error = iwm_nvm_init(sc)) != 0) {
2178 			device_printf(sc->sc_dev, "failed to read nvm\n");
2179 			return error;
2180 		}
2181 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
2182 
2183 		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
2184 		    + sc->sc_capa_max_probe_len
2185 		    + IWM_MAX_NUM_SCAN_CHANNELS
2186 		    * sizeof(struct iwm_scan_channel);
2187 		sc->sc_scan_cmd = kmalloc(sc->sc_scan_cmd_len, M_DEVBUF,
2188 		    M_INTWAIT);
2189 		if (sc->sc_scan_cmd == NULL)
2190 			return (ENOMEM);
2191 
2192 		return 0;
2193 	}
2194 
2195 	/* Send TX valid antennas before triggering calibrations */
2196 	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0) {
2197 		kprintf("iwm_send_tx_ant_cfg: failed %d\n", error);
2198 		return error;
2199 	}
2200 
2201 	/*
2202 	* Send phy configurations command to init uCode
2203 	* to start the 16.0 uCode init image internal calibrations.
2204 	*/
2205 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
2206 		device_printf(sc->sc_dev,
2207 		    "%s: failed to run internal calibration: %d\n",
2208 		    __func__, error);
2209 		return error;
2210 	}
2211 
2212 	/*
2213 	 * Nothing to do but wait for the init complete notification
2214 	 * from the firmware
2215 	 */
2216 	while (!sc->sc_init_complete) {
2217 #if defined(__DragonFly__)
2218 		error = iwmsleep(&sc->sc_init_complete, &sc->sc_lk,
2219 				 0, "iwminit", 2*hz);
2220 #else
2221 		error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2222 				 0, "iwminit", 2*hz);
2223 #endif
2224 		if (error) {
2225 			kprintf("init complete failed %d\n",
2226 				sc->sc_init_complete);
2227 			break;
2228 		}
2229 	}
2230 
2231 	return error;
2232 }
2233 
2234 /*
2235  * receive side
2236  */
2237 
2238 /* (re)stock rx ring, called at init-time and at runtime */
2239 static int
2240 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2241 {
2242 	struct iwm_rx_ring *ring = &sc->rxq;
2243 	struct iwm_rx_data *data = &ring->data[idx];
2244 	struct mbuf *m;
2245 	int error;
2246 	bus_addr_t paddr;
2247 
2248 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2249 	if (m == NULL)
2250 		return ENOBUFS;
2251 
2252 	if (data->m != NULL)
2253 		bus_dmamap_unload(ring->data_dmat, data->map);
2254 
2255 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2256 	error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2257 	if (error != 0) {
2258 		device_printf(sc->sc_dev,
2259 		    "%s: could not create RX buf DMA map, error %d\n",
2260 		    __func__, error);
2261 		goto fail;
2262 	}
2263 	data->m = m;
2264 	error = bus_dmamap_load(ring->data_dmat, data->map,
2265 	    mtod(data->m, void *), IWM_RBUF_SIZE, iwm_dma_map_addr,
2266 	    &paddr, BUS_DMA_NOWAIT);
2267 	if (error != 0 && error != EFBIG) {
2268 		device_printf(sc->sc_dev,
2269 		    "%s: can't map mbuf, error %d\n", __func__,
2270 		    error);
2271 		goto fail;
2272 	}
2273 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2274 
2275 	/* Update RX descriptor. */
2276 	KKASSERT((paddr & 255) == 0);
2277 	ring->desc[idx] = htole32(paddr >> 8);
2278 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2279 	    BUS_DMASYNC_PREWRITE);
2280 
2281 	return 0;
2282 fail:
2283 	return error;
2284 }
2285 
2286 #define IWM_RSSI_OFFSET 50
2287 static int
2288 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2289 {
2290 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2291 	uint32_t agc_a, agc_b;
2292 	uint32_t val;
2293 
2294 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2295 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2296 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2297 
2298 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2299 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2300 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2301 
2302 	/*
2303 	 * dBm = rssi dB - agc dB - constant.
2304 	 * Higher AGC (higher radio gain) means lower signal.
2305 	 */
2306 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2307 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2308 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2309 
2310 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2311 	    "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2312 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2313 
2314 	return max_rssi_dbm;
2315 }
2316 
2317 /*
2318  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2319  * values are reported by the fw as positive values - need to negate
2320  * to obtain their dBM.  Account for missing antennas by replacing 0
2321  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
2322  */
2323 static int
2324 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2325 {
2326 	int energy_a, energy_b, energy_c, max_energy;
2327 	uint32_t val;
2328 
2329 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2330 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2331 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
2332 	energy_a = energy_a ? -energy_a : -256;
2333 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2334 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
2335 	energy_b = energy_b ? -energy_b : -256;
2336 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2337 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
2338 	energy_c = energy_c ? -energy_c : -256;
2339 	max_energy = MAX(energy_a, energy_b);
2340 	max_energy = MAX(max_energy, energy_c);
2341 
2342 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2343 	    "energy In A %d B %d C %d , and max %d\n",
2344 	    energy_a, energy_b, energy_c, max_energy);
2345 
2346 	return max_energy;
2347 }
2348 
/*
 * Handle an IWM_REPLY_RX_PHY_CMD notification: stash the PHY info so
 * the MPDU notification that follows (iwm_mvm_rx_rx_mpdu) can use it.
 */
static void
iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Saved for the subsequent MPDU handler. */
	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
2360 
2361 /*
2362  * Retrieve the average noise (in dBm) among receivers.
2363  */
2364 static int
2365 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2366 {
2367 	int i, total, nbant, noise;
2368 
2369 	total = nbant = noise = 0;
2370 	for (i = 0; i < 3; i++) {
2371 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2372 		if (noise) {
2373 			total += noise;
2374 			nbant++;
2375 		}
2376 	}
2377 
2378 	/* There should be at least one antenna but check anyway. */
2379 	return (nbant == 0) ? -127 : (total / nbant) - 107;
2380 }
2381 
2382 /*
2383  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2384  *
2385  * Handles the actual data of the Rx packet from the fw
2386  */
2387 static void
2388 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2389 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2390 {
2391 	struct ieee80211com *ic = &sc->sc_ic;
2392 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2393 	struct ieee80211_frame *wh;
2394 	struct ieee80211_node *ni;
2395 	struct ieee80211_rx_stats rxs;
2396 	struct mbuf *m;
2397 	struct iwm_rx_phy_info *phy_info;
2398 	struct iwm_rx_mpdu_res_start *rx_res;
2399 	uint32_t len;
2400 	uint32_t rx_pkt_status;
2401 	int rssi;
2402 
2403 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2404 
2405 	phy_info = &sc->sc_last_phy_info;
2406 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2407 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2408 	len = le16toh(rx_res->byte_count);
2409 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
2410 
2411 	m = data->m;
2412 	m->m_data = pkt->data + sizeof(*rx_res);
2413 	m->m_pkthdr.len = m->m_len = len;
2414 
2415 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
2416 		device_printf(sc->sc_dev,
2417 		    "dsp size out of range [0,20]: %d\n",
2418 		    phy_info->cfg_phy_cnt);
2419 		return;
2420 	}
2421 
2422 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
2423 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
2424 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2425 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
2426 		return; /* drop */
2427 	}
2428 
2429 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
2430 		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
2431 	} else {
2432 		rssi = iwm_mvm_calc_rssi(sc, phy_info);
2433 	}
2434 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
2435 	rssi = MIN(rssi, sc->sc_max_rssi);	/* clip to max. 100% */
2436 
2437 	/* replenish ring for the buffer we're going to feed to the sharks */
2438 	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
2439 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
2440 		    __func__);
2441 		return;
2442 	}
2443 
2444 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2445 
2446 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2447 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
2448 	    __func__,
2449 	    le16toh(phy_info->channel),
2450 	    le16toh(phy_info->phy_flags));
2451 
2452 	/*
2453 	 * Populate an RX state struct with the provided information.
2454 	 */
2455 	bzero(&rxs, sizeof(rxs));
2456 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
2457 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
2458 	rxs.c_ieee = le16toh(phy_info->channel);
2459 	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
2460 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
2461 	} else {
2462 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
2463 	}
2464 	rxs.rssi = rssi - sc->sc_noise;
2465 	rxs.nf = sc->sc_noise;
2466 
2467 	if (ieee80211_radiotap_active_vap(vap)) {
2468 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
2469 
2470 		tap->wr_flags = 0;
2471 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
2472 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2473 		tap->wr_chan_freq = htole16(rxs.c_freq);
2474 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
2475 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
2476 		tap->wr_dbm_antsignal = (int8_t)rssi;
2477 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
2478 		tap->wr_tsft = phy_info->system_timestamp;
2479 		switch (phy_info->rate) {
2480 		/* CCK rates. */
2481 		case  10: tap->wr_rate =   2; break;
2482 		case  20: tap->wr_rate =   4; break;
2483 		case  55: tap->wr_rate =  11; break;
2484 		case 110: tap->wr_rate =  22; break;
2485 		/* OFDM rates. */
2486 		case 0xd: tap->wr_rate =  12; break;
2487 		case 0xf: tap->wr_rate =  18; break;
2488 		case 0x5: tap->wr_rate =  24; break;
2489 		case 0x7: tap->wr_rate =  36; break;
2490 		case 0x9: tap->wr_rate =  48; break;
2491 		case 0xb: tap->wr_rate =  72; break;
2492 		case 0x1: tap->wr_rate =  96; break;
2493 		case 0x3: tap->wr_rate = 108; break;
2494 		/* Unknown rate: should not happen. */
2495 		default:  tap->wr_rate =   0;
2496 		}
2497 	}
2498 
2499 	IWM_UNLOCK(sc);
2500 	if (ni != NULL) {
2501 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
2502 		ieee80211_input_mimo(ni, m, &rxs);
2503 		ieee80211_free_node(ni);
2504 	} else {
2505 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
2506 		ieee80211_input_mimo_all(ic, m, &rxs);
2507 	}
2508 	IWM_LOCK(sc);
2509 }
2510 
2511 static int
2512 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
2513 	struct iwm_node *in)
2514 {
2515 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
2516 	struct ieee80211_node *ni = &in->in_ni;
2517 	struct ieee80211vap *vap = ni->ni_vap;
2518 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
2519 	int failack = tx_resp->failure_frame;
2520 
2521 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
2522 
2523 	/* Update rate control statistics. */
2524 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
2525 	    __func__,
2526 	    (int) le16toh(tx_resp->status.status),
2527 	    (int) le16toh(tx_resp->status.sequence),
2528 	    tx_resp->frame_count,
2529 	    tx_resp->bt_kill_count,
2530 	    tx_resp->failure_rts,
2531 	    tx_resp->failure_frame,
2532 	    le32toh(tx_resp->initial_rate),
2533 	    (int) le16toh(tx_resp->wireless_media_time));
2534 
2535 	if (status != IWM_TX_STATUS_SUCCESS &&
2536 	    status != IWM_TX_STATUS_DIRECT_DONE) {
2537 		ieee80211_ratectl_tx_complete(vap, ni,
2538 		    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
2539 		return (1);
2540 	} else {
2541 		ieee80211_ratectl_tx_complete(vap, ni,
2542 		    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
2543 		return (0);
2544 	}
2545 }
2546 
/*
 * Handle a TX command response from the firmware: update rate control
 * via iwm_mvm_rx_tx_cmd_single(), unmap and complete the transmitted
 * mbuf, and restart transmission if the ring drains below the low
 * watermark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* A response arrived, so the TX watchdog can be disarmed. */
	sc->sc_tx_timer = 0;

	/* Non-zero status means the frame was not delivered. */
	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	/* Hands the mbuf to net80211 (which frees it) with the status. */
	ieee80211_tx_complete(&in->in_ni, m, status);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			iwm_start(sc);
		}
	}
}
2594 
2595 /*
2596  * transmit side
2597  */
2598 
2599 /*
2600  * Process a "command done" firmware notification.  This is where we wakeup
2601  * processes waiting for a synchronous command completion.
2602  * from if_iwn
2603  */
2604 static void
2605 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
2606 {
2607 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
2608 	struct iwm_tx_data *data;
2609 
2610 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
2611 		return;	/* Not a command ack. */
2612 	}
2613 
2614 	data = &ring->data[pkt->hdr.idx];
2615 
2616 	/* If the command was mapped in an mbuf, free it. */
2617 	if (data->m != NULL) {
2618 		bus_dmamap_sync(ring->data_dmat, data->map,
2619 		    BUS_DMASYNC_POSTWRITE);
2620 		bus_dmamap_unload(ring->data_dmat, data->map);
2621 		m_freem(data->m);
2622 		data->m = NULL;
2623 	}
2624 	wakeup(&ring->desc[pkt->hdr.idx]);
2625 }
2626 
#if 0
/*
 * necessary only for block ack mode
 */
/*
 * Update the TX scheduler's byte-count table for the frame at
 * (qid, idx): the firmware reads the (padded) frame length from this
 * shared DMA table.  Currently compiled out.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	/* Entry encodes the station id in the top 4 bits. */
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
2659 
2660 /*
2661  * Take an 802.11 (non-n) rate, find the relevant rate
2662  * table entry.  return the index into in_ridx[].
2663  *
2664  * The caller then uses that index back into in_ridx
2665  * to figure out the rate index programmed /into/
2666  * the firmware for this given node.
2667  */
2668 static int
2669 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
2670     uint8_t rate)
2671 {
2672 	int i;
2673 	uint8_t r;
2674 
2675 	for (i = 0; i < nitems(in->in_ridx); i++) {
2676 		r = iwm_rates[in->in_ridx[i]].rate;
2677 		if (rate == r)
2678 			return (i);
2679 	}
2680 	/* XXX Return the first */
2681 	/* XXX TODO: have it return the /lowest/ */
2682 	return (0);
2683 }
2684 
2685 /*
2686  * Fill in the rate related information for a transmit command.
2687  */
2688 static const struct iwm_rate *
2689 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
2690 	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
2691 {
2692 	struct ieee80211com *ic = &sc->sc_ic;
2693 	struct ieee80211_node *ni = &in->in_ni;
2694 	const struct iwm_rate *rinfo;
2695 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2696 	int ridx, rate_flags;
2697 
2698 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
2699 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
2700 
2701 	/*
2702 	 * XXX TODO: everything about the rate selection here is terrible!
2703 	 */
2704 
2705 	if (type == IEEE80211_FC0_TYPE_DATA) {
2706 		int i;
2707 		/* for data frames, use RS table */
2708 		(void) ieee80211_ratectl_rate(ni, NULL, 0);
2709 		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
2710 		ridx = in->in_ridx[i];
2711 
2712 		/* This is the index into the programmed table */
2713 		tx->initial_rate_index = i;
2714 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
2715 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
2716 		    "%s: start with i=%d, txrate %d\n",
2717 		    __func__, i, iwm_rates[ridx].rate);
2718 	} else {
2719 		/*
2720 		 * For non-data, use the lowest supported rate for the given
2721 		 * operational mode.
2722 		 *
2723 		 * Note: there may not be any rate control information available.
2724 		 * This driver currently assumes if we're transmitting data
2725 		 * frames, use the rate control table.  Grr.
2726 		 *
2727 		 * XXX TODO: use the configured rate for the traffic type!
2728 		 * XXX TODO: this should be per-vap, not curmode; as we later
2729 		 * on we'll want to handle off-channel stuff (eg TDLS).
2730 		 */
2731 		if (ic->ic_curmode == IEEE80211_MODE_11A) {
2732 			/*
2733 			 * XXX this assumes the mode is either 11a or not 11a;
2734 			 * definitely won't work for 11n.
2735 			 */
2736 			ridx = IWM_RIDX_OFDM;
2737 		} else {
2738 			ridx = IWM_RIDX_CCK;
2739 		}
2740 	}
2741 
2742 	rinfo = &iwm_rates[ridx];
2743 
2744 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
2745 	    __func__, ridx,
2746 	    rinfo->rate,
2747 	    !! (IWM_RIDX_IS_CCK(ridx))
2748 	    );
2749 
2750 	/* XXX TODO: hard-coded TX antenna? */
2751 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
2752 	if (IWM_RIDX_IS_CCK(ridx))
2753 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
2754 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
2755 
2756 	return rinfo;
2757 }
2758 
2759 #define TB0_SIZE 16
2760 static int
2761 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
2762 {
2763 	struct ieee80211com *ic = &sc->sc_ic;
2764 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2765 	struct iwm_node *in = IWM_NODE(ni);
2766 	struct iwm_tx_ring *ring;
2767 	struct iwm_tx_data *data;
2768 	struct iwm_tfd *desc;
2769 	struct iwm_device_cmd *cmd;
2770 	struct iwm_tx_cmd *tx;
2771 	struct ieee80211_frame *wh;
2772 	struct ieee80211_key *k = NULL;
2773 	const struct iwm_rate *rinfo;
2774 	uint32_t flags;
2775 	u_int hdrlen;
2776 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
2777 	int nsegs;
2778 	uint8_t tid, type;
2779 	int i, totlen, error, pad;
2780 
2781 	wh = mtod(m, struct ieee80211_frame *);
2782 	hdrlen = ieee80211_anyhdrsize(wh);
2783 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2784 	tid = 0;
2785 	ring = &sc->txq[ac];
2786 	desc = &ring->desc[ring->cur];
2787 	memset(desc, 0, sizeof(*desc));
2788 	data = &ring->data[ring->cur];
2789 
2790 	/* Fill out iwm_tx_cmd to send to the firmware */
2791 	cmd = &ring->cmd[ring->cur];
2792 	cmd->hdr.code = IWM_TX_CMD;
2793 	cmd->hdr.flags = 0;
2794 	cmd->hdr.qid = ring->qid;
2795 	cmd->hdr.idx = ring->cur;
2796 
2797 	tx = (void *)cmd->data;
2798 	memset(tx, 0, sizeof(*tx));
2799 
2800 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
2801 
2802 	/* Encrypt the frame if need be. */
2803 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
2804 		/* Retrieve key for TX && do software encryption. */
2805 		k = ieee80211_crypto_encap(ni, m);
2806 		if (k == NULL) {
2807 			m_freem(m);
2808 			return (ENOBUFS);
2809 		}
2810 		/* 802.11 header may have moved. */
2811 		wh = mtod(m, struct ieee80211_frame *);
2812 	}
2813 
2814 	if (ieee80211_radiotap_active_vap(vap)) {
2815 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
2816 
2817 		tap->wt_flags = 0;
2818 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
2819 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
2820 		tap->wt_rate = rinfo->rate;
2821 		if (k != NULL)
2822 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2823 		ieee80211_radiotap_tx(vap, m);
2824 	}
2825 
2826 
2827 	totlen = m->m_pkthdr.len;
2828 
2829 	flags = 0;
2830 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2831 		flags |= IWM_TX_CMD_FLG_ACK;
2832 	}
2833 
2834 	if (type != IEEE80211_FC0_TYPE_DATA
2835 	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
2836 	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2837 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
2838 	}
2839 
2840 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
2841 	    type != IEEE80211_FC0_TYPE_DATA)
2842 		tx->sta_id = sc->sc_aux_sta.sta_id;
2843 	else
2844 		tx->sta_id = IWM_STATION_ID;
2845 
2846 	if (type == IEEE80211_FC0_TYPE_MGT) {
2847 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2848 
2849 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
2850 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
2851 			tx->pm_frame_timeout = htole16(3);
2852 		else
2853 			tx->pm_frame_timeout = htole16(2);
2854 	} else {
2855 		tx->pm_frame_timeout = htole16(0);
2856 	}
2857 
2858 	if (hdrlen & 3) {
2859 		/* First segment length must be a multiple of 4. */
2860 		flags |= IWM_TX_CMD_FLG_MH_PAD;
2861 		pad = 4 - (hdrlen & 3);
2862 	} else
2863 		pad = 0;
2864 
2865 	tx->driver_txop = 0;
2866 	tx->next_frame_len = 0;
2867 
2868 	tx->len = htole16(totlen);
2869 	tx->tid_tspec = tid;
2870 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
2871 
2872 	/* Set physical address of "scratch area". */
2873 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
2874 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
2875 
2876 	/* Copy 802.11 header in TX command. */
2877 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
2878 
2879 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
2880 
2881 	tx->sec_ctl = 0;
2882 	tx->tx_flags |= htole32(flags);
2883 
2884 	/* Trim 802.11 header. */
2885 	m_adj(m, hdrlen);
2886 #if defined(__DragonFly__)
2887 	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, m,
2888 					    segs, IWM_MAX_SCATTER - 2,
2889 					    &nsegs, BUS_DMA_NOWAIT);
2890 #else
2891 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
2892 	    segs, &nsegs, BUS_DMA_NOWAIT);
2893 #endif
2894 	if (error != 0) {
2895 		if (error != EFBIG) {
2896 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
2897 			    error);
2898 			m_freem(m);
2899 			return error;
2900 		}
2901 		/* Too many DMA segments, linearize mbuf. */
2902 		if (m_defrag(m, M_NOWAIT)) {
2903 			device_printf(sc->sc_dev,
2904 			    "%s: could not defrag mbuf\n", __func__);
2905 			m_freem(m);
2906 			return (ENOBUFS);
2907 		}
2908 
2909 #if defined(__DragonFly__)
2910 		error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, m,
2911 						    segs, IWM_MAX_SCATTER - 2,
2912 						    &nsegs, BUS_DMA_NOWAIT);
2913 #else
2914 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
2915 		    segs, &nsegs, BUS_DMA_NOWAIT);
2916 #endif
2917 		if (error != 0) {
2918 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
2919 			    error);
2920 			m_freem(m);
2921 			return error;
2922 		}
2923 	}
2924 	data->m = m;
2925 	data->in = in;
2926 	data->done = 0;
2927 
2928 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2929 	    "sending txd %p, in %p\n", data, data->in);
2930 	KASSERT(data->in != NULL, ("node is NULL"));
2931 
2932 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2933 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
2934 	    ring->qid, ring->cur, totlen, nsegs,
2935 	    le32toh(tx->tx_flags),
2936 	    le32toh(tx->rate_n_flags),
2937 	    tx->initial_rate_index
2938 	    );
2939 
2940 	/* Fill TX descriptor. */
2941 	desc->num_tbs = 2 + nsegs;
2942 
2943 	desc->tbs[0].lo = htole32(data->cmd_paddr);
2944 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
2945 	    (TB0_SIZE << 4);
2946 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
2947 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
2948 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
2949 	      + hdrlen + pad - TB0_SIZE) << 4);
2950 
2951 	/* Other DMA segments are for data payload. */
2952 	for (i = 0; i < nsegs; i++) {
2953 		seg = &segs[i];
2954 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
2955 		desc->tbs[i+2].hi_n_len = \
2956 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
2957 		    | ((seg->ds_len) << 4);
2958 	}
2959 
2960 	bus_dmamap_sync(ring->data_dmat, data->map,
2961 	    BUS_DMASYNC_PREWRITE);
2962 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
2963 	    BUS_DMASYNC_PREWRITE);
2964 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2965 	    BUS_DMASYNC_PREWRITE);
2966 
2967 #if 0
2968 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
2969 #endif
2970 
2971 	/* Kick TX ring. */
2972 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
2973 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
2974 
2975 	/* Mark TX ring as full if we reach a certain threshold. */
2976 	if (++ring->queued > IWM_TX_RING_HIMARK) {
2977 		sc->qfullmsk |= 1 << ring->qid;
2978 	}
2979 
2980 	return 0;
2981 }
2982 
2983 static int
2984 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
2985     const struct ieee80211_bpf_params *params)
2986 {
2987 	struct ieee80211com *ic = ni->ni_ic;
2988 	struct iwm_softc *sc = ic->ic_softc;
2989 	int error = 0;
2990 
2991 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2992 	    "->%s begin\n", __func__);
2993 
2994 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
2995 		m_freem(m);
2996 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2997 		    "<-%s not RUNNING\n", __func__);
2998 		return (ENETDOWN);
2999         }
3000 
3001 	IWM_LOCK(sc);
3002 	/* XXX fix this */
3003         if (params == NULL) {
3004 		error = iwm_tx(sc, m, ni, 0);
3005 	} else {
3006 		error = iwm_tx(sc, m, ni, 0);
3007 	}
3008 	sc->sc_tx_timer = 5;
3009 	IWM_UNLOCK(sc);
3010 
3011         return (error);
3012 }
3013 
3014 /*
3015  * mvm/tx.c
3016  */
3017 
#if 0
/*
 * NOTE: compiled out.  Kept as reference for a proper tx-path flush;
 * iwm_release() currently performs a full device reset instead.
 */
/*
 * Note that there are transports that buffer frames before they reach
 * the firmware. This means that after flush_tx_path is called, the
 * queue might not be empty. The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 */
int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
                device_printf(sc->sc_dev,
		    "Flushing tx queue failed: %d\n", ret);
	return ret;
}
#endif
3045 
/*
 * Down-convert a v6 ADD_STA command to the older v5 layout, used for
 * firmware that lacks the IWM_UCODE_TLV_FLAGS_STA_KEY_CMD capability
 * (see iwm_mvm_send_add_sta_cmd_status()).  Common fields are copied
 * one-for-one; the memset keeps any v5 fields without a v6 source --
 * and all padding -- zeroed, since this struct goes to the firmware.
 */
static void
iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
	struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
{
	memset(cmd_v5, 0, sizeof(*cmd_v5));

	cmd_v5->add_modify = cmd_v6->add_modify;
	cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
	cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
	IEEE80211_ADDR_COPY(cmd_v5->addr, cmd_v6->addr);
	cmd_v5->sta_id = cmd_v6->sta_id;
	cmd_v5->modify_mask = cmd_v6->modify_mask;
	cmd_v5->station_flags = cmd_v6->station_flags;
	cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
	cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
	cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
	cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
	cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
	cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
	cmd_v5->assoc_id = cmd_v6->assoc_id;
	cmd_v5->beamform_flags = cmd_v6->beamform_flags;
	cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
}
3069 
3070 static int
3071 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3072 	struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
3073 {
3074 	struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
3075 
3076 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
3077 		return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
3078 		    sizeof(*cmd), cmd, status);
3079 	}
3080 
3081 	iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
3082 
3083 	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
3084 	    &cmd_v5, status);
3085 }
3086 
3087 /* send station add/update command to firmware */
3088 static int
3089 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3090 {
3091 	struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
3092 	int ret;
3093 	uint32_t status;
3094 
3095 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3096 
3097 	add_sta_cmd.sta_id = IWM_STATION_ID;
3098 	add_sta_cmd.mac_id_n_color
3099 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3100 	        IWM_DEFAULT_COLOR));
3101 	if (!update) {
3102 		add_sta_cmd.tfd_queue_msk = htole32(0xf);
3103 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3104 	}
3105 	add_sta_cmd.add_modify = update ? 1 : 0;
3106 	add_sta_cmd.station_flags_msk
3107 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3108 
3109 	status = IWM_ADD_STA_SUCCESS;
3110 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3111 	if (ret)
3112 		return ret;
3113 
3114 	switch (status) {
3115 	case IWM_ADD_STA_SUCCESS:
3116 		break;
3117 	default:
3118 		ret = EIO;
3119 		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3120 		break;
3121 	}
3122 
3123 	return ret;
3124 }
3125 
/* Register the BSS station with the firmware (initial add). */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
3131 
/* Push updated state for an already-registered BSS station. */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 1);
}
3137 
3138 static int
3139 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3140 	const uint8_t *addr, uint16_t mac_id, uint16_t color)
3141 {
3142 	struct iwm_mvm_add_sta_cmd_v6 cmd;
3143 	int ret;
3144 	uint32_t status;
3145 
3146 	memset(&cmd, 0, sizeof(cmd));
3147 	cmd.sta_id = sta->sta_id;
3148 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3149 
3150 	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3151 
3152 	if (addr)
3153 		IEEE80211_ADDR_COPY(cmd.addr, addr);
3154 
3155 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3156 	if (ret)
3157 		return ret;
3158 
3159 	switch (status) {
3160 	case IWM_ADD_STA_SUCCESS:
3161 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3162 		    "%s: Internal station added.\n", __func__);
3163 		return 0;
3164 	default:
3165 		device_printf(sc->sc_dev,
3166 		    "%s: Add internal station failed, status=0x%x\n",
3167 		    __func__, status);
3168 		ret = EIO;
3169 		break;
3170 	}
3171 	return ret;
3172 }
3173 
3174 static int
3175 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3176 {
3177 	int ret;
3178 
3179 	sc->sc_aux_sta.sta_id = 3;
3180 	sc->sc_aux_sta.tfd_queue_msk = 0;
3181 
3182 	ret = iwm_mvm_add_int_sta_common(sc,
3183 	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3184 
3185 	if (ret)
3186 		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3187 	return ret;
3188 }
3189 
3190 static int
3191 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3192 {
3193 	struct iwm_time_quota_cmd cmd;
3194 	int i, idx, ret, num_active_macs, quota, quota_rem;
3195 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3196 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3197 	uint16_t id;
3198 
3199 	memset(&cmd, 0, sizeof(cmd));
3200 
3201 	/* currently, PHY ID == binding ID */
3202 	if (in) {
3203 		id = in->in_phyctxt->id;
3204 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3205 		colors[id] = in->in_phyctxt->color;
3206 
3207 		if (1)
3208 			n_ifs[id] = 1;
3209 	}
3210 
3211 	/*
3212 	 * The FW's scheduling session consists of
3213 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3214 	 * equally between all the bindings that require quota
3215 	 */
3216 	num_active_macs = 0;
3217 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3218 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3219 		num_active_macs += n_ifs[i];
3220 	}
3221 
3222 	quota = 0;
3223 	quota_rem = 0;
3224 	if (num_active_macs) {
3225 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3226 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3227 	}
3228 
3229 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3230 		if (colors[i] < 0)
3231 			continue;
3232 
3233 		cmd.quotas[idx].id_and_color =
3234 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3235 
3236 		if (n_ifs[i] <= 0) {
3237 			cmd.quotas[idx].quota = htole32(0);
3238 			cmd.quotas[idx].max_duration = htole32(0);
3239 		} else {
3240 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3241 			cmd.quotas[idx].max_duration = htole32(0);
3242 		}
3243 		idx++;
3244 	}
3245 
3246 	/* Give the remainder of the session to the first binding */
3247 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3248 
3249 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3250 	    sizeof(cmd), &cmd);
3251 	if (ret)
3252 		device_printf(sc->sc_dev,
3253 		    "%s: Failed to send quota: %d\n", __func__, ret);
3254 	return ret;
3255 }
3256 
3257 /*
3258  * ieee80211 routines
3259  */
3260 
3261 /*
3262  * Change to AUTH state in 80211 state machine.  Roughly matches what
3263  * Linux does in bss_info_changed().
3264  */
3265 static int
3266 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3267 {
3268 	struct ieee80211_node *ni;
3269 	struct iwm_node *in;
3270 	struct iwm_vap *iv = IWM_VAP(vap);
3271 	uint32_t duration;
3272 	int error;
3273 
3274 	/*
3275 	 * XXX i have a feeling that the vap node is being
3276 	 * freed from underneath us. Grr.
3277 	 */
3278 	ni = ieee80211_ref_node(vap->iv_bss);
3279 	in = IWM_NODE(ni);
3280 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3281 	    "%s: called; vap=%p, bss ni=%p\n",
3282 	    __func__,
3283 	    vap,
3284 	    ni);
3285 
3286 	in->in_assoc = 0;
3287 
3288 	error = iwm_allow_mcast(vap, sc);
3289 	if (error) {
3290 		device_printf(sc->sc_dev,
3291 		    "%s: failed to set multicast\n", __func__);
3292 		goto out;
3293 	}
3294 
3295 	/*
3296 	 * This is where it deviates from what Linux does.
3297 	 *
3298 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
3299 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
3300 	 * and always does does a mac_ctx_changed().
3301 	 *
3302 	 * The openbsd port doesn't attempt to do that - it reset things
3303 	 * at odd states and does the add here.
3304 	 *
3305 	 * So, until the state handling is fixed (ie, we never reset
3306 	 * the NIC except for a firmware failure, which should drag
3307 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
3308 	 * contexts that are required), let's do a dirty hack here.
3309 	 */
3310 	if (iv->is_uploaded) {
3311 		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3312 			device_printf(sc->sc_dev,
3313 			    "%s: failed to update MAC\n", __func__);
3314 			goto out;
3315 		}
3316 		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3317 		    in->in_ni.ni_chan, 1, 1)) != 0) {
3318 			device_printf(sc->sc_dev,
3319 			    "%s: failed update phy ctxt\n", __func__);
3320 			goto out;
3321 		}
3322 		in->in_phyctxt = &sc->sc_phyctxt[0];
3323 
3324 		if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
3325 			device_printf(sc->sc_dev,
3326 			    "%s: binding update cmd\n", __func__);
3327 			goto out;
3328 		}
3329 		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3330 			device_printf(sc->sc_dev,
3331 			    "%s: failed to update sta\n", __func__);
3332 			goto out;
3333 		}
3334 	} else {
3335 		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3336 			device_printf(sc->sc_dev,
3337 			    "%s: failed to add MAC\n", __func__);
3338 			goto out;
3339 		}
3340 		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3341 		    in->in_ni.ni_chan, 1, 1)) != 0) {
3342 			device_printf(sc->sc_dev,
3343 			    "%s: failed add phy ctxt!\n", __func__);
3344 			error = ETIMEDOUT;
3345 			goto out;
3346 		}
3347 		in->in_phyctxt = &sc->sc_phyctxt[0];
3348 
3349 		if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3350 			device_printf(sc->sc_dev,
3351 			    "%s: binding add cmd\n", __func__);
3352 			goto out;
3353 		}
3354 		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3355 			device_printf(sc->sc_dev,
3356 			    "%s: failed to add sta\n", __func__);
3357 			goto out;
3358 		}
3359 	}
3360 
3361 	/*
3362 	 * Prevent the FW from wandering off channel during association
3363 	 * by "protecting" the session with a time event.
3364 	 */
3365 	/* XXX duration is in units of TU, not MS */
3366 	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3367 	iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
3368 	DELAY(100);
3369 
3370 	error = 0;
3371 out:
3372 	ieee80211_free_node(ni);
3373 	return (error);
3374 }
3375 
3376 static int
3377 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3378 {
3379 	struct iwm_node *in = IWM_NODE(vap->iv_bss);
3380 	int error;
3381 
3382 	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3383 		device_printf(sc->sc_dev,
3384 		    "%s: failed to update STA\n", __func__);
3385 		return error;
3386 	}
3387 
3388 	in->in_assoc = 1;
3389 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3390 		device_printf(sc->sc_dev,
3391 		    "%s: failed to update MAC\n", __func__);
3392 		return error;
3393 	}
3394 
3395 	return 0;
3396 }
3397 
/*
 * Tear down the firmware state for a station, used when falling out
 * of RUN.  Instead of the piecewise teardown (kept under #if 0 below
 * for reference) this performs a full device reset and re-init; see
 * the comment block for the rationale.  Always returns 0.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device not matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up your's, device!
	 */
	//iwm_mvm_flush_tx_path(sc, 0xf, 1);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

	/* The code below is unreachable; reference-only teardown path. */
#if 0
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
3455 
/*
 * net80211 node-allocation hook: return a zeroed iwm_node so the
 * driver-private fields (in_assoc, in_ridx, in_lq, ...) start clear.
 */
static struct ieee80211_node *
iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
	    M_INTWAIT | M_ZERO);
}
3462 
/*
 * Build the firmware link-quality (rate selection) command for a node
 * from its negotiated legacy rate set: map each 802.11 rate to a HW
 * rate index (highest rate first) and fill in_lq.rs_table with the
 * corresponding PLCP/antenna words.  The command itself is sent by
 * the caller (iwm_newstate() RUN case).
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	/* Bail rather than overflow rs_table / read an empty rate set. */
	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Rotate through the valid tx antennas, one per entry. */
		if (txant == 0)
			txant = IWM_FW_VALID_TX_ANT(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		/*
		 * NOTE(review): if the 802.11 rate was not found above,
		 * in_ridx[i] is still -1 (from the memset) and this
		 * indexes iwm_rates[] out of bounds -- confirm whether
		 * that case can occur in practice and guard if so.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
3574 
3575 static int
3576 iwm_media_change(struct ifnet *ifp)
3577 {
3578 	struct ieee80211vap *vap = ifp->if_softc;
3579 	struct ieee80211com *ic = vap->iv_ic;
3580 	struct iwm_softc *sc = ic->ic_softc;
3581 	int error;
3582 
3583 	error = ieee80211_media_change(ifp);
3584 	if (error != ENETRESET)
3585 		return error;
3586 
3587 	IWM_LOCK(sc);
3588 	if (ic->ic_nrunning > 0) {
3589 		iwm_stop(sc);
3590 		iwm_init(sc);
3591 	}
3592 	IWM_UNLOCK(sc);
3593 	return error;
3594 }
3595 
3596 
/*
 * net80211 state-machine hook.  Performs the driver-side work for
 * each transition (auth/assoc/run setup, teardown out of RUN), then
 * chains to the saved net80211 handler.  Note the lock juggling: the
 * net80211 comlock is dropped while the driver lock is held, and the
 * RUN->{SCAN,AUTH,ASSOC} path forces an intermediate INIT transition
 * because the device is fully reset by iwm_release().
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	/* Leaving SCAN: stop the scan LED blink. */
	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		/* Full device reset + re-init; see iwm_release(). */
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			/* Swap locks back to call into net80211. */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			vap->iv_newstate(vap, IEEE80211_S_INIT, arg);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		sc->sc_scanband = 0;
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		/* Power, beacon filtering, quotas and rates for the BSS. */
		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(sc, in);

		/* Push the link-quality table built by iwm_setrates(). */
		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	/* Chain to the saved net80211 newstate handler. */
	return (ivp->iv_newstate(vap, nstate, arg));
}
3720 
/*
 * Taskqueue callback run when a firmware scan pass completes.
 * Scanning is done one band at a time: if the finished pass was
 * 2 GHz and the NVM says 5 GHz is supported, kick off a 5 GHz pass;
 * otherwise report scan completion to net80211.
 */
void
iwm_endscan_cb(void *arg, int pending)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	int done;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
	    "%s: scan ended\n",
	    __func__);

	IWM_LOCK(sc);
	if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
	    sc->sc_nvm.sku_cap_band_52GHz_enable) {
		done = 0;
		/* Chain a 5 GHz pass; fall through to "done" on failure. */
		if ((error = iwm_mvm_scan_request(sc,
		    IEEE80211_CHAN_5GHZ, 0, NULL, 0)) != 0) {
			device_printf(sc->sc_dev,
			    "could not initiate 5 GHz scan\n");
			done = 1;
		}
	} else {
		done = 1;
	}

	if (done) {
		/* Drop the driver lock around the net80211 callback. */
		IWM_UNLOCK(sc);
		ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
		IWM_LOCK(sc);
		sc->sc_scanband = 0;
	}
	IWM_UNLOCK(sc);
}
3755 
/*
 * Bring the NIC from cold start to operational: run the INIT firmware
 * image (calibration), restart the hardware, load the regular image,
 * then configure antennas, PHY db, PHY contexts, power and tx queues.
 * On any failure after firmware load the device is stopped again.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, qid;

	if ((error = iwm_start_hw(sc)) != 0) {
		kprintf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration*/
	if ((error = iwm_send_phy_db_data(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_db_data failed\n");
		goto error;
	}

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* Mark TX rings as active. */
	for (qid = 0; qid < 4; qid++) {
		iwm_enable_txq(sc, qid, qid);
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
3837 
3838 /* Allow multicast from our BSSID. */
3839 static int
3840 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
3841 {
3842 	struct ieee80211_node *ni = vap->iv_bss;
3843 	struct iwm_mcast_filter_cmd *cmd;
3844 	size_t size;
3845 	int error;
3846 
3847 	size = roundup(sizeof(*cmd), 4);
3848 	cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
3849 	if (cmd == NULL)
3850 		return ENOMEM;
3851 	cmd->filter_own = 1;
3852 	cmd->port_id = 0;
3853 	cmd->count = 0;
3854 	cmd->pass_all = 1;
3855 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
3856 
3857 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
3858 	    IWM_CMD_SYNC, size, cmd);
3859 	kfree(cmd, M_DEVBUF);
3860 
3861 	return (error);
3862 }
3863 
3864 /*
3865  * ifnet interfaces
3866  */
3867 
3868 static void
3869 iwm_init(struct iwm_softc *sc)
3870 {
3871 	int error;
3872 
3873 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
3874 		return;
3875 	}
3876 	sc->sc_generation++;
3877 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
3878 
3879 	if ((error = iwm_init_hw(sc)) != 0) {
3880 		kprintf("iwm_init_hw failed %d\n", error);
3881 		iwm_stop(sc);
3882 		return;
3883 	}
3884 
3885 	/*
3886 	 * Ok, firmware loaded and we are jogging
3887 	 */
3888 	sc->sc_flags |= IWM_FLAG_HW_INITED;
3889 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3890 }
3891 
3892 static int
3893 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
3894 {
3895 	struct iwm_softc *sc;
3896 	int error;
3897 
3898 	sc = ic->ic_softc;
3899 
3900 	IWM_LOCK(sc);
3901 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3902 		IWM_UNLOCK(sc);
3903 		return (ENXIO);
3904 	}
3905 	error = mbufq_enqueue(&sc->sc_snd, m);
3906 	if (error) {
3907 		IWM_UNLOCK(sc);
3908 		return (error);
3909 	}
3910 	iwm_start(sc);
3911 	IWM_UNLOCK(sc);
3912 	return (0);
3913 }
3914 
3915 /*
3916  * Dequeue packets from sendq and call send.
3917  */
3918 static void
3919 iwm_start(struct iwm_softc *sc)
3920 {
3921 	struct ieee80211_node *ni;
3922 	struct mbuf *m;
3923 	int ac = 0;
3924 
3925 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
3926 	while (sc->qfullmsk == 0 &&
3927 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
3928 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3929 		if (iwm_tx(sc, m, ni, ac) != 0) {
3930 			if_inc_counter(ni->ni_vap->iv_ifp,
3931 			    IFCOUNTER_OERRORS, 1);
3932 			ieee80211_free_node(ni);
3933 			continue;
3934 		}
3935 		sc->sc_tx_timer = 15;
3936 	}
3937 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
3938 }
3939 
/*
 * Bring the interface down: clear the running state, bump the
 * generation counter (so stale deferred work is ignored), cancel the
 * LED blink and tx watchdog, and power the device off.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	sc->sc_scanband = 0;
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
}
3952 
3953 static void
3954 iwm_watchdog(void *arg)
3955 {
3956 	struct iwm_softc *sc = arg;
3957 
3958 	if (sc->sc_tx_timer > 0) {
3959 		if (--sc->sc_tx_timer == 0) {
3960 			device_printf(sc->sc_dev, "device timeout\n");
3961 #ifdef IWM_DEBUG
3962 			iwm_nic_error(sc);
3963 #endif
3964 			iwm_stop(sc);
3965 #if defined(__DragonFly__)
3966 			++sc->sc_ic.ic_oerrors;
3967 #else
3968 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
3969 #endif
3970 			return;
3971 		}
3972 	}
3973 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3974 }
3975 
3976 static void
3977 iwm_parent(struct ieee80211com *ic)
3978 {
3979 	struct iwm_softc *sc = ic->ic_softc;
3980 	int startall = 0;
3981 
3982 	IWM_LOCK(sc);
3983 	if (ic->ic_nrunning > 0) {
3984 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
3985 			iwm_init(sc);
3986 			startall = 1;
3987 		}
3988 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
3989 		iwm_stop(sc);
3990 	IWM_UNLOCK(sc);
3991 	if (startall)
3992 		ieee80211_start_all(ic);
3993 }
3994 
3995 /*
3996  * The interrupt side of things
3997  */
3998 
3999 /*
4000  * error dumping routines are from iwlwifi/mvm/utils.c
4001  */
4002 
4003 /*
4004  * Note: This structure is read from the device with IO accesses,
4005  * and the reading already does the endian conversion. As it is
4006  * read with uint32_t-sized accesses, any members with a different size
4007  * need to be ordered correctly though!
4008  */
/*
 * NOTE(review): this layout mirrors the firmware's error log as defined
 * in the Linux iwlwifi driver; do not reorder or resize fields.  Only
 * uint32_t members are present, matching the uint32_t-sized reads done
 * by iwm_read_mem() in iwm_nic_error().
 */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t pc;			/* program counter */
	uint32_t blink1;		/* branch link */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t gp3;		/* GP3 timer register */
	uint32_t ucode_ver;		/* uCode version */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed;
4055 
/*
 * Error-log geometry, used only by the sanity check in iwm_nic_error():
 * presumably the log data starts one word past the "valid" word and
 * each entry is seven 32-bit words — TODO confirm against iwlwifi.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4058 
4059 #ifdef IWM_DEBUG
/*
 * Map firmware error_id values to human-readable assert names (taken
 * from the iwlwifi driver).  The trailing ADVANCED_SYSASSERT entry is
 * the catch-all returned by iwm_desc_lookup() when no id matches.
 *
 * The table is read-only and used only in this file, so declare it
 * static const: keeps the kernel's global namespace clean and lets the
 * data live in .rodata.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
4081 
4082 static const char *
4083 iwm_desc_lookup(uint32_t num)
4084 {
4085 	int i;
4086 
4087 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4088 		if (advanced_lookup[i].num == num)
4089 			return advanced_lookup[i].name;
4090 
4091 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4092 	return advanced_lookup[i].name;
4093 }
4094 
4095 /*
4096  * Support for dumping the error log seemed like a good idea ...
4097  * but it's mostly hex junk and the only sensible thing is the
4098  * hw/ucode revision (which we know anyway).  Since it's here,
4099  * I'll just leave it in, just in case e.g. the Intel guys want to
4100  * help us decipher some "ADVANCED_SYSASSERT" later.
4101  */
/*
 * Read the firmware's error-event table out of device memory and dump
 * it via device_printf().  Debug builds call this from the watchdog
 * timeout and the SW_ERR interrupt path.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->sc_uc.uc_error_event_table;
	/* Reject pointers outside 0x800000-0x80BFFF — presumably the
	 * device SRAM window; confirm against the firmware docs. */
	if (base < 0x800000 || base >= 0x80C000) {
		device_printf(sc->sc_dev,
		    "Not valid error log pointer 0x%08x\n", base);
		return;
	}

	/* NOTE(review): the length here is in 32-bit words, not bytes —
	 * presumably matching iwm_read_mem()'s dword-count API; confirm. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start IWL Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	/* One line per table field, mirroring iwlwifi's dump format. */
	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | uPc\n", table.pc);
	device_printf(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | time gp3\n", table.gp3);
	device_printf(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
}
4167 #endif
4168 
/*
 * Helpers for iwm_notif_intr(): sync the RX buffer for CPU reads and
 * point _var_ at the payload that follows the iwm_rx_packet header.
 * NOTE: these expand references to local variables `ring` and `data`,
 * so they are only usable inside iwm_notif_intr()'s loop body.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Same as SYNC_RESP_STRUCT; the _len_ argument is currently unused. */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Advance the RX ring consumer index, wrapping at IWM_RX_RING_COUNT. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
4182 
4183 /*
4184  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
4185  * Basic structure from if_iwn
4186  */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	/* Pick up the write pointer the device published in the status area. */
	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;

	/*
	 * Process responses
	 */
	while (sc->rxq.cur != hw) {
		struct iwm_rx_ring *ring = &sc->rxq;
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int qid, idx;

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 0x80 of qid marks firmware-originated notifications;
		 * strip it for the (qid,idx) command bookkeeping below. */
		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
		    pkt->hdr.code, sc->rxq.cur, hw);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (pkt->hdr.code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION: {
			struct iwm_missed_beacons_notif *resp;
			int missed;

			/* XXX look at mac_id to determine interface ID */
			struct ieee80211com *ic = &sc->sc_ic;
			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

			SYNC_RESP_STRUCT(resp, pkt);
			missed = le32toh(resp->consec_missed_beacons);

			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
			    "%s: MISSED_BEACON: mac_id=%d, "
			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
			    "num_rx=%d\n",
			    __func__,
			    le32toh(resp->mac_id),
			    le32toh(resp->consec_missed_beacons_since_last_rx),
			    le32toh(resp->consec_missed_beacons),
			    le32toh(resp->num_expected_beacons),
			    le32toh(resp->num_recvd_beacons));

			/* Be paranoid */
			if (vap == NULL)
				break;

			/* XXX no net80211 locking? */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (missed > vap->iv_bmissthreshold) {
					/* XXX bad locking; turn into task */
					IWM_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWM_LOCK(sc);
				}
			}

			break; }

		case IWM_MVM_ALIVE: {
			/* Firmware booted: record its table pointers and
			 * wake whoever is sleeping on sc_uc. */
			struct iwm_mvm_alive_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			sc->sc_uc.uc_error_event_table
			    = le32toh(resp->error_event_table_ptr);
			sc->sc_uc.uc_log_event_table
			    = le32toh(resp->log_event_table_ptr);
			sc->sched_base = le32toh(resp->scd_base_ptr);
			sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;

			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break; }

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);

			iwm_phy_db_set_section(sc, phy_db_notif);

			break; }

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break; }

		case IWM_NVM_ACCESS_CMD:
			/* Copy the reply out for the waiting command issuer,
			 * identified by the (qid,idx) it is expecting. */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->rxq.data_dmat, data->map,
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		/* Generic command completions: stash the response if a
		 * synchronous sender is waiting on this (qid,idx). */
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_COMPLETE_NOTIFICATION: {
			/* Defer end-of-scan processing to the taskqueue. */
			struct iwm_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
			break; }

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			device_printf(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type),
			    resp->cmd_id);
			break; }

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
			    "TE notif status = 0x%x action = 0x%x\n",
			    notif->status, notif->action);
			break; }

		case IWM_MCAST_FILTER_CMD:
			break;

		default:
			device_printf(sc->sc_dev,
			    "frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", qid, idx,
			    pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
4420 
/*
 * Main interrupt handler.  Reads the cause bits either from the ICT
 * table (when IWM_FLAG_USE_ICT is set) or directly from IWM_CSR_INT,
 * then dispatches: SW error -> dump state and restart VAPs, HW error /
 * rfkill -> stop, FH_TX -> firmware-chunk-loaded wakeup, RX/periodic
 * -> iwm_notif_intr().  Interrupts are masked on entry and restored
 * at out_ena.
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

#if defined(__DragonFly__)
	/* Guard against interrupts arriving after detach tore down BARs. */
	if (sc->sc_mem == NULL) {
		kprintf("iwm_intr: detached\n");
		return;
	}
#endif
	IWM_LOCK(sc);
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/* NOTE(review): htole32() here is really a host-read of a
		 * little-endian value (le32toh); the two are the same
		 * byte-swap on every byte order, so behavior is correct. */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;	/* consume the slot */
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Ack the causes we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		/* NOTE(review): these returns leave the interrupt mask
		 * cleared (no out_ena); presumably the restart path
		 * re-enables interrupts — confirm. */
		if (vap == NULL) {
			kprintf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/* XXX TODO: turn this into a callout/taskqueue */
		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);	/* unblock the firmware loader */
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
4581 
4582 /*
4583  * Autoconf glue-sniffing
4584  */
/* PCI IDs of the Intel 3160/7260/7265 adapters this driver attaches to. */
#define	PCI_VENDOR_INTEL		0x8086
#define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
#define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
#define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
#define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
#define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
#define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
4592 
/* Device-ID to marketing-name table consulted by iwm_probe(). */
static const struct iwm_devices {
	uint16_t	device;
	const char	*name;
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
	{ PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
};
4604 
4605 static int
4606 iwm_probe(device_t dev)
4607 {
4608 	int i;
4609 
4610 	for (i = 0; i < nitems(iwm_devices); i++) {
4611 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
4612 		    pci_get_device(dev) == iwm_devices[i].device) {
4613 			device_set_desc(dev, iwm_devices[i].name);
4614 			return (BUS_PROBE_DEFAULT);
4615 		}
4616 	}
4617 
4618 	return (ENXIO);
4619 }
4620 
4621 static int
4622 iwm_dev_check(device_t dev)
4623 {
4624 	struct iwm_softc *sc;
4625 
4626 	sc = device_get_softc(dev);
4627 
4628 	switch (pci_get_device(dev)) {
4629 	case PCI_PRODUCT_INTEL_WL_3160_1:
4630 	case PCI_PRODUCT_INTEL_WL_3160_2:
4631 		sc->sc_fwname = "iwm3160fw";
4632 		sc->host_interrupt_operation_mode = 1;
4633 		return (0);
4634 	case PCI_PRODUCT_INTEL_WL_7260_1:
4635 	case PCI_PRODUCT_INTEL_WL_7260_2:
4636 		sc->sc_fwname = "iwm7260fw";
4637 		sc->host_interrupt_operation_mode = 1;
4638 		return (0);
4639 	case PCI_PRODUCT_INTEL_WL_7265_1:
4640 	case PCI_PRODUCT_INTEL_WL_7265_2:
4641 		sc->sc_fwname = "iwm7265fw";
4642 		sc->host_interrupt_operation_mode = 0;
4643 		return (0);
4644 	default:
4645 		device_printf(dev, "unknown adapter type\n");
4646 		return ENXIO;
4647 	}
4648 }
4649 
/*
 * PCI-level attach: map BAR0, enable bus mastering, allocate and hook
 * up the (MSI if available) interrupt, and fetch the parent DMA tag.
 * NOTE(review): the error paths below return without releasing sc_mem
 * or sc_irq — presumably iwm_attach()'s fail path (iwm_detach_local ->
 * iwm_pci_detach) cleans those up; confirm.
 */
static int
iwm_pci_attach(device_t dev)
{
	struct iwm_softc *sc;
	int count, error, rid;
	uint16_t reg;
#if defined(__DragonFly__)
	int irq_flags;
#endif

	sc = device_get_softc(dev);

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_read_config(dev, 0x40, sizeof(reg));
	pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));

	/* Enable bus-mastering and hardware bug workaround. */
	pci_enable_busmaster(dev);
	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
	/* if !MSI */
	if (reg & PCIM_STATUS_INTxSTATE) {
		reg &= ~PCIM_STATUS_INTxSTATE;
	}
	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));

	/* Map the device registers (BAR0). */
	rid = PCIR_BAR(0);
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(sc->sc_dev, "can't map mem space\n");
		return (ENXIO);
	}
	sc->sc_st = rman_get_bustag(sc->sc_mem);
	sc->sc_sh = rman_get_bushandle(sc->sc_mem);

	/* Install interrupt handler. */
	count = 1;
	rid = 0;
#if defined(__DragonFly__)
	pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
#else
	if (pci_alloc_msi(dev, &count) == 0)
		rid = 1;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
#endif
	if (sc->sc_irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
			return (ENXIO);
	}
#if defined(__DragonFly__)
	/* DragonFly serializes the handler via the global wlan serializer. */
	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
			       iwm_intr, sc, &sc->sc_ih,
			       &wlan_global_serializer);
#else
	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwm_intr, sc, &sc->sc_ih);
#endif
	/* NOTE(review): `error` is assigned but only sc_ih is checked here. */
	if (sc->sc_ih == NULL) {
		device_printf(dev, "can't establish interrupt");
#if defined(__DragonFly__)
                pci_release_msi(dev);
#endif
			return (ENXIO);
	}
	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);

	return (0);
}
4720 
/*
 * Undo iwm_pci_attach(): tear down the interrupt handler, release the
 * IRQ and MSI allocation, and unmap BAR0.  Safe to call with partially
 * initialized state (NULL checks); on DragonFly the pointers are also
 * NULLed so iwm_intr()'s detach guard works.
 */
static void
iwm_pci_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
#if defined(__DragonFly__)
		sc->sc_irq = NULL;
#endif
        }
	if (sc->sc_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);
#if defined(__DragonFly__)
		sc->sc_mem = NULL;
#endif
	}
}
4743 
4744 
4745 
/*
 * Device attach: set up locks, queues and the taskqueue, do the PCI
 * attach, identify the chip, allocate all DMA resources (firmware
 * buffers, keep-warm page, ICT table, scheduler, TX/RX rings), seed
 * the net80211 capabilities, and register iwm_preinit() as a config
 * intrhook so firmware loading happens once interrupts are live.
 * Any failure funnels through `fail:` into iwm_detach_local().
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
#if defined(__DragonFly__)
	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
#else
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
#endif
	callout_init(&sc->sc_led_blink_to);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &sc->sc_tq);
#if defined(__DragonFly__)
	error = taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON,
					-1, "iwm_taskq");
#else
        error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
#endif
        if (error != 0) {
                device_printf(dev, "can't start threads, error %d\n",
		    error);
		goto fail;
        }

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* -1 == "no synchronous command response expected" */
	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;

	/*
	 * We now start fiddling with the hardware
	 */
	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Reset all PHY contexts to a known-unused state. */
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
	/* Defer firmware load/announce until interrupts are enabled. */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	sc->sc_preinit_hook.ich_desc = "iwm";
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
4893 
/*
 * net80211 WME/EDCA parameter-update hook.  The new parameters are not
 * pushed to the firmware here; this stub only logs the call and
 * reports success.
 */
static int
iwm_update_edca(struct ieee80211com *ic)
{
	struct iwm_softc *sc = ic->ic_softc;

	device_printf(sc->sc_dev, "%s: called\n", __func__);
	return (0);
}
4902 
/*
 * Config-intrhook callback, run once interrupts work: start the
 * hardware, run the "init" uCode to read the NVM (then power the
 * device back down), and — only if that succeeded — attach to
 * net80211 and wire up all the ic_* driver methods.  Past the
 * ieee80211_ifattach() point a failure would also have to tear down
 * net80211 state (see comment below).
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/* Run the init firmware just to read NVM, then power down again. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "revision 0x%x, firmware %d.%d (API ver. %d)\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    IWM_UCODE_MAJOR(sc->sc_fwver),
	    IWM_UCODE_MINOR(sc->sc_fwver),
	    IWM_UCODE_API(sc->sc_fwver));

	/* not all hardware can do 5GHz band */
	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_update_edca;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
4975 
4976 /*
4977  * Attach the interface to 802.11 radiotap.
4978  */
4979 static void
4980 iwm_radiotap_attach(struct iwm_softc *sc)
4981 {
4982         struct ieee80211com *ic = &sc->sc_ic;
4983 
4984 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4985 	    "->%s begin\n", __func__);
4986         ieee80211_radiotap_attach(ic,
4987             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
4988                 IWM_TX_RADIOTAP_PRESENT,
4989             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
4990                 IWM_RX_RADIOTAP_PRESENT);
4991 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4992 	    "->%s end\n", __func__);
4993 }
4994 
/*
 * net80211 ic_vap_create method: create the single supported vap.
 * Returns NULL if a vap already exists (hardware supports one at a time).
 */
static struct ieee80211vap *
iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwm_vap *ivp;
	struct ieee80211vap *vap;

	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
		return NULL;
	ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	vap->iv_bmissthreshold = 10;            /* override default */
	/* Override with driver methods. */
	/* Save net80211's state handler so iwm_newstate can chain to it. */
	ivp->iv_newstate = vap->iv_newstate;
	vap->iv_newstate = iwm_newstate;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
	    mac);
	ic->ic_opmode = opmode;

	return vap;
}
5022 
5023 static void
5024 iwm_vap_delete(struct ieee80211vap *vap)
5025 {
5026 	struct iwm_vap *ivp = IWM_VAP(vap);
5027 
5028 	ieee80211_ratectl_deinit(vap);
5029 	ieee80211_vap_detach(vap);
5030 	kfree(ivp, M_80211_VAP);
5031 }
5032 
5033 static void
5034 iwm_scan_start(struct ieee80211com *ic)
5035 {
5036 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5037 	struct iwm_softc *sc = ic->ic_softc;
5038 	int error;
5039 
5040 	if (sc->sc_scanband)
5041 		return;
5042 	IWM_LOCK(sc);
5043 	error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ, 0, NULL, 0);
5044 	if (error) {
5045 		device_printf(sc->sc_dev, "could not initiate 2 GHz scan\n");
5046 		IWM_UNLOCK(sc);
5047 		ieee80211_cancel_scan(vap);
5048 		sc->sc_scanband = 0;
5049 	} else {
5050 		iwm_led_blink_start(sc);
5051 		IWM_UNLOCK(sc);
5052 	}
5053 }
5054 
5055 static void
5056 iwm_scan_end(struct ieee80211com *ic)
5057 {
5058 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5059 	struct iwm_softc *sc = ic->ic_softc;
5060 
5061 	IWM_LOCK(sc);
5062 	iwm_led_blink_stop(sc);
5063 	if (vap->iv_state == IEEE80211_S_RUN)
5064 		iwm_mvm_led_enable(sc);
5065 	IWM_UNLOCK(sc);
5066 }
5067 
static void
iwm_update_mcast(struct ieee80211com *ic)
{
	/* Multicast filter updates are not needed by this hardware. */
}
5072 
static void
iwm_set_channel(struct ieee80211com *ic)
{
	/* Channel changes are driven through the firmware scan/assoc path. */
}
5077 
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
	/* Firmware handles per-channel dwell; nothing for the host to do. */
}
5082 
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
	/* Firmware handles per-channel dwell; nothing for the host to do. */
}
5088 
5089 void
5090 iwm_init_task(void *arg1)
5091 {
5092 	struct iwm_softc *sc = arg1;
5093 
5094 	IWM_LOCK(sc);
5095 	while (sc->sc_flags & IWM_FLAG_BUSY) {
5096 #if defined(__DragonFly__)
5097 		iwmsleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
5098 #else
5099 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
5100 #endif
5101 }
5102 	sc->sc_flags |= IWM_FLAG_BUSY;
5103 	iwm_stop(sc);
5104 	if (sc->sc_ic.ic_nrunning > 0)
5105 		iwm_init(sc);
5106 	sc->sc_flags &= ~IWM_FLAG_BUSY;
5107 	wakeup(&sc->sc_flags);
5108 	IWM_UNLOCK(sc);
5109 }
5110 
/*
 * Device resume method: restore PCI state, restart the device, and
 * resume net80211 if we were running at suspend time.
 */
static int
iwm_resume(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	int do_reinit = 0;
	uint16_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	/* (16-bit read at 0x40; masking 0xff00 clears the byte at 0x41.) */
	reg = pci_read_config(dev, 0x40, sizeof(reg));
	pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
	/* Restart the device synchronously (takes/releases the lock). */
	iwm_init_task(device_get_softc(dev));

	IWM_LOCK(sc);
	/* DORESUME is set by iwm_suspend when the interface was up. */
	if (sc->sc_flags & IWM_FLAG_DORESUME) {
		sc->sc_flags &= ~IWM_FLAG_DORESUME;
		do_reinit = 1;
	}
	IWM_UNLOCK(sc);

	if (do_reinit)
		ieee80211_resume_all(&sc->sc_ic);

	return 0;
}
5135 
5136 static int
5137 iwm_suspend(device_t dev)
5138 {
5139 	int do_stop = 0;
5140 	struct iwm_softc *sc = device_get_softc(dev);
5141 
5142 	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
5143 
5144 	ieee80211_suspend_all(&sc->sc_ic);
5145 
5146 	if (do_stop) {
5147 		IWM_LOCK(sc);
5148 		iwm_stop(sc);
5149 		sc->sc_flags |= IWM_FLAG_DORESUME;
5150 		IWM_UNLOCK(sc);
5151 	}
5152 
5153 	return (0);
5154 }
5155 
/*
 * Common teardown for both failed attach and full detach.
 * do_net80211 is 0 when net80211 was never attached (attach failure
 * before ieee80211_ifattach) and 1 on a normal detach.
 * The order matters: task queue first, then callouts, then the device,
 * then DMA resources, and the lock last.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	if (sc->sc_tq) {
#if defined(__DragonFly__)
		/* doesn't exist for DFly, DFly drains tasks on free */
#else
		taskqueue_drain_all(sc->sc_tq);
#endif
		taskqueue_free(sc->sc_tq);
#if defined(__DragonFly__)
		sc->sc_tq = NULL;
#endif
	}
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		ieee80211_ifdetach(&sc->sc_ic);
	}

	/* Free descriptor rings */
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_free_sched(sc);
	/* Free only the DMA areas that were actually allocated. */
	if (sc->ict_dma.vaddr != NULL)
		iwm_free_ict(sc);
	if (sc->kw_dma.vaddr != NULL)
		iwm_free_kw(sc);
	if (sc->fw_dma.vaddr != NULL)
		iwm_free_fwmem(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}
5206 
5207 static int
5208 iwm_detach(device_t dev)
5209 {
5210 	struct iwm_softc *sc = device_get_softc(dev);
5211 
5212 	return (iwm_detach_local(sc, 1));
5213 }
5214 
5215 static device_method_t iwm_pci_methods[] = {
5216         /* Device interface */
5217         DEVMETHOD(device_probe,         iwm_probe),
5218         DEVMETHOD(device_attach,        iwm_attach),
5219         DEVMETHOD(device_detach,        iwm_detach),
5220         DEVMETHOD(device_suspend,       iwm_suspend),
5221         DEVMETHOD(device_resume,        iwm_resume),
5222 
5223         DEVMETHOD_END
5224 };
5225 
5226 static driver_t iwm_pci_driver = {
5227         "iwm",
5228         iwm_pci_methods,
5229         sizeof (struct iwm_softc)
5230 };
5231 
5232 static devclass_t iwm_devclass;
5233 
5234 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
5235 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
5236 MODULE_DEPEND(iwm, pci, 1, 1, 1);
5237 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
5238