/*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 *				DragonFly work
 *
 * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
 *	 changes to remove per-device network interface (DragonFly has not
 *	 caught up to that yet on the WLAN side).
 *
 * Comprehensive list of adjustments for DragonFly not #ifdef'd:
 *	malloc -> kmalloc	(in particular, changing improper M_NOWAIT
 *				specifications to M_INTWAIT.  We still don't
 *				understand why FreeBSD uses M_NOWAIT for
 *				critical must-not-fail kmalloc()s).
 *	free -> kfree
 *	printf -> kprintf
 *	(bug fix) memset in iwm_reset_rx_ring.
 *	(debug)   added several kprintf()s on error
 *
 *	header file paths (DFly allows localized path specifications).
 *	minor header file differences.
 *
 * Comprehensive list of adjustments for DragonFly #ifdef'd:
 *	(safety)  added register read-back serialization in iwm_reset_rx_ring().
 *	packet counters
 *	msleep -> lksleep
 *	mtx -> lk  (mtx functions -> lockmgr functions)
 *	callout differences
 *	taskqueue differences
 *	MSI differences
 *	bus_setup_intr() differences
 *	minor PCI config register naming differences
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/endian.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#include <netproto/802_11/ieee80211_ratectl.h>
#include <netproto/802_11/ieee80211_radiotap.h>

#include "if_iwmreg.h"
#include "if_iwmvar.h"
#include "if_iwm_debug.h"
#include "if_iwm_util.h"
#include "if_iwm_binding.h"
#include "if_iwm_phy_db.h"
#include "if_iwm_mac_ctxt.h"
#include "if_iwm_phy_ctxt.h"
#include "if_iwm_time_event.h"
#include "if_iwm_power.h"
#include "if_iwm_scan.h"
#include "if_iwm_pcie_trans.h"
#include "if_iwm_led.h"

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
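
/*
 * In the table above, rate is in 500 kb/s units, matching net80211's
 * rate encoding (e.g. 2 -> 1 Mb/s, 11 -> 5.5 Mb/s, 108 -> 54 Mb/s),
 * and plcp is the PLCP signal code handed to the firmware for that
 * rate.
 */
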
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
                                           enum iwm_ucode_type,
                                           const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
#if !defined(__DragonFly__)
static void	iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
#endif
static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
                                     bus_size_t, bus_size_t);
static void	iwm_dma_contig_free(struct iwm_dma_info *);
static int	iwm_alloc_fwmem(struct iwm_softc *);
static void	iwm_free_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static void	iwm_free_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static void	iwm_free_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static void	iwm_free_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_disable_rx_dma(struct iwm_softc *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
                                  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_mvm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
static int	iwm_post_alive(struct iwm_softc *);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
                                   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, size_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
				   const uint16_t *, const uint16_t *,
				   const uint16_t *, const uint16_t *,
				   const uint16_t *);
static void	iwm_set_hw_address_8000(struct iwm_softc *,
					struct iwm_nvm_data *,
					const uint16_t *, const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static int	iwm_parse_nvm_sections(struct iwm_softc *,
                                       struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
                                       const uint8_t *, uint32_t);
static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
                                        const uint8_t *, uint32_t);
static int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_load_cpu_sections_8000(struct iwm_softc *,
					   struct iwm_fw_sects *, int, int *);
static int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
                                              enum iwm_ucode_type);
static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static int	iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
					    struct iwm_rx_phy_info *);
static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
                                      struct iwm_rx_packet *,
                                      struct iwm_rx_data *);
static int	iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
                                   struct iwm_rx_data *);
static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
                                         struct iwm_rx_packet *,
				         struct iwm_node *);
static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
                                  struct iwm_rx_data *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
                                 uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct ieee80211_frame *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
                       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
					        struct iwm_mvm_add_sta_cmd_v7 *,
                                                int *);
static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
                                       int);
static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
                                           struct iwm_int_sta *,
				           const uint8_t *, uint16_t, uint16_t);
static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
static int	iwm_release(struct iwm_softc *, struct iwm_node *);
static struct ieee80211_node *
		iwm_node_alloc(struct ieee80211vap *,
		               const uint8_t[IEEE80211_ADDR_LEN]);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
static int	iwm_media_change(struct ifnet *);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static void	iwm_mvm_fill_sf_command(struct iwm_softc *,
					struct iwm_sf_cfg_cmd *,
					struct ieee80211_node *);
static int	iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
		iwm_vap_create(struct ieee80211com *,
		               const char [IFNAMSIZ], int,
		               enum ieee80211_opmode, int,
		               const uint8_t [IEEE80211_ADDR_LEN],
		               const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

#if defined(__DragonFly__)
static int	iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);

#endif

/*
 * Firmware parser.
 */
436 
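/*
 * Firmware image layout, as consumed by iwm_read_firmware() below (a
 * summary of the parse loop, not an authoritative format spec): a
 * struct iwm_tlv_ucode_header -- a zero lead-in word, the
 * IWM_TLV_UCODE_MAGIC value and a packed version word -- followed by a
 * stream of TLV records, each payload padded to a 4-byte boundary:
 *
 *	struct iwm_ucode_tlv tlv;
 *	memcpy(&tlv, data, sizeof(tlv));	// tlv.type, tlv.length (LE)
 *	payload = data + sizeof(tlv);
 *	next	= payload + roundup(le32toh(tlv.length), 4);
 */
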
static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_onesect *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}
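
/*
 * The sections collected above are later written to the device by the
 * firmware loaders; a minimal sketch of that consumption, using only
 * fields set here (the real iwm_load_firmware_7000/8000 paths add
 * per-family CPU-section handling):
 *
 *	for (i = 0; i < fws->fw_count; i++) {
 *		struct iwm_fw_onesect *s = &fws->fw_sect[i];
 *		error = iwm_firmware_load_sect(sc, s->fws_devoff,
 *		    s->fws_data, s->fws_len);
 *	}
 */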
481 
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

static int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	int error = 0;
	size_t len;

	if (fw->fw_status == IWM_FW_STATUS_DONE &&
	    ucode_type != IWM_UCODE_TYPE_INIT)
		return 0;

	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
#if defined(__DragonFly__)
		lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
#else
		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
#endif
	}
	fw->fw_status = IWM_FW_STATUS_INPROGRESS;

	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	IWM_UNLOCK(sc);
	fwp = firmware_get(sc->sc_fwname);
	IWM_LOCK(sc);
	if (fwp == NULL) {
		device_printf(sc->sc_dev,
		    "could not read firmware %s\n", sc->sc_fwname);
		/*
		 * error is still 0 at this point; set it so the out:
		 * path does not mark the load IWM_FW_STATUS_DONE.
		 */
		error = ENOENT;
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->sc_fwname);
		error = EINVAL;
		goto out;
	}

	ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		const void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(const uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
				    __func__,
				    (int) tlv_len,
				    (int) sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				goto parse_out;
			}
			sc->sc_fw_phy_config =
			    le32toh(*(const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			const struct iwm_ucode_api *api;
			if (tlv_len != sizeof(*api)) {
				error = EINVAL;
				goto parse_out;
			}
			api = (const struct iwm_ucode_api *)tlv_data;
			/* Flags may exceed 32 bits in future firmware. */
			if (le32toh(api->api_index) > 0) {
				device_printf(sc->sc_dev,
				    "unsupported API index %d\n",
				    le32toh(api->api_index));
				error = EINVAL;
				goto parse_out;
			}
			sc->sc_ucode_api = le32toh(api->api_flags);
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			const struct iwm_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				error = EINVAL;
				goto parse_out;
			}
			capa = (const struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				device_printf(sc->sc_dev,
				    "unsupported API index %d\n", idx);
				error = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1U << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case 48: /* undocumented TLV */
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			  le32toh(*(const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
		device_printf(sc->sc_dev,
		    "device uses unsupported power ops\n");
		error = ENOTSUP;
	}

 out:
	if (error) {
		fw->fw_status = IWM_FW_STATUS_NONE;
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	} else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	return error;
}

/*
 * DMA resource routines
 */

#if !defined(__DragonFly__)
static void
iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        if (error != 0)
                return;
	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
	*(bus_addr_t *)arg = segs[0].ds_addr;
}
#endif
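
/*
 * On FreeBSD, bus_dmamap_load() reports the chosen bus address through
 * the callback above.  DragonFly's bus_dmamem_coherent() hands the bus
 * address back directly in the bus_dmamem_t (dmem_busaddr below), so
 * no callback is needed on that side of the #ifdef.
 */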
878 
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	dma->tag = NULL;
	dma->map = NULL;
	dma->size = size;
	dma->vaddr = NULL;

#if defined(__DragonFly__)
	bus_dmamem_t dmem;
	error = bus_dmamem_coherent(tag, alignment, 0,
				    BUS_SPACE_MAXADDR_32BIT,
				    BUS_SPACE_MAXADDR,
				    size, BUS_DMA_NOWAIT, &dmem);
	if (error != 0)
		goto fail;

	dma->tag = dmem.dmem_tag;
	dma->map = dmem.dmem_map;
	dma->vaddr = dmem.dmem_addr;
	dma->paddr = dmem.dmem_busaddr;
#else
	error = bus_dma_tag_create(tag, alignment,
            0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
            1, size, 0, NULL, NULL, &dma->tag);
        if (error != 0)
                goto fail;

        error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
        if (error != 0)
                goto fail;

        error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
            iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
        if (error != 0) {
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
		goto fail;
	}
#endif

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	iwm_dma_contig_free(dma);

	return error;
}

static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->vaddr != NULL) {
		bus_dmamap_sync(dma->tag, dma->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->tag, dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}
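
/*
 * Typical usage of the pair above (a sketch; see the small wrappers
 * that follow): one coherent, physically contiguous buffer per ring or
 * table, with the iwm_dma_info handle kept in the softc:
 *
 *	if (iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096))
 *		return ENOMEM;		// hypothetical caller error path
 *	...
 *	iwm_dma_contig_free(&sc->kw_dma);
 */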
949 
/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
}

static void
iwm_free_fwmem(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->fw_dma);
}

/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

static void
iwm_free_sched(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->sched_dma);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

static void
iwm_free_kw(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->kw_dma);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

static void
iwm_free_ict(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->ict_dma);
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

        /* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
        error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: could not create RX buf DMA tag, error %d\n",
                    __func__, error);
                goto fail;
        }

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	/* XXX conditional nic locks are stupid */
	/* XXX print out if we can't lock the NIC? */
	if (iwm_nic_lock(sc)) {
		/* XXX handle if RX stop doesn't finish? */
		(void) iwm_pcie_rx_stop(sc);
		iwm_nic_unlock(sc);
	}
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   maxsize, nsegments, maxsize,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
            nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
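
/*
 * For reference, the command-area layout computed in the loop above,
 * one struct iwm_device_cmd per ring slot:
 *
 *	data[i].cmd_paddr     = cmd_dma.paddr
 *	                        + i * sizeof(struct iwm_device_cmd)
 *	data[i].scratch_paddr = data[i].cmd_paddr
 *	                        + sizeof(struct iwm_cmd_header)
 *	                        + offsetof(struct iwm_tx_cmd, scratch)
 */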
1235 
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
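
/*
 * Background on ICT (a summary; the consuming code is in iwm_intr(),
 * outside this excerpt): with IWM_FLAG_USE_ICT set, the device
 * DMA-writes interrupt cause words into the table allocated by
 * iwm_alloc_ict() instead of requiring CSR reads from the interrupt
 * handler, which walks the table from sc->ict_cur until it reads a
 * zero entry.
 */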
1341 
/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	if (iwm_nic_lock(sc)) {
		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}

static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	iwm_disable_rx_dma(sc);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
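
/*
 * Note the VO/VI/BE/BK ordering above: it follows the iwlwifi/Linux
 * access-category numbering (VO = 0), not net80211's WME_AC_* order
 * (BE = 0), so the index must match whatever convention the caller
 * uses when picking the FIFO for iwm_enable_txq().
 */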
1601 
1602 static int
1603 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1604 {
1605 	if (!iwm_nic_lock(sc)) {
1606 		device_printf(sc->sc_dev,
1607 		    "%s: cannot enable txq %d\n",
1608 		    __func__,
1609 		    qid);
1610 		return EBUSY;
1611 	}
1612 
1613 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1614 
1615 	if (qid == IWM_MVM_CMD_QUEUE) {
1616 		/* unactivate before configuration */
1617 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1618 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1619 		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1620 
1621 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1622 
1623 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1624 
1625 		iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1626 		/* Set scheduler window size and frame limit. */
1627 		iwm_write_mem32(sc,
1628 		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1629 		    sizeof(uint32_t),
1630 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1631 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1632 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1633 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1634 
1635 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1636 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1637 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1638 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1639 		    IWM_SCD_QUEUE_STTS_REG_MSK);
1640 	} else {
1641 		struct iwm_scd_txq_cfg_cmd cmd;
1642 		int error;
1643 
1644 		iwm_nic_unlock(sc);
1645 
1646 		memset(&cmd, 0, sizeof(cmd));
1647 		cmd.scd_queue = qid;
1648 		cmd.enable = 1;
1649 		cmd.sta_id = sta_id;
1650 		cmd.tx_fifo = fifo;
1651 		cmd.aggregate = 0;
1652 		cmd.window = IWM_FRAME_LIMIT;
1653 
1654 		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1655 		    sizeof(cmd), &cmd);
1656 		if (error) {
1657 			device_printf(sc->sc_dev,
1658 			    "cannot enable txq %d\n", qid);
1659 			return error;
1660 		}
1661 
1662 		if (!iwm_nic_lock(sc))
1663 			return EBUSY;
1664 	}
1665 
1666 	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1667 	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1668 
1669 	iwm_nic_unlock(sc);
1670 
1671 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1672 	    __func__, qid, fifo);
1673 
1674 	return 0;
1675 }
1676 
1677 static int
1678 iwm_post_alive(struct iwm_softc *sc)
1679 {
1680 	int nwords;
1681 	int error, chnl;
1682 	uint32_t base;
1683 
1684 	if (!iwm_nic_lock(sc))
1685 		return EBUSY;
1686 
1687 	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1688 	if (sc->sched_base != base) {
1689 		device_printf(sc->sc_dev,
1690 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1691 		    __func__, sc->sched_base, base);
1692 	}
1693 
1694 	iwm_ict_reset(sc);
1695 
1696 	/* Clear TX scheduler state in SRAM. */
1697 	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1698 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1699 	    / sizeof(uint32_t);
1700 	error = iwm_write_mem(sc,
1701 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1702 	    NULL, nwords);
1703 	if (error)
1704 		goto out;
1705 
1706 	/* Set physical address of TX scheduler rings (1KB aligned). */
1707 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1708 
1709 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1710 
1711 	iwm_nic_unlock(sc);
1712 
	/* Enable the command channel (FIFO 7 is the command FIFO). */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1715 	if (error)
1716 		return error;
1717 
1718 	if (!iwm_nic_lock(sc))
1719 		return EBUSY;
1720 
1721 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1722 
1723 	/* Enable DMA channels. */
1724 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1725 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1726 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1727 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1728 	}
1729 
1730 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1731 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1732 
1733 	/* Enable L1-Active */
1734 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
1735 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1736 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1737 	}
1738 
1739  out:
1740 	iwm_nic_unlock(sc);
1741 	return error;
1742 }
1743 
1744 /*
1745  * NVM read access and content parsing.  We do not support
1746  * external NVM or writing NVM.
1747  * iwlwifi/mvm/nvm.c
1748  */
1749 
1750 /* list of NVM sections we are allowed/need to read */
1751 const int nvm_to_read[] = {
1752 	IWM_NVM_SECTION_TYPE_HW,
1753 	IWM_NVM_SECTION_TYPE_SW,
1754 	IWM_NVM_SECTION_TYPE_REGULATORY,
1755 	IWM_NVM_SECTION_TYPE_CALIBRATION,
1756 	IWM_NVM_SECTION_TYPE_PRODUCTION,
1757 	IWM_NVM_SECTION_TYPE_HW_8000,
1758 	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
1759 	IWM_NVM_SECTION_TYPE_PHY_SKU,
1760 };
1761 
1762 /* Default NVM size to read */
1763 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1764 #define IWM_MAX_NVM_SECTION_SIZE	8192
1765 
1766 #define IWM_NVM_WRITE_OPCODE 1
1767 #define IWM_NVM_READ_OPCODE 0
1768 
1769 /* load nvm chunk response */
1770 #define IWM_READ_NVM_CHUNK_SUCCEED		0
1771 #define IWM_READ_NVM_CHUNK_INVALID_ADDRESS	1
1772 
1773 static int
1774 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1775 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1776 {
1778 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1779 		.offset = htole16(offset),
1780 		.length = htole16(length),
1781 		.type = htole16(section),
1782 		.op_code = IWM_NVM_READ_OPCODE,
1783 	};
1784 	struct iwm_nvm_access_resp *nvm_resp;
1785 	struct iwm_rx_packet *pkt;
1786 	struct iwm_host_cmd cmd = {
1787 		.id = IWM_NVM_ACCESS_CMD,
1788 		.flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1789 		    IWM_CMD_SEND_IN_RFKILL,
1790 		.data = { &nvm_access_cmd, },
1791 	};
1792 	int ret, offset_read;
1793 	size_t bytes_read;
1794 	uint8_t *resp_data;
1795 
1796 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1797 
1798 	ret = iwm_send_cmd(sc, &cmd);
1799 	if (ret) {
1800 		device_printf(sc->sc_dev,
1801 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1802 		return ret;
1803 	}
1804 
1805 	pkt = cmd.resp_pkt;
1806 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1807 		device_printf(sc->sc_dev,
1808 		    "Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
1809 		    pkt->hdr.flags);
1810 		ret = EIO;
1811 		goto exit;
1812 	}
1813 
1814 	/* Extract NVM response */
1815 	nvm_resp = (void *)pkt->data;
1816 
1817 	ret = le16toh(nvm_resp->status);
1818 	bytes_read = le16toh(nvm_resp->length);
1819 	offset_read = le16toh(nvm_resp->offset);
1820 	resp_data = nvm_resp->data;
1821 	if (ret) {
1822 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1823 		    "NVM access command failed with status %d\n", ret);
1824 		ret = EINVAL;
1825 		goto exit;
1826 	}
1827 
1828 	if (offset_read != offset) {
1829 		device_printf(sc->sc_dev,
1830 		    "NVM ACCESS response with invalid offset %d\n",
1831 		    offset_read);
1832 		ret = EINVAL;
1833 		goto exit;
1834 	}
1835 
1836 	if (bytes_read > length) {
1837 		device_printf(sc->sc_dev,
1838 		    "NVM ACCESS response with too much data "
1839 		    "(%d bytes requested, %zd bytes received)\n",
1840 		    length, bytes_read);
1841 		ret = EINVAL;
1842 		goto exit;
1843 	}
1844 
1845 	memcpy(data + offset, resp_data, bytes_read);
1846 	*len = bytes_read;
1847 
1848  exit:
1849 	iwm_free_resp(sc, &cmd);
1850 	return ret;
1851 }
1852 
1853 /*
1854  * Reads an NVM section completely.
1855  * NICs prior to 7000 family don't have a real NVM, but just read
1856  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1857  * by uCode, we need to manually check in this case that we don't
1858  * overflow and try to read more than the EEPROM size.
1859  * For 7000 family NICs, we supply the maximal size we can read, and
1860  * the uCode fills the response with as much data as we can,
1861  * without overflowing, so no check is needed.
1862  */
1863 static int
1864 iwm_nvm_read_section(struct iwm_softc *sc,
1865 	uint16_t section, uint8_t *data, uint16_t *len, size_t max_len)
1866 {
1867 	uint16_t chunklen, seglen;
1868 	int error = 0;
1869 
1870 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1871 	    "reading NVM section %d\n", section);
1872 
1873 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1874 	*len = 0;
1875 
1876 	/* Read NVM chunks until exhausted (reading less than requested) */
1877 	while (seglen == chunklen && *len < max_len) {
1878 		error = iwm_nvm_read_chunk(sc,
1879 		    section, *len, chunklen, data, &seglen);
1880 		if (error) {
1881 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1882 			    "Cannot read from NVM section "
1883 			    "%d at offset %d\n", section, *len);
1884 			return error;
1885 		}
1886 		*len += seglen;
1887 	}
1888 
1889 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1890 	    "NVM section %d read completed (%d bytes, error=%d)\n",
1891 	    section, *len, error);
1892 	return error;
1893 }
1894 
1895 /* NVM offsets (in words) definitions */
1896 enum iwm_nvm_offsets {
1897 	/* NVM HW-Section offset (in words) definitions */
1898 	IWM_HW_ADDR = 0x15,
1899 
1900 /* NVM SW-Section offset (in words) definitions */
1901 	IWM_NVM_SW_SECTION = 0x1C0,
1902 	IWM_NVM_VERSION = 0,
1903 	IWM_RADIO_CFG = 1,
1904 	IWM_SKU = 2,
1905 	IWM_N_HW_ADDRS = 3,
1906 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1907 
1908 /* NVM calibration section offset (in words) definitions */
1909 	IWM_NVM_CALIB_SECTION = 0x2B8,
1910 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1911 };
1912 
1913 enum iwm_8000_nvm_offsets {
1914 	/* NVM HW-Section offset (in words) definitions */
1915 	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1916 	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1917 	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1918 	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1919 	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1920 
1921 	/* NVM SW-Section offset (in words) definitions */
1922 	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1923 	IWM_NVM_VERSION_8000 = 0,
1924 	IWM_RADIO_CFG_8000 = 0,
1925 	IWM_SKU_8000 = 2,
1926 	IWM_N_HW_ADDRS_8000 = 3,
1927 
1928 	/* NVM REGULATORY -Section offset (in words) definitions */
1929 	IWM_NVM_CHANNELS_8000 = 0,
1930 	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1931 	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1932 	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1933 
1934 	/* NVM calibration section offset (in words) definitions */
1935 	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1936 	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1937 };
1938 
1939 /* SKU Capabilities (actual values from NVM definition) */
1940 enum nvm_sku_bits {
1941 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1942 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1943 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1944 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1945 };
1946 
1947 /* radio config bits (actual values from NVM definition) */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   ((x) & 0x3)          /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   (((x) >> 2)  & 0x3)  /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   (((x) >> 4)  & 0x3)  /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   (((x) >> 6)  & 0x3)  /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) (((x) >> 8)  & 0xF)  /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) (((x) >> 12) & 0xF)  /* bits 12-15 */

#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	((x) & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		(((x) >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		(((x) >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		(((x) >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	(((x) >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	(((x) >> 28) & 0xF)
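
/*
 * Example (hypothetical 7000-family value): radio_cfg = 0x0f94 decodes
 * to dash 0, step 1, type 1, pnum 2 and a TX antenna mask of 0xf.
 */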
1961 
1962 #define DEFAULT_MAX_TX_POWER 16
1963 
1964 /**
1965  * enum iwm_nvm_channel_flags - channel flags in NVM
1966  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1967  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1968  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1969  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1970  * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
1971  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1972  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1973  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1974  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1975  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1976  */
1977 enum iwm_nvm_channel_flags {
1978 	IWM_NVM_CHANNEL_VALID = (1 << 0),
1979 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1980 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1981 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1982 	IWM_NVM_CHANNEL_DFS = (1 << 7),
1983 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1984 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1985 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1986 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1987 };
1988 
1989 /*
1990  * Translate EEPROM flags to net80211.
1991  */
1992 static uint32_t
1993 iwm_eeprom_channel_flags(uint16_t ch_flags)
1994 {
1995 	uint32_t nflags;
1996 
1997 	nflags = 0;
1998 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1999 		nflags |= IEEE80211_CHAN_PASSIVE;
2000 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2001 		nflags |= IEEE80211_CHAN_NOADHOC;
2002 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2003 		nflags |= IEEE80211_CHAN_DFS;
2004 		/* Just in case. */
2005 		nflags |= IEEE80211_CHAN_NOADHOC;
2006 	}
2007 
2008 	return (nflags);
2009 }
2010 
2011 static void
2012 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2013     int maxchans, int *nchans, int ch_idx, size_t ch_num,
2014     const uint8_t bands[])
2015 {
2016 	const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
2017 	uint32_t nflags;
2018 	uint16_t ch_flags;
2019 	uint8_t ieee;
2020 	int error;
2021 
2022 	for (; ch_idx < ch_num; ch_idx++) {
2023 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2024 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2025 			ieee = iwm_nvm_channels[ch_idx];
2026 		else
2027 			ieee = iwm_nvm_channels_8000[ch_idx];
2028 
2029 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2030 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2031 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
2032 			    ieee, ch_flags,
2033 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2034 			    "5.2" : "2.4");
2035 			continue;
2036 		}
2037 
2038 		nflags = iwm_eeprom_channel_flags(ch_flags);
2039 		error = ieee80211_add_channel(chans, maxchans, nchans,
2040 		    ieee, 0, 0, nflags, bands);
2041 		if (error != 0)
2042 			break;
2043 
2044 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2045 		    "Ch. %d Flags %x [%sGHz] - Added\n",
2046 		    ieee, ch_flags,
2047 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2048 		    "5.2" : "2.4");
2049 	}
2050 }
2051 
2052 static void
2053 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2054     struct ieee80211_channel chans[])
2055 {
2056 	struct iwm_softc *sc = ic->ic_softc;
2057 	struct iwm_nvm_data *data = &sc->sc_nvm;
2058 	uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
2059 	size_t ch_num;
2060 
2061 	memset(bands, 0, sizeof(bands));
2062 	/* 1-13: 11b/g channels. */
2063 	setbit(bands, IEEE80211_MODE_11B);
2064 	setbit(bands, IEEE80211_MODE_11G);
2065 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2066 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2067 
2068 	/* 14: 11b channel only. */
2069 	clrbit(bands, IEEE80211_MODE_11G);
2070 	iwm_add_channel_band(sc, chans, maxchans, nchans,
2071 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2072 
2073 	if (data->sku_cap_band_52GHz_enable) {
2074 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2075 			ch_num = nitems(iwm_nvm_channels);
2076 		else
2077 			ch_num = nitems(iwm_nvm_channels_8000);
2078 		memset(bands, 0, sizeof(bands));
2079 		setbit(bands, IEEE80211_MODE_11A);
2080 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2081 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2082 	}
2083 }
2084 
2085 static void
2086 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2087 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2088 {
2089 	const uint8_t *hw_addr;
2090 
2091 	if (mac_override) {
2092 		static const uint8_t reserved_mac[] = {
2093 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2094 		};
2095 
2096 		hw_addr = (const uint8_t *)(mac_override +
2097 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2098 
2099 		/*
2100 		 * Store the MAC address from MAO section.
2101 		 * No byte swapping is required in MAO section
2102 		 */
2103 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2104 
2105 		/*
2106 		 * Force the use of the OTP MAC address in case of reserved MAC
2107 		 * address in the NVM, or if address is given but invalid.
2108 		 */
2109 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2110 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2111 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2112 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2113 			return;
2114 
2115 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2116 		    "%s: mac address from nvm override section invalid\n",
2117 		    __func__);
2118 	}
2119 
2120 	if (nvm_hw) {
2121 		/* read the mac address from WFMP registers */
2122 		uint32_t mac_addr0 =
2123 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2124 		uint32_t mac_addr1 =
2125 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2126 
2127 		hw_addr = (const uint8_t *)&mac_addr0;
2128 		data->hw_addr[0] = hw_addr[3];
2129 		data->hw_addr[1] = hw_addr[2];
2130 		data->hw_addr[2] = hw_addr[1];
2131 		data->hw_addr[3] = hw_addr[0];
2132 
2133 		hw_addr = (const uint8_t *)&mac_addr1;
2134 		data->hw_addr[4] = hw_addr[1];
2135 		data->hw_addr[5] = hw_addr[0];
2136 
2137 		return;
2138 	}
2139 
2140 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2141 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2142 }
2143 
2144 static int
2145 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2146 	    const uint16_t *phy_sku)
2147 {
2148 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2149 		return le16_to_cpup(nvm_sw + IWM_SKU);
2150 
2151 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2152 }
2153 
2154 static int
2155 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2156 {
2157 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2158 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2159 	else
2160 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2161 						IWM_NVM_VERSION_8000));
2162 }
2163 
2164 static int
2165 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2166 		  const uint16_t *phy_sku)
2167 {
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);

	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2172 }
2173 
2174 static int
iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2176 {
2177 	int n_hw_addr;
2178 
2179 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2180 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2181 
2182 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2183 
	return n_hw_addr & IWM_N_HW_ADDR_MASK;
2185 }
2186 
2187 static void
2188 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2189 		  uint32_t radio_cfg)
2190 {
2191 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2192 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2193 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2194 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2195 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2196 		return;
2197 	}
2198 
2199 	/* set the radio configuration for family 8000 */
2200 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2201 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2202 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2203 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2204 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2205 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2206 }
2207 
2208 static int
2209 iwm_parse_nvm_data(struct iwm_softc *sc,
2210 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2211 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2212 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2213 {
2214 	struct iwm_nvm_data *data = &sc->sc_nvm;
2215 	uint8_t hw_addr[IEEE80211_ADDR_LEN];
2216 	uint32_t sku, radio_cfg;
2217 
2218 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2219 
2220 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2221 	iwm_set_radio_cfg(sc, data, radio_cfg);
2222 
2223 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2224 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2225 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2226 	data->sku_cap_11n_enable = 0;
2227 
2228 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2229 
2230 	/* The byte order is little endian 16 bit, meaning 214365 */
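	/*
	 * Example (hypothetical address): 00:11:22:33:44:55 is stored in
	 * the NVM as the byte stream 11:00:33:22:55:44, so each 16-bit
	 * word is byte-swapped back below.
	 */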
2231 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2232 		IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
2233 		data->hw_addr[0] = hw_addr[1];
2234 		data->hw_addr[1] = hw_addr[0];
2235 		data->hw_addr[2] = hw_addr[3];
2236 		data->hw_addr[3] = hw_addr[2];
2237 		data->hw_addr[4] = hw_addr[5];
2238 		data->hw_addr[5] = hw_addr[4];
2239 	} else {
2240 		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
2241 	}
2242 
2243 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2244 		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2245 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2246 	} else {
2247 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2248 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2249 	}
	/*
	 * TODO: this value will prevent some checks from failing; we need
	 * to determine whether this field is still needed, and if so,
	 * where it lives in the NVM.
	 */
	data->calib_version = 255;
2255 
2256 	return 0;
2257 }
2258 
2259 static int
2260 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2261 {
2262 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2263 
2264 	/* Checking for required sections */
2265 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2266 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2267 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
2268 			device_printf(sc->sc_dev,
2269 			    "Can't parse empty OTP/NVM sections\n");
2270 			return ENOENT;
2271 		}
2272 
2273 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
2274 	} else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2275 		/* SW and REGULATORY sections are mandatory */
2276 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2277 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2278 			device_printf(sc->sc_dev,
2279 			    "Can't parse empty OTP/NVM sections\n");
2280 			return ENOENT;
2281 		}
2282 		/* MAC_OVERRIDE or at least HW section must exist */
2283 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
2284 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2285 			device_printf(sc->sc_dev,
2286 			    "Can't parse mac_address, empty sections\n");
2287 			return ENOENT;
2288 		}
2289 
2290 		/* PHY_SKU section is mandatory in B0 */
2291 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2292 			device_printf(sc->sc_dev,
2293 			    "Can't parse phy_sku in B0, empty sections\n");
2294 			return ENOENT;
2295 		}
2296 
2297 		hw = (const uint16_t *)
2298 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
2299 	} else {
2300 		panic("unknown device family %d\n", sc->sc_device_family);
2301 	}
2302 
2303 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2304 	calib = (const uint16_t *)
2305 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2306 	regulatory = (const uint16_t *)
2307 	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2308 	mac_override = (const uint16_t *)
2309 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2310 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2311 
2312 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2313 	    phy_sku, regulatory);
2314 }
2315 
2316 static int
2317 iwm_nvm_init(struct iwm_softc *sc)
2318 {
2319 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2320 	int i, section, error;
2321 	uint16_t len;
2322 	uint8_t *buf;
2323 	const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
2324 
	memset(nvm_sections, 0, sizeof(nvm_sections));
2326 
2327 	buf = kmalloc(bufsz, M_DEVBUF, M_INTWAIT);
2328 	if (buf == NULL)
2329 		return ENOMEM;
2330 
2331 	for (i = 0; i < nitems(nvm_to_read); i++) {
2332 		section = nvm_to_read[i];
		KKASSERT(section < nitems(nvm_sections));
2334 
2335 		error = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
2336 		if (error) {
2337 			error = 0;
2338 			continue;
2339 		}
2340 		nvm_sections[section].data = kmalloc(len, M_DEVBUF, M_INTWAIT);
2341 		if (nvm_sections[section].data == NULL) {
2342 			error = ENOMEM;
2343 			break;
2344 		}
2345 		memcpy(nvm_sections[section].data, buf, len);
2346 		nvm_sections[section].length = len;
2347 	}
2348 	kfree(buf, M_DEVBUF);
2349 	if (error == 0)
2350 		error = iwm_parse_nvm_sections(sc, nvm_sections);
2351 
2352 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2353 		if (nvm_sections[i].data != NULL)
2354 			kfree(nvm_sections[i].data, M_DEVBUF);
2355 	}
2356 
2357 	return error;
2358 }
2359 
2360 /*
2361  * Firmware loading gunk.  This is kind of a weird hybrid between the
2362  * iwn driver and the Linux iwlwifi driver.
2363  */
2364 
2365 static int
2366 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2367 	const uint8_t *section, uint32_t byte_cnt)
2368 {
2369 	int error = EINVAL;
2370 	uint32_t chunk_sz, offset;
2371 
2372 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
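	/*
	 * Example: assuming a 128KB DMA limit, a 300000-byte section is
	 * sent as two full 131072-byte chunks plus a 37856-byte tail.
	 */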
2373 
2374 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2375 		uint32_t addr, len;
2376 		const uint8_t *data;
2377 
2378 		addr = dst_addr + offset;
2379 		len = MIN(chunk_sz, byte_cnt - offset);
2380 		data = section + offset;
2381 
2382 		error = iwm_firmware_load_chunk(sc, addr, data, len);
2383 		if (error)
2384 			break;
2385 	}
2386 
2387 	return error;
2388 }
2389 
2390 static int
2391 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2392 	const uint8_t *chunk, uint32_t byte_cnt)
2393 {
2394 	struct iwm_dma_info *dma = &sc->fw_dma;
2395 	int error;
2396 
2397 	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
2398 	memcpy(dma->vaddr, chunk, byte_cnt);
2399 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2400 
2401 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2402 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
2403 		iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2404 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2405 	}
2406 
2407 	sc->sc_fw_chunk_done = 0;
2408 
2409 	if (!iwm_nic_lock(sc))
2410 		return EBUSY;
2411 
2412 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2413 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2414 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2415 	    dst_addr);
2416 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2417 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2418 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2419 	    (iwm_get_dma_hi_addr(dma->paddr)
2420 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2421 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2422 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2423 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2424 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2425 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2426 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2427 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2428 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2429 
2430 	iwm_nic_unlock(sc);
2431 
2432 	/* wait 1s for this segment to load */
2433 	error = 0;
2434 	while (!sc->sc_fw_chunk_done) {
2435 #if defined(__DragonFly__)
2436 		error = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz);
2437 #else
2438 		error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
2439 #endif
2440 		if (error)
2441 			break;
2442 	}
2443 
2444 	if (!sc->sc_fw_chunk_done) {
2445 		device_printf(sc->sc_dev,
2446 		    "fw chunk addr 0x%x len %d failed to load\n",
2447 		    dst_addr, byte_cnt);
2448 	}
2449 
2450 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2451 	    dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
2452 		iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2453 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2454 		iwm_nic_unlock(sc);
2455 	}
2456 
2457 	return error;
2458 }
2459 
2460 int
2461 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
2462     int cpu, int *first_ucode_section)
2463 {
2464 	int shift_param;
2465 	int i, error = 0, sec_num = 0x1;
2466 	uint32_t val, last_read_idx = 0;
2467 	const void *data;
2468 	uint32_t dlen;
2469 	uint32_t offset;
2470 
2471 	if (cpu == 1) {
2472 		shift_param = 0;
2473 		*first_ucode_section = 0;
2474 	} else {
2475 		shift_param = 16;
2476 		(*first_ucode_section)++;
2477 	}
2478 
2479 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
2480 		last_read_idx = i;
2481 		data = fws->fw_sect[i].fws_data;
2482 		dlen = fws->fw_sect[i].fws_len;
2483 		offset = fws->fw_sect[i].fws_devoff;
2484 
2485 		/*
2486 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2487 		 * CPU1 to CPU2.
2488 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2489 		 * CPU2 non paged to CPU2 paging sec.
2490 		 */
2491 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2492 		    offset == IWM_PAGING_SEPARATOR_SECTION)
2493 			break;
2494 
2495 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2496 		    "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
2497 		    i, offset, dlen, cpu);
2498 
2499 		if (dlen > sc->sc_fwdmasegsz) {
2500 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2501 			    "chunk %d too large (%d bytes)\n", i, dlen);
2502 			error = EFBIG;
2503 		} else {
2504 			error = iwm_firmware_load_sect(sc, offset, data, dlen);
2505 		}
2506 		if (error) {
2507 			device_printf(sc->sc_dev,
2508 			    "could not load firmware chunk %d (error %d)\n",
2509 			    i, error);
2510 			return error;
2511 		}
2512 
2513 		/* Notify the ucode of the loaded section number and status */
2514 		if (iwm_nic_lock(sc)) {
2515 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2516 			val = val | (sec_num << shift_param);
2517 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2518 			sec_num = (sec_num << 1) | 0x1;
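			/*
			 * Example: across consecutive sections the value
			 * ORed into the load status grows as 0x1, 0x3,
			 * 0x7, ... (shifted left by 16 for CPU2).
			 */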
2519 			iwm_nic_unlock(sc);
2520 
2521 			/*
2522 			 * The firmware won't load correctly without this delay.
2523 			 */
2524 			DELAY(8000);
2525 		}
2526 	}
2527 
2528 	*first_ucode_section = last_read_idx;
2529 
2530 	if (iwm_nic_lock(sc)) {
2531 		if (cpu == 1)
2532 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2533 		else
2534 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2535 		iwm_nic_unlock(sc);
2536 	}
2537 
2538 	return 0;
2539 }
2540 
2541 int
2542 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2543 {
2544 	struct iwm_fw_sects *fws;
2545 	int error = 0;
2546 	int first_ucode_section;
2547 
2548 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2549 	    ucode_type);
2550 
2551 	fws = &sc->sc_fw.fw_sects[ucode_type];
2552 
2553 	/* configure the ucode to be ready to get the secured image */
2554 	/* release CPU reset */
2555 	iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2556 
2557 	/* load to FW the binary Secured sections of CPU1 */
2558 	error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2559 	if (error)
2560 		return error;
2561 
2562 	/* load to FW the binary sections of CPU2 */
2563 	return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2564 }
2565 
2566 static int
2567 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2568 {
2569 	struct iwm_fw_sects *fws;
2570 	int error, i;
2571 	const void *data;
2572 	uint32_t dlen;
2573 	uint32_t offset;
2574 
2575 	sc->sc_uc.uc_intr = 0;
2576 
2577 	fws = &sc->sc_fw.fw_sects[ucode_type];
2578 	for (i = 0; i < fws->fw_count; i++) {
2579 		data = fws->fw_sect[i].fws_data;
2580 		dlen = fws->fw_sect[i].fws_len;
2581 		offset = fws->fw_sect[i].fws_devoff;
2582 		IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2583 		    "LOAD FIRMWARE type %d offset %u len %d\n",
2584 		    ucode_type, offset, dlen);
2585 		if (dlen > sc->sc_fwdmasegsz) {
2586 			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2587 			    "chunk %d too large (%d bytes)\n", i, dlen);
2588 			error = EFBIG;
2589 		} else {
2590 			error = iwm_firmware_load_sect(sc, offset, data, dlen);
2591 		}
2592 		if (error) {
2593 			device_printf(sc->sc_dev,
2594 			    "could not load firmware chunk %u of %u "
2595 			    "(error=%d)\n", i, fws->fw_count, error);
2596 			return error;
2597 		}
2598 	}
2599 
2600 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2601 
2602 	return 0;
2603 }
2604 
2605 static int
2606 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2607 {
2608 	int error, w;
2609 
2610 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
2611 		error = iwm_load_firmware_8000(sc, ucode_type);
2612 	else
2613 		error = iwm_load_firmware_7000(sc, ucode_type);
2614 	if (error)
2615 		return error;
2616 
2617 	/* wait for the firmware to load */
2618 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2619 #if defined(__DragonFly__)
2620 		error = lksleep(&sc->sc_uc, &sc->sc_lk, 0, "iwmuc", hz/10);
2621 #else
2622 		error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2623 #endif
2624 	}
2625 	if (error || !sc->sc_uc.uc_ok) {
2626 		device_printf(sc->sc_dev, "could not load firmware\n");
2627 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2628 			device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
2629 			    iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
2630 			device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
2631 			    iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2632 		}
2633 	}
2634 
2635 	/*
2636 	 * Give the firmware some time to initialize.
2637 	 * Accessing it too early causes errors.
2638 	 */
2639 	lksleep(&w, &sc->sc_lk, 0, "iwmfwinit", hz);
2640 
2641 	return error;
2642 }
2643 
2644 static int
2645 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2646 {
2647 	int error;
2648 
2649 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
2650 
2651 	if ((error = iwm_nic_init(sc)) != 0) {
2652 		device_printf(sc->sc_dev, "unable to init nic\n");
2653 		return error;
2654 	}
2655 
2656 	/* make sure rfkill handshake bits are cleared */
2657 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2658 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2659 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2660 
2661 	/* clear (again), then enable host interrupts */
2662 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
2663 	iwm_enable_interrupts(sc);
2664 
2665 	/* really make sure rfkill handshake bits are cleared */
2666 	/* maybe we should write a few times more?  just to make sure */
2667 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2668 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2669 
2670 	/* Load the given image to the HW */
2671 	return iwm_load_firmware(sc, ucode_type);
2672 }
2673 
2674 static int
2675 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2676 {
2677 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2678 		.valid = htole32(valid_tx_ant),
2679 	};
2680 
2681 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2682 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2683 }
2684 
2685 static int
2686 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2687 {
2688 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2689 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2690 
2691 	/* Set parameters */
2692 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2693 	phy_cfg_cmd.calib_control.event_trigger =
2694 	    sc->sc_default_calib[ucode_type].event_trigger;
2695 	phy_cfg_cmd.calib_control.flow_trigger =
2696 	    sc->sc_default_calib[ucode_type].flow_trigger;
2697 
2698 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2699 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2700 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2701 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2702 }
2703 
2704 static int
2705 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2706 	enum iwm_ucode_type ucode_type)
2707 {
2708 	enum iwm_ucode_type old_type = sc->sc_uc_current;
2709 	int error;
2710 
2711 	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2712 		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2713 			error);
2714 		return error;
2715 	}
2716 
2717 	sc->sc_uc_current = ucode_type;
2718 	error = iwm_start_fw(sc, ucode_type);
2719 	if (error) {
2720 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2721 		sc->sc_uc_current = old_type;
2722 		return error;
2723 	}
2724 
2725 	error = iwm_post_alive(sc);
2726 	if (error) {
2727 		device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
2728 	}
2729 	return error;
2730 }
2731 
2732 /*
2733  * mvm misc bits
2734  */
2735 
2736 static int
2737 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2738 {
2739 	int error;
2740 
2741 	/* do not operate with rfkill switch turned on */
2742 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2743 		device_printf(sc->sc_dev,
2744 		    "radio is disabled by hardware switch\n");
2745 		return EPERM;
2746 	}
2747 
2748 	sc->sc_init_complete = 0;
2749 	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2750 	    IWM_UCODE_TYPE_INIT)) != 0) {
2751 		device_printf(sc->sc_dev, "failed to load init firmware\n");
2752 		return error;
2753 	}
2754 
2755 	if (justnvm) {
2756 		if ((error = iwm_nvm_init(sc)) != 0) {
2757 			device_printf(sc->sc_dev, "failed to read nvm\n");
2758 			return error;
2759 		}
2760 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
2761 
2762 		return 0;
2763 	}
2764 
2765 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
2766 		device_printf(sc->sc_dev,
2767 		    "failed to send bt coex configuration: %d\n", error);
2768 		return error;
2769 	}
2770 
2771 	/* Init Smart FIFO. */
2772 	error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2773 	if (error != 0)
2774 		return error;
2775 
2776 	/* Send TX valid antennas before triggering calibrations */
2777 	if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
2778 		device_printf(sc->sc_dev,
2779 		    "failed to send antennas before calibration: %d\n", error);
2780 		return error;
2781 	}
2782 
2783 	/*
2784 	 * Send phy configurations command to init uCode
2785 	 * to start the 16.0 uCode init image internal calibrations.
2786 	 */
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
2788 		device_printf(sc->sc_dev,
2789 		    "%s: failed to run internal calibration: %d\n",
2790 		    __func__, error);
2791 		return error;
2792 	}
2793 
2794 	/*
2795 	 * Nothing to do but wait for the init complete notification
2796 	 * from the firmware
2797 	 */
2798 	while (!sc->sc_init_complete) {
2799 #if defined(__DragonFly__)
2800 		error = lksleep(&sc->sc_init_complete, &sc->sc_lk,
2801 				 0, "iwminit", 2*hz);
2802 #else
2803 		error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2804 				 0, "iwminit", 2*hz);
2805 #endif
2806 		if (error) {
2807 			device_printf(sc->sc_dev, "init complete failed: %d\n",
2808 				sc->sc_init_complete);
2809 			break;
2810 		}
2811 	}
2812 
2813 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
2814 	    sc->sc_init_complete ? "" : "not ");
2815 
2816 	return error;
2817 }
2818 
2819 /*
2820  * receive side
2821  */
2822 
2823 /* (re)stock rx ring, called at init-time and at runtime */
2824 static int
2825 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2826 {
2827 	struct iwm_rx_ring *ring = &sc->rxq;
2828 	struct iwm_rx_data *data = &ring->data[idx];
2829 	struct mbuf *m;
2830 	bus_dmamap_t dmamap = NULL;
2831 	bus_dma_segment_t seg;
2832 	int nsegs, error;
2833 
2834 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2835 	if (m == NULL)
2836 		return ENOBUFS;
2837 
2838 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2839 #if defined(__DragonFly__)
2840 	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
2841 	    m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
2842 #else
2843 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
2844 	    &seg, &nsegs, BUS_DMA_NOWAIT);
2845 #endif
2846 	if (error != 0) {
2847 		device_printf(sc->sc_dev,
2848 		    "%s: can't map mbuf, error %d\n", __func__, error);
2849 		goto fail;
2850 	}
2851 
2852 	if (data->m != NULL)
2853 		bus_dmamap_unload(ring->data_dmat, data->map);
2854 
2855 	/* Swap ring->spare_map with data->map */
2856 	dmamap = data->map;
2857 	data->map = ring->spare_map;
2858 	ring->spare_map = dmamap;
2859 
2860 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2861 	data->m = m;
2862 
2863 	/* Update RX descriptor. */
2864 	KKASSERT((seg.ds_addr & 255) == 0);
2865 	ring->desc[idx] = htole32(seg.ds_addr >> 8);
2866 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2867 	    BUS_DMASYNC_PREWRITE);
2868 
2869 	return 0;
2870 fail:
2871 	m_freem(m);
2872 	return error;
2873 }
2874 
2875 #define IWM_RSSI_OFFSET 50
2876 static int
2877 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2878 {
2879 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2880 	uint32_t agc_a, agc_b;
2881 	uint32_t val;
2882 
2883 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2884 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2885 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2886 
2887 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2888 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2889 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2890 
2891 	/*
2892 	 * dBm = rssi dB - agc dB - constant.
2893 	 * Higher AGC (higher radio gain) means lower signal.
2894 	 */
2895 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2896 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2897 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2898 
2899 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2900 	    "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2901 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2902 
2903 	return max_rssi_dbm;
2904 }
2905 
2906 /*
2907  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2908  * values are reported by the fw as positive values - need to negate
 * to obtain their dBm.  Account for missing antennas by replacing 0
2910  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
2911  */
2912 static int
2913 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2914 {
2915 	int energy_a, energy_b, energy_c, max_energy;
2916 	uint32_t val;
2917 
2918 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2919 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2920 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
2921 	energy_a = energy_a ? -energy_a : -256;
2922 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2923 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
2924 	energy_b = energy_b ? -energy_b : -256;
2925 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2926 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
2927 	energy_c = energy_c ? -energy_c : -256;
2928 	max_energy = MAX(energy_a, energy_b);
2929 	max_energy = MAX(max_energy, energy_c);
2930 
2931 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2932 	    "energy In A %d B %d C %d , and max %d\n",
2933 	    energy_a, energy_b, energy_c, max_energy);
2934 
2935 	return max_energy;
2936 }
2937 
2938 static void
2939 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2940 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2941 {
2942 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2943 
2944 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2945 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2946 
2947 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2948 }
2949 
2950 /*
2951  * Retrieve the average noise (in dBm) among receivers.
2952  */
2953 static int
2954 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2955 {
2956 	int i, total, nbant, noise;
2957 
2958 	total = nbant = noise = 0;
2959 	for (i = 0; i < 3; i++) {
2960 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2961 		if (noise) {
2962 			total += noise;
2963 			nbant++;
2964 		}
2965 	}
2966 
2967 	/* There should be at least one antenna but check anyway. */
2968 	return (nbant == 0) ? -127 : (total / nbant) - 107;
2969 }
2970 
2971 /*
2972  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2973  *
2974  * Handles the actual data of the Rx packet from the fw
2975  */
2976 static void
2977 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2978 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2979 {
2980 	struct ieee80211com *ic = &sc->sc_ic;
2981 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2982 	struct ieee80211_frame *wh;
2983 	struct ieee80211_node *ni;
2984 	struct ieee80211_rx_stats rxs;
2985 	struct mbuf *m;
2986 	struct iwm_rx_phy_info *phy_info;
2987 	struct iwm_rx_mpdu_res_start *rx_res;
2988 	uint32_t len;
2989 	uint32_t rx_pkt_status;
2990 	int rssi;
2991 
2992 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2993 
2994 	phy_info = &sc->sc_last_phy_info;
2995 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2996 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2997 	len = le16toh(rx_res->byte_count);
2998 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
2999 
3000 	m = data->m;
3001 	m->m_data = pkt->data + sizeof(*rx_res);
3002 	m->m_pkthdr.len = m->m_len = len;
3003 
3004 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3005 		device_printf(sc->sc_dev,
3006 		    "dsp size out of range [0,20]: %d\n",
3007 		    phy_info->cfg_phy_cnt);
3008 		return;
3009 	}
3010 
3011 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3012 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3013 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3014 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3015 		return; /* drop */
3016 	}
3017 
3018 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3019 		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3020 	} else {
3021 		rssi = iwm_mvm_calc_rssi(sc, phy_info);
3022 	}
3023 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
3024 	rssi = MIN(rssi, sc->sc_max_rssi);	/* clip to max. 100% */
3025 
3026 	/* replenish ring for the buffer we're going to feed to the sharks */
3027 	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3028 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3029 		    __func__);
3030 		return;
3031 	}
3032 
3033 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3034 
3035 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3036 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3037 	    __func__,
3038 	    le16toh(phy_info->channel),
3039 	    le16toh(phy_info->phy_flags));
3040 
3041 	/*
3042 	 * Populate an RX state struct with the provided information.
3043 	 */
3044 	bzero(&rxs, sizeof(rxs));
3045 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3046 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3047 	rxs.c_ieee = le16toh(phy_info->channel);
	if (le16toh(phy_info->phy_flags) & IWM_RX_RES_PHY_FLAGS_BAND_24) {
3049 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3050 	} else {
3051 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3052 	}
3053 	rxs.rssi = rssi - sc->sc_noise;
3054 	rxs.nf = sc->sc_noise;
3055 
3056 	if (ieee80211_radiotap_active_vap(vap)) {
3057 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3058 
3059 		tap->wr_flags = 0;
3060 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3061 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3062 		tap->wr_chan_freq = htole16(rxs.c_freq);
3063 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3064 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3065 		tap->wr_dbm_antsignal = (int8_t)rssi;
3066 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3067 		tap->wr_tsft = phy_info->system_timestamp;
3068 		switch (phy_info->rate) {
3069 		/* CCK rates. */
3070 		case  10: tap->wr_rate =   2; break;
3071 		case  20: tap->wr_rate =   4; break;
3072 		case  55: tap->wr_rate =  11; break;
3073 		case 110: tap->wr_rate =  22; break;
3074 		/* OFDM rates. */
3075 		case 0xd: tap->wr_rate =  12; break;
3076 		case 0xf: tap->wr_rate =  18; break;
3077 		case 0x5: tap->wr_rate =  24; break;
3078 		case 0x7: tap->wr_rate =  36; break;
3079 		case 0x9: tap->wr_rate =  48; break;
3080 		case 0xb: tap->wr_rate =  72; break;
3081 		case 0x1: tap->wr_rate =  96; break;
3082 		case 0x3: tap->wr_rate = 108; break;
3083 		/* Unknown rate: should not happen. */
3084 		default:  tap->wr_rate =   0;
3085 		}
3086 	}
3087 
3088 	IWM_UNLOCK(sc);
3089 	if (ni != NULL) {
3090 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3091 		ieee80211_input_mimo(ni, m, &rxs);
3092 		ieee80211_free_node(ni);
3093 	} else {
3094 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3095 		ieee80211_input_mimo_all(ic, m, &rxs);
3096 	}
3097 	IWM_LOCK(sc);
3098 }
3099 
3100 static int
3101 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3102 	struct iwm_node *in)
3103 {
3104 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3105 	struct ieee80211_node *ni = &in->in_ni;
3106 	struct ieee80211vap *vap = ni->ni_vap;
3107 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3108 	int failack = tx_resp->failure_frame;
3109 
3110 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3111 
3112 	/* Update rate control statistics. */
3113 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3114 	    __func__,
3115 	    (int) le16toh(tx_resp->status.status),
3116 	    (int) le16toh(tx_resp->status.sequence),
3117 	    tx_resp->frame_count,
3118 	    tx_resp->bt_kill_count,
3119 	    tx_resp->failure_rts,
3120 	    tx_resp->failure_frame,
3121 	    le32toh(tx_resp->initial_rate),
3122 	    (int) le16toh(tx_resp->wireless_media_time));
3123 
3124 	if (status != IWM_TX_STATUS_SUCCESS &&
3125 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3126 		ieee80211_ratectl_tx_complete(vap, ni,
3127 		    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3128 		return (1);
3129 	} else {
3130 		ieee80211_ratectl_tx_complete(vap, ni,
3131 		    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3132 		return (0);
3133 	}
3134 }
3135 
3136 static void
3137 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3138 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3139 {
3140 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3141 	int idx = cmd_hdr->idx;
3142 	int qid = cmd_hdr->qid;
3143 	struct iwm_tx_ring *ring = &sc->txq[qid];
3144 	struct iwm_tx_data *txd = &ring->data[idx];
3145 	struct iwm_node *in = txd->in;
3146 	struct mbuf *m = txd->m;
3147 	int status;
3148 
3149 	KASSERT(txd->done == 0, ("txd not done"));
3150 	KASSERT(txd->in != NULL, ("txd without node"));
3151 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3152 
3153 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3154 
3155 	sc->sc_tx_timer = 0;
3156 
3157 	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3158 
3159 	/* Unmap and free mbuf. */
3160 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3161 	bus_dmamap_unload(ring->data_dmat, txd->map);
3162 
3163 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3164 	    "free txd %p, in %p\n", txd, txd->in);
3165 	txd->done = 1;
3166 	txd->m = NULL;
3167 	txd->in = NULL;
3168 
3169 	ieee80211_tx_complete(&in->in_ni, m, status);
3170 
3171 	if (--ring->queued < IWM_TX_RING_LOMARK) {
3172 		sc->qfullmsk &= ~(1 << ring->qid);
3173 		if (sc->qfullmsk == 0) {
3174 			/*
3175 			 * Well, we're in interrupt context, but then again
3176 			 * I guess net80211 does all sorts of stunts in
3177 			 * interrupt context, so maybe this is no biggie.
3178 			 */
3179 			iwm_start(sc);
3180 		}
3181 	}
3182 }
3183 
3184 /*
3185  * transmit side
3186  */
3187 
3188 /*
3189  * Process a "command done" firmware notification.  This is where we wakeup
3190  * processes waiting for a synchronous command completion.
3191  * from if_iwn
3192  */
3193 static void
3194 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3195 {
3196 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3197 	struct iwm_tx_data *data;
3198 
3199 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3200 		return;	/* Not a command ack. */
3201 	}
3202 
3203 	data = &ring->data[pkt->hdr.idx];
3204 
3205 	/* If the command was mapped in an mbuf, free it. */
3206 	if (data->m != NULL) {
3207 		bus_dmamap_sync(ring->data_dmat, data->map,
3208 		    BUS_DMASYNC_POSTWRITE);
3209 		bus_dmamap_unload(ring->data_dmat, data->map);
3210 		m_freem(data->m);
3211 		data->m = NULL;
3212 	}
3213 	wakeup(&ring->desc[pkt->hdr.idx]);
3214 }
3215 
3216 #if 0
3217 /*
3218  * necessary only for block ack mode
3219  */
3220 void
3221 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3222 	uint16_t len)
3223 {
3224 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3225 	uint16_t w_val;
3226 
3227 	scd_bc_tbl = sc->sched_dma.vaddr;
3228 
3229 	len += 8; /* magic numbers came naturally from paris */
3230 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3231 		len = roundup(len, 4) / 4;
3232 
3233 	w_val = htole16(sta_id << 12 | len);
3234 
3235 	/* Update TX scheduler. */
3236 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3237 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3238 	    BUS_DMASYNC_PREWRITE);
3239 
3240 	/* I really wonder what this is ?!? */
3241 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3242 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3243 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3244 		    BUS_DMASYNC_PREWRITE);
3245 	}
3246 }
3247 #endif
3248 
3249 /*
3250  * Take an 802.11 (non-n) rate, find the relevant rate
3251  * table entry.  return the index into in_ridx[].
3252  *
3253  * The caller then uses that index back into in_ridx
3254  * to figure out the rate index programmed /into/
3255  * the firmware for this given node.
3256  */
3257 static int
3258 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3259     uint8_t rate)
3260 {
3261 	int i;
3262 	uint8_t r;
3263 
3264 	for (i = 0; i < nitems(in->in_ridx); i++) {
3265 		r = iwm_rates[in->in_ridx[i]].rate;
3266 		if (rate == r)
3267 			return (i);
3268 	}
3269 	/* XXX Return the first */
3270 	/* XXX TODO: have it return the /lowest/ */
3271 	return (0);
3272 }
3273 
3274 /*
3275  * Fill in the rate related information for a transmit command.
3276  */
3277 static const struct iwm_rate *
3278 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3279 	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
3280 {
3281 	struct ieee80211com *ic = &sc->sc_ic;
3282 	struct ieee80211_node *ni = &in->in_ni;
3283 	const struct iwm_rate *rinfo;
3284 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3285 	int ridx, rate_flags;
3286 
3287 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3288 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3289 
3290 	/*
3291 	 * XXX TODO: everything about the rate selection here is terrible!
3292 	 */
3293 
3294 	if (type == IEEE80211_FC0_TYPE_DATA) {
3295 		int i;
3296 		/* for data frames, use RS table */
3297 		(void) ieee80211_ratectl_rate(ni, NULL, 0);
3298 		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3299 		ridx = in->in_ridx[i];
3300 
3301 		/* This is the index into the programmed table */
3302 		tx->initial_rate_index = i;
3303 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3304 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3305 		    "%s: start with i=%d, txrate %d\n",
3306 		    __func__, i, iwm_rates[ridx].rate);
3307 	} else {
3308 		/*
3309 		 * For non-data, use the lowest supported rate for the given
3310 		 * operational mode.
3311 		 *
3312 		 * Note: there may not be any rate control information available.
3313 		 * This driver currently assumes if we're transmitting data
3314 		 * frames, use the rate control table.  Grr.
3315 		 *
3316 		 * XXX TODO: use the configured rate for the traffic type!
3317 		 * XXX TODO: this should be per-vap, not curmode; as we later
3318 		 * on we'll want to handle off-channel stuff (eg TDLS).
3319 		 */
3320 		if (ic->ic_curmode == IEEE80211_MODE_11A) {
3321 			/*
3322 			 * XXX this assumes the mode is either 11a or not 11a;
3323 			 * definitely won't work for 11n.
3324 			 */
3325 			ridx = IWM_RIDX_OFDM;
3326 		} else {
3327 			ridx = IWM_RIDX_CCK;
3328 		}
3329 	}
3330 
3331 	rinfo = &iwm_rates[ridx];
3332 
3333 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3334 	    __func__, ridx,
3335 	    rinfo->rate,
3336 	    !! (IWM_RIDX_IS_CCK(ridx))
3337 	    );
3338 
3339 	/* XXX TODO: hard-coded TX antenna? */
3340 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3341 	if (IWM_RIDX_IS_CCK(ridx))
3342 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3343 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3344 
3345 	return rinfo;
3346 }
3347 
3348 #define TB0_SIZE 16
3349 static int
3350 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3351 {
3352 	struct ieee80211com *ic = &sc->sc_ic;
3353 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3354 	struct iwm_node *in = IWM_NODE(ni);
3355 	struct iwm_tx_ring *ring;
3356 	struct iwm_tx_data *data;
3357 	struct iwm_tfd *desc;
3358 	struct iwm_device_cmd *cmd;
3359 	struct iwm_tx_cmd *tx;
3360 	struct ieee80211_frame *wh;
3361 	struct ieee80211_key *k = NULL;
3362 #if !defined(__DragonFly__)
3363 	struct mbuf *m1;
3364 #endif
3365 	const struct iwm_rate *rinfo;
3366 	uint32_t flags;
3367 	u_int hdrlen;
3368 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3369 	int nsegs;
3370 	uint8_t tid, type;
3371 	int i, totlen, error, pad;
3372 
3373 	wh = mtod(m, struct ieee80211_frame *);
3374 	hdrlen = ieee80211_anyhdrsize(wh);
3375 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3376 	tid = 0;
3377 	ring = &sc->txq[ac];
3378 	desc = &ring->desc[ring->cur];
3379 	memset(desc, 0, sizeof(*desc));
3380 	data = &ring->data[ring->cur];
3381 
3382 	/* Fill out iwm_tx_cmd to send to the firmware */
3383 	cmd = &ring->cmd[ring->cur];
3384 	cmd->hdr.code = IWM_TX_CMD;
3385 	cmd->hdr.flags = 0;
3386 	cmd->hdr.qid = ring->qid;
3387 	cmd->hdr.idx = ring->cur;
3388 
3389 	tx = (void *)cmd->data;
3390 	memset(tx, 0, sizeof(*tx));
3391 
3392 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
3393 
3394 	/* Encrypt the frame if need be. */
3395 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3396 		/* Retrieve key for TX && do software encryption. */
3397 		k = ieee80211_crypto_encap(ni, m);
3398 		if (k == NULL) {
3399 			m_freem(m);
3400 			return (ENOBUFS);
3401 		}
3402 		/* 802.11 header may have moved. */
3403 		wh = mtod(m, struct ieee80211_frame *);
3404 	}
3405 
3406 	if (ieee80211_radiotap_active_vap(vap)) {
3407 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3408 
3409 		tap->wt_flags = 0;
3410 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3411 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3412 		tap->wt_rate = rinfo->rate;
3413 		if (k != NULL)
3414 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3415 		ieee80211_radiotap_tx(vap, m);
3416 	}
3417 
3418 
3419 	totlen = m->m_pkthdr.len;
3420 
3421 	flags = 0;
3422 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3423 		flags |= IWM_TX_CMD_FLG_ACK;
3424 	}
3425 
3426 	if (type == IEEE80211_FC0_TYPE_DATA
3427 	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3428 	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3429 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3430 	}
3431 
3432 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3433 	    type != IEEE80211_FC0_TYPE_DATA)
3434 		tx->sta_id = sc->sc_aux_sta.sta_id;
3435 	else
3436 		tx->sta_id = IWM_STATION_ID;
3437 
3438 	if (type == IEEE80211_FC0_TYPE_MGT) {
3439 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3440 
3441 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3442 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3443 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3444 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3445 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3446 		} else {
3447 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3448 		}
3449 	} else {
3450 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3451 	}
3452 
3453 	if (hdrlen & 3) {
3454 		/* First segment length must be a multiple of 4. */
3455 		flags |= IWM_TX_CMD_FLG_MH_PAD;
3456 		pad = 4 - (hdrlen & 3);
3457 	} else
3458 		pad = 0;
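
	/*
	 * Example: a QoS data header is 26 bytes, so pad = 4 - (26 & 3)
	 * = 2, and IWM_TX_CMD_FLG_MH_PAD tells the firmware that padding
	 * was inserted between the MAC header and the payload.
	 */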
3459 
3460 	tx->driver_txop = 0;
3461 	tx->next_frame_len = 0;
3462 
3463 	tx->len = htole16(totlen);
3464 	tx->tid_tspec = tid;
3465 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3466 
3467 	/* Set physical address of "scratch area". */
3468 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3469 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3470 
3471 	/* Copy 802.11 header in TX command. */
3472 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3473 
3474 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3475 
3476 	tx->sec_ctl = 0;
3477 	tx->tx_flags |= htole32(flags);
3478 
3479 	/* Trim 802.11 header. */
3480 	m_adj(m, hdrlen);
3481 #if defined(__DragonFly__)
3482 	error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
3483 					    segs, IWM_MAX_SCATTER - 2,
3484 					    &nsegs, BUS_DMA_NOWAIT);
3485 #else
3486 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3487 	    segs, &nsegs, BUS_DMA_NOWAIT);
3488 #endif
3489 	if (error != 0) {
3490 #if defined(__DragonFly__)
3491 		device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3492 		    error);
3493 		m_freem(m);
3494 		return error;
3495 #else
3496 		if (error != EFBIG) {
3497 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3498 			    error);
3499 			m_freem(m);
3500 			return error;
3501 		}
3502 		/* Too many DMA segments, linearize mbuf. */
3503 		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3504 		if (m1 == NULL) {
3505 			device_printf(sc->sc_dev,
3506 			    "%s: could not defrag mbuf\n", __func__);
3507 			m_freem(m);
3508 			return (ENOBUFS);
3509 		}
3510 		m = m1;
3511 
3512 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3513 		    segs, &nsegs, BUS_DMA_NOWAIT);
3514 		if (error != 0) {
3515 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3516 			    error);
3517 			m_freem(m);
3518 			return error;
3519 		}
3520 #endif
3521 	}
3522 	data->m = m;
3523 	data->in = in;
3524 	data->done = 0;
3525 
3526 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3527 	    "sending txd %p, in %p\n", data, data->in);
3528 	KASSERT(data->in != NULL, ("node is NULL"));
3529 
3530 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3531 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3532 	    ring->qid, ring->cur, totlen, nsegs,
3533 	    le32toh(tx->tx_flags),
3534 	    le32toh(tx->rate_n_flags),
3535 	    tx->initial_rate_index
3536 	    );
3537 
3538 	/* Fill TX descriptor. */
3539 	desc->num_tbs = 2 + nsegs;
3540 
3541 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3542 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3543 	    (TB0_SIZE << 4);
3544 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3545 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3546 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3547 	      + hdrlen + pad - TB0_SIZE) << 4);
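
	/*
	 * Resulting layout sketch: TB0 holds the first TB0_SIZE (16)
	 * bytes of the TX command, TB1 the remainder of the command plus
	 * the (padded) 802.11 header, and TB2..TBn below hold the
	 * payload segments.
	 */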
3548 
3549 	/* Other DMA segments are for data payload. */
3550 	for (i = 0; i < nsegs; i++) {
3551 		seg = &segs[i];
3552 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
3553 		desc->tbs[i+2].hi_n_len =
3554 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3555 		    | ((seg->ds_len) << 4);
3556 	}
3557 
3558 	bus_dmamap_sync(ring->data_dmat, data->map,
3559 	    BUS_DMASYNC_PREWRITE);
3560 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3561 	    BUS_DMASYNC_PREWRITE);
3562 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3563 	    BUS_DMASYNC_PREWRITE);
3564 
3565 #if 0
3566 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3567 #endif
3568 
3569 	/* Kick TX ring. */
3570 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3571 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3572 
3573 	/* Mark TX ring as full if we reach a certain threshold. */
3574 	if (++ring->queued > IWM_TX_RING_HIMARK) {
3575 		sc->qfullmsk |= 1 << ring->qid;
3576 	}
3577 
3578 	return 0;
3579 }
3580 
3581 static int
3582 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3583     const struct ieee80211_bpf_params *params)
3584 {
3585 	struct ieee80211com *ic = ni->ni_ic;
3586 	struct iwm_softc *sc = ic->ic_softc;
3587 	int error = 0;
3588 
3589 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3590 	    "->%s begin\n", __func__);
3591 
3592 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3593 		m_freem(m);
3594 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3595 		    "<-%s not RUNNING\n", __func__);
3596 		return (ENETDOWN);
3597 	}
3598 
3599 	IWM_LOCK(sc);
3600 	/* XXX fix this */
3601 	if (params == NULL) {
3602 		error = iwm_tx(sc, m, ni, 0);
3603 	} else {
3604 		error = iwm_tx(sc, m, ni, 0);
3605 	}
3606 	sc->sc_tx_timer = 5;
3607 	IWM_UNLOCK(sc);
3608 
3609 	return (error);
3610 }
3611 
3612 /*
3613  * mvm/tx.c
3614  */
3615 
3616 #if 0
3617 /*
3618  * Note that there are transports that buffer frames before they reach
3619  * the firmware. This means that after flush_tx_path is called, the
3620  * queue might not be empty. The race-free way to handle this is to:
3621  * 1) set the station as draining
3622  * 2) flush the Tx path
3623  * 3) wait for the transport queues to be empty
3624  */
3625 int
3626 iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
3627 {
3628 	struct iwm_tx_path_flush_cmd flush_cmd = {
3629 		.queues_ctl = htole32(tfd_msk),
3630 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3631 	};
3632 	int ret;
3633 
3634 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
3635 	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
3636 	    sizeof(flush_cmd), &flush_cmd);
3637 	if (ret)
3638 		device_printf(sc->sc_dev,
3639 		    "Flushing tx queue failed: %d\n", ret);
3640 	return ret;
3641 }
3642 #endif
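
/*
 * A minimal usage sketch (not compiled) of the race-free drain sequence
 * described above; "mark as draining" and "wait for empty" are assumed
 * hooks this driver does not currently implement.
 */
#if 0
	/* 1) mark the station as draining */
	/* 2) flush the Tx path for all queues */
	(void) iwm_mvm_flush_tx_path(sc, 0xf, 1 /* sync */);
	/* 3) wait for the transport queues to become empty */
#endif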
3643 
3644 static int
3645 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3646 	struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3647 {
3648 	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3649 	    cmd, status);
3650 }
3651 
3652 /* send station add/update command to firmware */
3653 static int
3654 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3655 {
3656 	struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3657 	int ret;
3658 	uint32_t status;
3659 
3660 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3661 
3662 	add_sta_cmd.sta_id = IWM_STATION_ID;
3663 	add_sta_cmd.mac_id_n_color
3664 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3665 	        IWM_DEFAULT_COLOR));
3666 	if (!update) {
3667 		int ac;
3668 		for (ac = 0; ac < WME_NUM_AC; ac++) {
3669 			add_sta_cmd.tfd_queue_msk |=
3670 			    htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3671 		}
3672 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3673 	}
3674 	add_sta_cmd.add_modify = update ? 1 : 0;
3675 	add_sta_cmd.station_flags_msk
3676 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3677 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
3678 	if (update)
3679 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3680 
3681 	status = IWM_ADD_STA_SUCCESS;
3682 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3683 	if (ret)
3684 		return ret;
3685 
3686 	switch (status) {
3687 	case IWM_ADD_STA_SUCCESS:
3688 		break;
3689 	default:
3690 		ret = EIO;
3691 		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3692 		break;
3693 	}
3694 
3695 	return ret;
3696 }
3697 
3698 static int
3699 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3700 {
3701 	return iwm_mvm_sta_send_to_fw(sc, in, 0);
3702 }
3703 
3704 static int
3705 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3706 {
3707 	return iwm_mvm_sta_send_to_fw(sc, in, 1);
3708 }
3709 
3710 static int
3711 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3712 	const uint8_t *addr, uint16_t mac_id, uint16_t color)
3713 {
3714 	struct iwm_mvm_add_sta_cmd_v7 cmd;
3715 	int ret;
3716 	uint32_t status;
3717 
3718 	memset(&cmd, 0, sizeof(cmd));
3719 	cmd.sta_id = sta->sta_id;
3720 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3721 
3722 	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3723 	cmd.tid_disable_tx = htole16(0xffff);
3724 
3725 	if (addr)
3726 		IEEE80211_ADDR_COPY(cmd.addr, addr);
3727 
3728 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3729 	if (ret)
3730 		return ret;
3731 
3732 	switch (status) {
3733 	case IWM_ADD_STA_SUCCESS:
3734 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3735 		    "%s: Internal station added.\n", __func__);
3736 		return 0;
3737 	default:
3738 		device_printf(sc->sc_dev,
3739 		    "%s: Add internal station failed, status=0x%x\n",
3740 		    __func__, status);
3741 		ret = EIO;
3742 		break;
3743 	}
3744 	return ret;
3745 }
3746 
3747 static int
3748 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3749 {
3750 	int ret;
3751 
3752 	sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3753 	sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3754 
3755 	ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3756 	if (ret)
3757 		return ret;
3758 
3759 	ret = iwm_mvm_add_int_sta_common(sc,
3760 	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3761 
3762 	if (ret)
3763 		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3764 	return ret;
3765 }
3766 
3767 static int
3768 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3769 {
3770 	struct iwm_time_quota_cmd cmd;
3771 	int i, idx, ret, num_active_macs, quota, quota_rem;
3772 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3773 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3774 	uint16_t id;
3775 
3776 	memset(&cmd, 0, sizeof(cmd));
3777 
3778 	/* currently, PHY ID == binding ID */
3779 	if (in) {
3780 		id = in->in_phyctxt->id;
3781 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3782 		colors[id] = in->in_phyctxt->color;
3783 
3784 		if (1)
3785 			n_ifs[id] = 1;
3786 	}
3787 
3788 	/*
3789 	 * The FW's scheduling session consists of
3790 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3791 	 * equally between all the bindings that require quota.
3792 	 */
3793 	num_active_macs = 0;
3794 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3795 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3796 		num_active_macs += n_ifs[i];
3797 	}
3798 
3799 	quota = 0;
3800 	quota_rem = 0;
3801 	if (num_active_macs) {
3802 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3803 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3804 	}
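
	/*
	 * Example: assuming IWM_MVM_MAX_QUOTA is 128, one active MAC gets
	 * quota = 128 with quota_rem = 0, while three active MACs would
	 * get quota = 42 each, with the remaining 2 fragments handed to
	 * the first binding below.
	 */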
3805 
3806 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3807 		if (colors[i] < 0)
3808 			continue;
3809 
3810 		cmd.quotas[idx].id_and_color =
3811 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3812 
3813 		if (n_ifs[i] <= 0) {
3814 			cmd.quotas[idx].quota = htole32(0);
3815 			cmd.quotas[idx].max_duration = htole32(0);
3816 		} else {
3817 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3818 			cmd.quotas[idx].max_duration = htole32(0);
3819 		}
3820 		idx++;
3821 	}
3822 
3823 	/* Give the remainder of the session to the first binding */
3824 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3825 
3826 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3827 	    sizeof(cmd), &cmd);
3828 	if (ret)
3829 		device_printf(sc->sc_dev,
3830 		    "%s: Failed to send quota: %d\n", __func__, ret);
3831 	return ret;
3832 }
3833 
3834 /*
3835  * ieee80211 routines
3836  */
3837 
3838 /*
3839  * Change to AUTH state in 80211 state machine.  Roughly matches what
3840  * Linux does in bss_info_changed().
3841  */
3842 static int
3843 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3844 {
3845 	struct ieee80211_node *ni;
3846 	struct iwm_node *in;
3847 	struct iwm_vap *iv = IWM_VAP(vap);
3848 	uint32_t duration;
3849 	int error;
3850 
3851 	/*
3852 	 * XXX I have a feeling that the vap node is being
3853 	 * freed from underneath us. Grr.
3854 	 */
3855 	ni = ieee80211_ref_node(vap->iv_bss);
3856 	in = IWM_NODE(ni);
3857 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3858 	    "%s: called; vap=%p, bss ni=%p\n",
3859 	    __func__,
3860 	    vap,
3861 	    ni);
3862 
3863 	in->in_assoc = 0;
3864 
3865 	error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3866 	if (error != 0)
3867 		return error;
3868 
3869 	error = iwm_allow_mcast(vap, sc);
3870 	if (error) {
3871 		device_printf(sc->sc_dev,
3872 		    "%s: failed to set multicast\n", __func__);
3873 		goto out;
3874 	}
3875 
3876 	/*
3877 	 * This is where it deviates from what Linux does.
3878 	 *
3879 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
3880 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
3881 	 * and always does a mac_ctx_changed().
3882 	 *
3883 	 * The openbsd port doesn't attempt to do that - it resets things
3884 	 * at odd states and does the add here.
3885 	 *
3886 	 * So, until the state handling is fixed (ie, we never reset
3887 	 * the NIC except for a firmware failure, which should drag
3888 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
3889 	 * contexts that are required), let's do a dirty hack here.
3890 	 */
3891 	if (iv->is_uploaded) {
3892 		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3893 			device_printf(sc->sc_dev,
3894 			    "%s: failed to update MAC\n", __func__);
3895 			goto out;
3896 		}
3897 		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3898 		    in->in_ni.ni_chan, 1, 1)) != 0) {
3899 			device_printf(sc->sc_dev,
3900 			    "%s: failed update phy ctxt\n", __func__);
3901 			goto out;
3902 		}
3903 		in->in_phyctxt = &sc->sc_phyctxt[0];
3904 
3905 		if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
3906 			device_printf(sc->sc_dev,
3907 			    "%s: binding update cmd\n", __func__);
3908 			goto out;
3909 		}
3910 		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3911 			device_printf(sc->sc_dev,
3912 			    "%s: failed to update sta\n", __func__);
3913 			goto out;
3914 		}
3915 	} else {
3916 		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3917 			device_printf(sc->sc_dev,
3918 			    "%s: failed to add MAC\n", __func__);
3919 			goto out;
3920 		}
3921 		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3922 		    in->in_ni.ni_chan, 1, 1)) != 0) {
3923 			device_printf(sc->sc_dev,
3924 			    "%s: failed add phy ctxt!\n", __func__);
3925 			error = ETIMEDOUT;
3926 			goto out;
3927 		}
3928 		in->in_phyctxt = &sc->sc_phyctxt[0];
3929 
3930 		if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3931 			device_printf(sc->sc_dev,
3932 			    "%s: binding add cmd\n", __func__);
3933 			goto out;
3934 		}
3935 		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3936 			device_printf(sc->sc_dev,
3937 			    "%s: failed to add sta\n", __func__);
3938 			goto out;
3939 		}
3940 	}
3941 
3942 	/*
3943 	 * Prevent the FW from wandering off channel during association
3944 	 * by "protecting" the session with a time event.
3945 	 */
3946 	/* XXX duration is in units of TU, not MS */
3947 	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3948 	iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
3949 	DELAY(100);
3950 
3951 	error = 0;
3952 out:
3953 	ieee80211_free_node(ni);
3954 	return (error);
3955 }
3956 
3957 static int
3958 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3959 {
3960 	struct iwm_node *in = IWM_NODE(vap->iv_bss);
3961 	int error;
3962 
3963 	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3964 		device_printf(sc->sc_dev,
3965 		    "%s: failed to update STA\n", __func__);
3966 		return error;
3967 	}
3968 
3969 	in->in_assoc = 1;
3970 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3971 		device_printf(sc->sc_dev,
3972 		    "%s: failed to update MAC\n", __func__);
3973 		return error;
3974 	}
3975 
3976 	return 0;
3977 }
3978 
3979 static int
3980 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
3981 {
3982 	/*
3983 	 * Ok, so *technically* the proper set of calls for going
3984 	 * from RUN back to SCAN is:
3985 	 *
3986 	 * iwm_mvm_power_mac_disable(sc, in);
3987 	 * iwm_mvm_mac_ctxt_changed(sc, in);
3988 	 * iwm_mvm_rm_sta(sc, in);
3989 	 * iwm_mvm_update_quotas(sc, NULL);
3990 	 * iwm_mvm_mac_ctxt_changed(sc, in);
3991 	 * iwm_mvm_binding_remove_vif(sc, in);
3992 	 * iwm_mvm_mac_ctxt_remove(sc, in);
3993 	 *
3994 	 * However, that freezes the device no matter which permutations
3995 	 * and modifications are attempted.  Obviously, this driver is missing
3996 	 * something since it works in the Linux driver, but figuring out what
3997 	 * is missing is a little more complicated.  Now, since we're going
3998 	 * back to nothing anyway, we'll just do a complete device reset.
3999 	 * Up yours, device!
4000 	 */
4001 	/* iwm_mvm_flush_tx_path(sc, 0xf, 1); */
4002 	iwm_stop_device(sc);
4003 	iwm_init_hw(sc);
4004 	if (in)
4005 		in->in_assoc = 0;
4006 	return 0;
4007 
4008 #if 0
4009 	int error;
4010 
4011 	iwm_mvm_power_mac_disable(sc, in);
4012 
4013 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4014 		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4015 		return error;
4016 	}
4017 
4018 	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4019 		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4020 		return error;
4021 	}
4022 	error = iwm_mvm_rm_sta(sc, in);
4023 	in->in_assoc = 0;
4024 	iwm_mvm_update_quotas(sc, NULL);
4025 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4026 		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4027 		return error;
4028 	}
4029 	iwm_mvm_binding_remove_vif(sc, in);
4030 
4031 	iwm_mvm_mac_ctxt_remove(sc, in);
4032 
4033 	return error;
4034 #endif
4035 }
4036 
4037 static struct ieee80211_node *
4038 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4039 {
4040 	return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4041 	    M_INTWAIT | M_ZERO);
4042 }
4043 
4044 static void
4045 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4046 {
4047 	struct ieee80211_node *ni = &in->in_ni;
4048 	struct iwm_lq_cmd *lq = &in->in_lq;
4049 	int nrates = ni->ni_rates.rs_nrates;
4050 	int i, ridx, tab = 0;
4051 	int txant = 0;
4052 
4053 	if (nrates > nitems(lq->rs_table)) {
4054 		device_printf(sc->sc_dev,
4055 		    "%s: node supports %d rates, driver handles "
4056 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4057 		return;
4058 	}
4059 	if (nrates == 0) {
4060 		device_printf(sc->sc_dev,
4061 		    "%s: node supports 0 rates, odd!\n", __func__);
4062 		return;
4063 	}
4064 
4065 	/*
4066 	 * XXX .. and most of iwm_node is not initialised explicitly;
4067 	 * it's all just 0x0 passed to the firmware.
4068 	 */
4069 
4070 	/* first figure out which rates we should support */
4071 	/* XXX TODO: this isn't 11n aware /at all/ */
4072 	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4073 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4074 	    "%s: nrates=%d\n", __func__, nrates);
4075 
4076 	/*
4077 	 * Loop over nrates and populate in_ridx from the highest
4078 	 * rate to the lowest rate.  Remember, in_ridx[] has
4079 	 * IEEE80211_RATE_MAXSIZE entries!
4080 	 */
4081 	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4082 		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4083 
4084 		/* Map 802.11 rate to HW rate index. */
4085 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4086 			if (iwm_rates[ridx].rate == rate)
4087 				break;
4088 		if (ridx > IWM_RIDX_MAX) {
4089 			device_printf(sc->sc_dev,
4090 			    "%s: WARNING: device rate for %d not found!\n",
4091 			    __func__, rate);
4092 		} else {
4093 			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4094 			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
4095 			    __func__,
4096 			    i,
4097 			    rate,
4098 			    ridx);
4099 			in->in_ridx[i] = ridx;
4100 		}
4101 	}
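
	/*
	 * Worked example (hypothetical 11b node): rs_rates = {2, 4, 11,
	 * 22} in 500 kbit/s units, so in_ridx[0..3] ends up holding the
	 * hardware indices for 22 (11 Mbit/s) down to 2 (1 Mbit/s).
	 */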
4102 
4103 	/* then construct a lq_cmd based on those */
4104 	memset(lq, 0, sizeof(*lq));
4105 	lq->sta_id = IWM_STATION_ID;
4106 
4107 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4108 	if (ni->ni_flags & IEEE80211_NODE_HT)
4109 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4110 
4111 	/*
4112 	 * Are these used? (We don't do SISO or MIMO.)
4113 	 * They need to be set to non-zero, though, or we get an error.
4114 	 */
4115 	lq->single_stream_ant_msk = 1;
4116 	lq->dual_stream_ant_msk = 1;
4117 
4118 	/*
4119 	 * Build the actual rate selection table.
4120 	 * The lowest bits are the rates.  Additionally,
4121 	 * CCK needs bit 9 to be set.  The rest of the bits
4122 	 * we add to the table select the tx antenna.
4123 	 * Note that we add the rates highest rate first
4124 	 * (the opposite order of ni_rates).
4125 	 */
4126 	/*
4127 	 * XXX TODO: this should be looping over the min of nrates
4128 	 * and LQ_MAX_RETRY_NUM.  Sigh.
4129 	 */
4130 	for (i = 0; i < nrates; i++) {
4131 		int nextant;
4132 
4133 		if (txant == 0)
4134 			txant = iwm_fw_valid_tx_ant(sc);
4135 		nextant = 1<<(ffs(txant)-1);
4136 		txant &= ~nextant;
4137 
4138 		/*
4139 		 * Map the rate id into a rate index into
4140 		 * our hardware table containing the
4141 		 * configuration to use for this rate.
4142 		 */
4143 		ridx = in->in_ridx[i];
4144 		tab = iwm_rates[ridx].plcp;
4145 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4146 		if (IWM_RIDX_IS_CCK(ridx))
4147 			tab |= IWM_RATE_MCS_CCK_MSK;
4148 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4149 		    "station rate i=%d, rate=%d, hw=%x\n",
4150 		    i, iwm_rates[ridx].rate, tab);
4151 		lq->rs_table[i] = htole32(tab);
4152 	}
4153 	/* then fill the rest with the lowest possible rate */
4154 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4155 		KASSERT(tab != 0, ("invalid tab"));
4156 		lq->rs_table[i] = htole32(tab);
4157 	}
4158 }
4159 
4160 static int
4161 iwm_media_change(struct ifnet *ifp)
4162 {
4163 	struct ieee80211vap *vap = ifp->if_softc;
4164 	struct ieee80211com *ic = vap->iv_ic;
4165 	struct iwm_softc *sc = ic->ic_softc;
4166 	int error;
4167 
4168 	error = ieee80211_media_change(ifp);
4169 	if (error != ENETRESET)
4170 		return error;
4171 
4172 	IWM_LOCK(sc);
4173 	if (ic->ic_nrunning > 0) {
4174 		iwm_stop(sc);
4175 		iwm_init(sc);
4176 	}
4177 	IWM_UNLOCK(sc);
4178 	return error;
4179 }
4180 
4181 
4182 static int
4183 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4184 {
4185 	struct iwm_vap *ivp = IWM_VAP(vap);
4186 	struct ieee80211com *ic = vap->iv_ic;
4187 	struct iwm_softc *sc = ic->ic_softc;
4188 	struct iwm_node *in;
4189 	int error;
4190 
4191 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4192 	    "switching state %s -> %s\n",
4193 	    ieee80211_state_name[vap->iv_state],
4194 	    ieee80211_state_name[nstate]);
4195 	IEEE80211_UNLOCK(ic);
4196 	IWM_LOCK(sc);
4197 
4198 	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4199 		iwm_led_blink_stop(sc);
4200 
4201 	/* disable beacon filtering if we're hopping out of RUN */
4202 	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4203 		iwm_mvm_disable_beacon_filter(sc);
4204 
4205 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4206 			in->in_assoc = 0;
4207 
4208 		iwm_release(sc, NULL);
4209 
4210 		/*
4211 		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
4212 		 * above then the card will be completely reinitialized,
4213 		 * so the driver must do everything necessary to bring the card
4214 		 * from INIT to SCAN.
4215 		 *
4216 		 * Additionally, upon receiving deauth frame from AP,
4217 		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4218 		 * state. This will also fail with this driver, so bring the FSM
4219 		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4220 		 *
4221 		 * XXX TODO: fix this for FreeBSD!
4222 		 */
4223 		if (nstate == IEEE80211_S_SCAN ||
4224 		    nstate == IEEE80211_S_AUTH ||
4225 		    nstate == IEEE80211_S_ASSOC) {
4226 			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4227 			    "Force transition to INIT; MGT=%d\n", arg);
4228 			IWM_UNLOCK(sc);
4229 			IEEE80211_LOCK(ic);
4230 			/* Always pass arg as -1 since we can't Tx right now. */
4231 			/*
4232 			 * XXX arg is just ignored anyway when transitioning
4233 			 *     to IEEE80211_S_INIT.
4234 			 */
4235 			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4236 			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4237 			    "Going INIT->SCAN\n");
4238 			nstate = IEEE80211_S_SCAN;
4239 			IEEE80211_UNLOCK(ic);
4240 			IWM_LOCK(sc);
4241 		}
4242 	}
4243 
4244 	switch (nstate) {
4245 	case IEEE80211_S_INIT:
4246 		break;
4247 
4248 	case IEEE80211_S_AUTH:
4249 		if ((error = iwm_auth(vap, sc)) != 0) {
4250 			device_printf(sc->sc_dev,
4251 			    "%s: could not move to auth state: %d\n",
4252 			    __func__, error);
4253 			break;
4254 		}
4255 		break;
4256 
4257 	case IEEE80211_S_ASSOC:
4258 		if ((error = iwm_assoc(vap, sc)) != 0) {
4259 			device_printf(sc->sc_dev,
4260 			    "%s: failed to associate: %d\n", __func__,
4261 			    error);
4262 			break;
4263 		}
4264 		break;
4265 
4266 	case IEEE80211_S_RUN:
4267 	{
4268 		struct iwm_host_cmd cmd = {
4269 			.id = IWM_LQ_CMD,
4270 			.len = { sizeof(in->in_lq), },
4271 			.flags = IWM_CMD_SYNC,
4272 		};
4273 
4274 		/* Update the association state, now we have it all */
4275 		/* (e.g. the associd comes in at this point) */
4276 		error = iwm_assoc(vap, sc);
4277 		if (error != 0) {
4278 			device_printf(sc->sc_dev,
4279 			    "%s: failed to update association state: %d\n",
4280 			    __func__,
4281 			    error);
4282 			break;
4283 		}
4284 
4285 		in = IWM_NODE(vap->iv_bss);
4286 		iwm_mvm_power_mac_update_mode(sc, in);
4287 		iwm_mvm_enable_beacon_filter(sc, in);
4288 		iwm_mvm_update_quotas(sc, in);
4289 		iwm_setrates(sc, in);
4290 
4291 		cmd.data[0] = &in->in_lq;
4292 		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4293 			device_printf(sc->sc_dev,
4294 			    "%s: IWM_LQ_CMD failed\n", __func__);
4295 		}
4296 
4297 		iwm_mvm_led_enable(sc);
4298 		break;
4299 	}
4300 
4301 	default:
4302 		break;
4303 	}
4304 	IWM_UNLOCK(sc);
4305 	IEEE80211_LOCK(ic);
4306 
4307 	return (ivp->iv_newstate(vap, nstate, arg));
4308 }
4309 
4310 void
4311 iwm_endscan_cb(void *arg, int pending)
4312 {
4313 	struct iwm_softc *sc = arg;
4314 	struct ieee80211com *ic = &sc->sc_ic;
4315 
4316 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4317 	    "%s: scan ended\n",
4318 	    __func__);
4319 
4320 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4321 }
4322 
4323 /*
4324  * Aging and idle timeouts for the different possible scenarios
4325  * in default configuration
4326  */
4327 static const uint32_t
4328 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4329 	{
4330 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4331 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4332 	},
4333 	{
4334 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4335 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4336 	},
4337 	{
4338 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4339 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4340 	},
4341 	{
4342 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
4343 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4344 	},
4345 	{
4346 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4347 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4348 	},
4349 };
4350 
4351 /*
4352  * Aging and idle timeouts for the different possible scenarios
4353  * in single BSS MAC configuration.
4354  */
4355 static const uint32_t
4356 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4357 	{
4358 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4359 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4360 	},
4361 	{
4362 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4363 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4364 	},
4365 	{
4366 		htole32(IWM_SF_MCAST_AGING_TIMER),
4367 		htole32(IWM_SF_MCAST_IDLE_TIMER)
4368 	},
4369 	{
4370 		htole32(IWM_SF_BA_AGING_TIMER),
4371 		htole32(IWM_SF_BA_IDLE_TIMER)
4372 	},
4373 	{
4374 		htole32(IWM_SF_TX_RE_AGING_TIMER),
4375 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
4376 	},
4377 };
4378 
4379 static void
4380 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4381     struct ieee80211_node *ni)
4382 {
4383 	int i, j, watermark;
4384 
4385 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4386 
4387 	/*
4388 	 * If we are in association flow - check antenna configuration
4389 	 * capabilities of the AP station, and choose the watermark accordingly.
4390 	 */
4391 	if (ni) {
4392 		if (ni->ni_flags & IEEE80211_NODE_HT) {
4393 #ifdef notyet
4394 			if (ni->ni_rxmcs[2] != 0)
4395 				watermark = IWM_SF_W_MARK_MIMO3;
4396 			else if (ni->ni_rxmcs[1] != 0)
4397 				watermark = IWM_SF_W_MARK_MIMO2;
4398 			else
4399 #endif
4400 				watermark = IWM_SF_W_MARK_SISO;
4401 		} else {
4402 			watermark = IWM_SF_W_MARK_LEGACY;
4403 		}
4404 	/* default watermark value for unassociated mode. */
4405 	} else {
4406 		watermark = IWM_SF_W_MARK_MIMO2;
4407 	}
4408 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4409 
4410 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4411 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4412 			sf_cmd->long_delay_timeouts[i][j] =
4413 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4414 		}
4415 	}
4416 
4417 	if (ni) {
4418 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4419 		       sizeof(iwm_sf_full_timeout));
4420 	} else {
4421 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4422 		       sizeof(iwm_sf_full_timeout_def));
4423 	}
4424 }
4425 
4426 static int
4427 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4428 {
4429 	struct ieee80211com *ic = &sc->sc_ic;
4430 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4431 	struct iwm_sf_cfg_cmd sf_cmd = {
4432 		.state = htole32(IWM_SF_FULL_ON),
4433 	};
4434 	int ret = 0;
4435 
4436 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
4437 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4438 
4439 	switch (new_state) {
4440 	case IWM_SF_UNINIT:
4441 	case IWM_SF_INIT_OFF:
4442 		iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4443 		break;
4444 	case IWM_SF_FULL_ON:
4445 		iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4446 		break;
4447 	default:
4448 		IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4449 		    "Invalid state: %d. not sending Smart Fifo cmd\n",
4450 			  new_state);
4451 		return EINVAL;
4452 	}
4453 
4454 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4455 				   sizeof(sf_cmd), &sf_cmd);
4456 	return ret;
4457 }
4458 
4459 static int
4460 iwm_send_bt_init_conf(struct iwm_softc *sc)
4461 {
4462 	struct iwm_bt_coex_cmd bt_cmd;
4463 
4464 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4465 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4466 
4467 	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4468 	    &bt_cmd);
4469 }
4470 
4471 static int
4472 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4473 {
4474 	struct iwm_mcc_update_cmd mcc_cmd;
4475 	struct iwm_host_cmd hcmd = {
4476 		.id = IWM_MCC_UPDATE_CMD,
4477 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4478 		.data = { &mcc_cmd },
4479 	};
4480 	int ret;
4481 #ifdef IWM_DEBUG
4482 	struct iwm_rx_packet *pkt;
4483 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4484 	struct iwm_mcc_update_resp *mcc_resp;
4485 	int n_channels;
4486 	uint16_t mcc;
4487 #endif
4488 	int resp_v2 = isset(sc->sc_enabled_capa,
4489 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4490 
4491 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4492 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4493 	if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4494 	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4495 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4496 	else
4497 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4498 
4499 	if (resp_v2)
4500 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4501 	else
4502 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4503 
4504 	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4505 	    "send MCC update to FW with '%c%c' src = %d\n",
4506 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4507 
4508 	ret = iwm_send_cmd(sc, &hcmd);
4509 	if (ret)
4510 		return ret;
4511 
4512 #ifdef IWM_DEBUG
4513 	pkt = hcmd.resp_pkt;
4514 
4515 	/* Extract MCC response */
4516 	if (resp_v2) {
4517 		mcc_resp = (void *)pkt->data;
4518 		mcc = mcc_resp->mcc;
4519 		n_channels =  le32toh(mcc_resp->n_channels);
4520 	} else {
4521 		mcc_resp_v1 = (void *)pkt->data;
4522 		mcc = mcc_resp_v1->mcc;
4523 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4524 	}
4525 
4526 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4527 	if (mcc == 0)
4528 		mcc = 0x3030;  /* "00" - world */
4529 
4530 	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4531 	    "regulatory domain '%c%c' (%d channels available)\n",
4532 	    mcc >> 8, mcc & 0xff, n_channels);
4533 #endif
4534 	iwm_free_resp(sc, &hcmd);
4535 
4536 	return 0;
4537 }
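
/*
 * Example of the MCC encoding used above: alpha2 "ZZ" becomes
 * htole16('Z' << 8 | 'Z') == 0x5a5a, and the world domain "00" is
 * 0x3030 - which is why a zero MCC in the response is patched back to
 * 0x3030.
 */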
4538 
4539 static void
4540 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4541 {
4542 	struct iwm_host_cmd cmd = {
4543 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4544 		.len = { sizeof(uint32_t), },
4545 		.data = { &backoff, },
4546 	};
4547 
4548 	if (iwm_send_cmd(sc, &cmd) != 0) {
4549 		device_printf(sc->sc_dev,
4550 		    "failed to change thermal tx backoff\n");
4551 	}
4552 }
4553 
4554 static int
4555 iwm_init_hw(struct iwm_softc *sc)
4556 {
4557 	struct ieee80211com *ic = &sc->sc_ic;
4558 	int error, i, ac;
4559 
4560 	if ((error = iwm_start_hw(sc)) != 0) {
4561 		kprintf("iwm_start_hw: failed %d\n", error);
4562 		return error;
4563 	}
4564 
4565 	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4566 		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
4567 		return error;
4568 	}
4569 
4570 	/*
4571 	 * We should stop and restart the HW since the INIT
4572 	 * image has just been loaded.
4573 	 */
4574 	iwm_stop_device(sc);
4575 	if ((error = iwm_start_hw(sc)) != 0) {
4576 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4577 		return error;
4578 	}
4579 
4580 	/* restart, this time with the regular firmware */
4581 	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
4582 	if (error) {
4583 		device_printf(sc->sc_dev, "could not load firmware\n");
4584 		goto error;
4585 	}
4586 
4587 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4588 		device_printf(sc->sc_dev, "bt init conf failed\n");
4589 		goto error;
4590 	}
4591 
4592 	if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
4593 		device_printf(sc->sc_dev, "antenna config failed\n");
4594 		goto error;
4595 	}
4596 
4597 	/* Send phy db control command and then phy db calibration */
4598 	if ((error = iwm_send_phy_db_data(sc)) != 0) {
4599 		device_printf(sc->sc_dev, "phy_db_data failed\n");
4600 		goto error;
4601 	}
4602 
4603 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4604 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4605 		goto error;
4606 	}
4607 
4608 	/* Add auxiliary station for scanning */
4609 	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4610 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4611 		goto error;
4612 	}
4613 
4614 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4615 		/*
4616 		 * The channel used here isn't relevant as it's
4617 		 * going to be overwritten in the other flows.
4618 		 * For now use the first channel we have.
4619 		 */
4620 		if ((error = iwm_mvm_phy_ctxt_add(sc,
4621 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4622 			goto error;
4623 	}
4624 
4625 	/* Initialize tx backoffs to the minimum. */
4626 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
4627 		iwm_mvm_tt_tx_backoff(sc, 0);
4628 
4629 	error = iwm_mvm_power_update_device(sc);
4630 	if (error)
4631 		goto error;
4632 
4633 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4634 		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4635 			goto error;
4636 	}
4637 
4638 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4639 		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4640 			goto error;
4641 	}
4642 
4643 	/* Enable Tx queues. */
4644 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4645 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4646 		    iwm_mvm_ac_to_tx_fifo[ac]);
4647 		if (error)
4648 			goto error;
4649 	}
4650 
4651 	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4652 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4653 		goto error;
4654 	}
4655 
4656 	return 0;
4657 
4658  error:
4659 	iwm_stop_device(sc);
4660 	return error;
4661 }
4662 
4663 /* Allow multicast from our BSSID. */
4664 static int
4665 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4666 {
4667 	struct ieee80211_node *ni = vap->iv_bss;
4668 	struct iwm_mcast_filter_cmd *cmd;
4669 	size_t size;
4670 	int error;
4671 
4672 	size = roundup(sizeof(*cmd), 4);
4673 	cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4674 	if (cmd == NULL)
4675 		return ENOMEM;
4676 	cmd->filter_own = 1;
4677 	cmd->port_id = 0;
4678 	cmd->count = 0;
4679 	cmd->pass_all = 1;
4680 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4681 
4682 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4683 	    IWM_CMD_SYNC, size, cmd);
4684 	kfree(cmd, M_DEVBUF);
4685 
4686 	return (error);
4687 }
4688 
4689 /*
4690  * ifnet interfaces
4691  */
4692 
4693 static void
4694 iwm_init(struct iwm_softc *sc)
4695 {
4696 	int error;
4697 
4698 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4699 		return;
4700 	}
4701 	sc->sc_generation++;
4702 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4703 
4704 	if ((error = iwm_init_hw(sc)) != 0) {
4705 		kprintf("iwm_init_hw failed %d\n", error);
4706 		iwm_stop(sc);
4707 		return;
4708 	}
4709 
4710 	/*
4711 	 * Ok, firmware loaded and we are jogging
4712 	 */
4713 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4714 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4715 }
4716 
4717 static int
4718 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4719 {
4720 	struct iwm_softc *sc;
4721 	int error;
4722 
4723 	sc = ic->ic_softc;
4724 
4725 	IWM_LOCK(sc);
4726 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4727 		IWM_UNLOCK(sc);
4728 		return (ENXIO);
4729 	}
4730 	error = mbufq_enqueue(&sc->sc_snd, m);
4731 	if (error) {
4732 		IWM_UNLOCK(sc);
4733 		return (error);
4734 	}
4735 	iwm_start(sc);
4736 	IWM_UNLOCK(sc);
4737 	return (0);
4738 }
4739 
4740 /*
4741  * Dequeue packets from sendq and call send.
4742  */
4743 static void
4744 iwm_start(struct iwm_softc *sc)
4745 {
4746 	struct ieee80211_node *ni;
4747 	struct mbuf *m;
4748 	int ac = 0;
4749 
4750 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4751 	while (sc->qfullmsk == 0 &&
4752 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4753 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4754 		if (iwm_tx(sc, m, ni, ac) != 0) {
4755 			if_inc_counter(ni->ni_vap->iv_ifp,
4756 			    IFCOUNTER_OERRORS, 1);
4757 			ieee80211_free_node(ni);
4758 			continue;
4759 		}
4760 		sc->sc_tx_timer = 15;
4761 	}
4762 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4763 }
4764 
4765 static void
4766 iwm_stop(struct iwm_softc *sc)
4767 {
4768 
4769 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4770 	sc->sc_flags |= IWM_FLAG_STOPPED;
4771 	sc->sc_generation++;
4772 	iwm_led_blink_stop(sc);
4773 	sc->sc_tx_timer = 0;
4774 	iwm_stop_device(sc);
4775 }
4776 
4777 static void
4778 iwm_watchdog(void *arg)
4779 {
4780 	struct iwm_softc *sc = arg;
4781 
4782 	if (sc->sc_tx_timer > 0) {
4783 		if (--sc->sc_tx_timer == 0) {
4784 			device_printf(sc->sc_dev, "device timeout\n");
4785 #ifdef IWM_DEBUG
4786 			iwm_nic_error(sc);
4787 #endif
4788 			iwm_stop(sc);
4789 #if defined(__DragonFly__)
4790 			++sc->sc_ic.ic_oerrors;
4791 #else
4792 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4793 #endif
4794 			return;
4795 		}
4796 	}
4797 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4798 }
4799 
4800 static void
4801 iwm_parent(struct ieee80211com *ic)
4802 {
4803 	struct iwm_softc *sc = ic->ic_softc;
4804 	int startall = 0;
4805 
4806 	IWM_LOCK(sc);
4807 	if (ic->ic_nrunning > 0) {
4808 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4809 			iwm_init(sc);
4810 			startall = 1;
4811 		}
4812 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4813 		iwm_stop(sc);
4814 	IWM_UNLOCK(sc);
4815 	if (startall)
4816 		ieee80211_start_all(ic);
4817 }
4818 
4819 /*
4820  * The interrupt side of things
4821  */
4822 
4823 /*
4824  * error dumping routines are from iwlwifi/mvm/utils.c
4825  */
4826 
4827 /*
4828  * Note: This structure is read from the device with IO accesses,
4829  * and the reading already does the endian conversion. As it is
4830  * read with uint32_t-sized accesses, any members with a different size
4831  * need to be ordered correctly though!
4832  */
4833 struct iwm_error_event_table {
4834 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4835 	uint32_t error_id;		/* type of error */
4836 	uint32_t trm_hw_status0;	/* TRM HW status */
4837 	uint32_t trm_hw_status1;	/* TRM HW status */
4838 	uint32_t blink2;		/* branch link */
4839 	uint32_t ilink1;		/* interrupt link */
4840 	uint32_t ilink2;		/* interrupt link */
4841 	uint32_t data1;		/* error-specific data */
4842 	uint32_t data2;		/* error-specific data */
4843 	uint32_t data3;		/* error-specific data */
4844 	uint32_t bcon_time;		/* beacon timer */
4845 	uint32_t tsf_low;		/* network timestamp function timer */
4846 	uint32_t tsf_hi;		/* network timestamp function timer */
4847 	uint32_t gp1;		/* GP1 timer register */
4848 	uint32_t gp2;		/* GP2 timer register */
4849 	uint32_t fw_rev_type;	/* firmware revision type */
4850 	uint32_t major;		/* uCode version major */
4851 	uint32_t minor;		/* uCode version minor */
4852 	uint32_t hw_ver;		/* HW Silicon version */
4853 	uint32_t brd_ver;		/* HW board version */
4854 	uint32_t log_pc;		/* log program counter */
4855 	uint32_t frame_ptr;		/* frame pointer */
4856 	uint32_t stack_ptr;		/* stack pointer */
4857 	uint32_t hcmd;		/* last host command header */
4858 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
4859 				 * rxtx_flag */
4860 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
4861 				 * host_flag */
4862 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
4863 				 * enc_flag */
4864 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
4865 				 * time_flag */
4866 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
4867 				 * wico interrupt */
4868 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
4869 	uint32_t wait_event;		/* wait event() caller address */
4870 	uint32_t l2p_control;	/* L2pControlField */
4871 	uint32_t l2p_duration;	/* L2pDurationField */
4872 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
4873 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
4874 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
4875 				 * (LMPM_PMG_SEL) */
4876 	uint32_t u_timestamp;	/* indicates the date and time of the
4877 				 * firmware compilation */
4878 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
4879 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
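
/*
 * A minimal sketch (not compiled) of pulling this table out of the
 * device, mirroring the iwm_nic_error() path further below: the read
 * length is counted in uint32_t words, matching the IO-access note
 * above.
 */
#if 0
	struct iwm_error_event_table table;
	uint32_t base = sc->sc_uc.uc_error_event_table;

	if (iwm_read_mem(sc, base, &table,
	    sizeof(table) / sizeof(uint32_t)) == 0 && table.valid != 0)
		device_printf(sc->sc_dev, "error id 0x%08x\n",
		    table.error_id);
#endif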
4880 
4881 /*
4882  * UMAC error struct - relevant starting from family 8000 chip.
4883  * Note: This structure is read from the device with IO accesses,
4884  * and the reading already does the endian conversion. As it is
4885  * read with u32-sized accesses, any members with a different size
4886  * need to be ordered correctly though!
4887  */
4888 struct iwm_umac_error_event_table {
4889 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4890 	uint32_t error_id;	/* type of error */
4891 	uint32_t blink1;	/* branch link */
4892 	uint32_t blink2;	/* branch link */
4893 	uint32_t ilink1;	/* interrupt link */
4894 	uint32_t ilink2;	/* interrupt link */
4895 	uint32_t data1;		/* error-specific data */
4896 	uint32_t data2;		/* error-specific data */
4897 	uint32_t data3;		/* error-specific data */
4898 	uint32_t umac_major;
4899 	uint32_t umac_minor;
4900 	uint32_t frame_pointer;	/* core register 27 */
4901 	uint32_t stack_pointer;	/* core register 28 */
4902 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
4903 	uint32_t nic_isr_pref;	/* ISR status register */
4904 } __packed;
4905 
4906 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4907 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
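/* i.e. 4 bytes and 28 bytes (seven uint32_t words), respectively */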
4908 
4909 #ifdef IWM_DEBUG
4910 struct {
4911 	const char *name;
4912 	uint8_t num;
4913 } advanced_lookup[] = {
4914 	{ "NMI_INTERRUPT_WDG", 0x34 },
4915 	{ "SYSASSERT", 0x35 },
4916 	{ "UCODE_VERSION_MISMATCH", 0x37 },
4917 	{ "BAD_COMMAND", 0x38 },
4918 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4919 	{ "FATAL_ERROR", 0x3D },
4920 	{ "NMI_TRM_HW_ERR", 0x46 },
4921 	{ "NMI_INTERRUPT_TRM", 0x4C },
4922 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4923 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4924 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4925 	{ "NMI_INTERRUPT_HOST", 0x66 },
4926 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
4927 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
4928 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4929 	{ "ADVANCED_SYSASSERT", 0 },
4930 };
4931 
4932 static const char *
4933 iwm_desc_lookup(uint32_t num)
4934 {
4935 	int i;
4936 
4937 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4938 		if (advanced_lookup[i].num == num)
4939 			return advanced_lookup[i].name;
4940 
4941 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4942 	return advanced_lookup[i].name;
4943 }
4944 
4945 static void
4946 iwm_nic_umac_error(struct iwm_softc *sc)
4947 {
4948 	struct iwm_umac_error_event_table table;
4949 	uint32_t base;
4950 
4951 	base = sc->sc_uc.uc_umac_error_event_table;
4952 
4953 	if (base < 0x800000) {
4954 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
4955 		    base);
4956 		return;
4957 	}
4958 
4959 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
4960 		device_printf(sc->sc_dev, "reading errlog failed\n");
4961 		return;
4962 	}
4963 
4964 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
4965 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
4966 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
4967 		    sc->sc_flags, table.valid);
4968 	}
4969 
4970 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
4971 		iwm_desc_lookup(table.error_id));
4972 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
4973 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
4974 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
4975 	    table.ilink1);
4976 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
4977 	    table.ilink2);
4978 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
4979 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
4980 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
4981 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
4982 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
4983 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
4984 	    table.frame_pointer);
4985 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
4986 	    table.stack_pointer);
4987 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
4988 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
4989 	    table.nic_isr_pref);
4990 }
4991 
4992 /*
4993  * Support for dumping the error log seemed like a good idea ...
4994  * but it's mostly hex junk and the only sensible thing is the
4995  * hw/ucode revision (which we know anyway).  Since it's here,
4996  * I'll just leave it in, just in case e.g. the Intel guys want to
4997  * help us decipher some "ADVANCED_SYSASSERT" later.
4998  */
4999 static void
5000 iwm_nic_error(struct iwm_softc *sc)
5001 {
5002 	struct iwm_error_event_table table;
5003 	uint32_t base;
5004 
5005 	device_printf(sc->sc_dev, "dumping device error log\n");
5006 	base = sc->sc_uc.uc_error_event_table;
5007 	if (base < 0x800000) {
5008 		device_printf(sc->sc_dev,
5009 		    "Invalid error log pointer 0x%08x\n", base);
5010 		return;
5011 	}
5012 
5013 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5014 		device_printf(sc->sc_dev, "reading errlog failed\n");
5015 		return;
5016 	}
5017 
5018 	if (!table.valid) {
5019 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5020 		return;
5021 	}
5022 
5023 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5024 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5025 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5026 		    sc->sc_flags, table.valid);
5027 	}
5028 
5029 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5030 	    iwm_desc_lookup(table.error_id));
5031 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5032 	    table.trm_hw_status0);
5033 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5034 	    table.trm_hw_status1);
5035 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5036 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5037 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5038 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5039 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5040 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5041 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5042 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5043 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5044 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5045 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5046 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5047 	    table.fw_rev_type);
5048 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5049 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5050 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5051 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5052 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5053 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5054 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5055 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5056 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5057 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5058 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5059 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5060 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5061 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5062 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5063 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5064 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5065 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5066 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5067 
5068 	if (sc->sc_uc.uc_umac_error_event_table)
5069 		iwm_nic_umac_error(sc);
5070 }
5071 #endif
5072 
5073 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
5074 do {									\
5075 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
5076 	_var_ = (void *)((_pkt_)+1);					\
5077 } while (/*CONSTCOND*/0)
5078 
5079 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
5080 do {									\
5081 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
5082 	_ptr_ = (void *)((_pkt_)+1);					\
5083 } while (/*CONSTCOND*/0)
5084 
5085 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
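
/*
 * Example: assuming IWM_RX_RING_COUNT is 256, ADVANCE_RXQ() moves
 * rxq.cur from 255 back to 0, keeping the ring index in range.
 */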
5086 
5087 /*
5088  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5089  * Basic structure from if_iwn
5090  */
5091 static void
5092 iwm_notif_intr(struct iwm_softc *sc)
5093 {
5094 	uint16_t hw;
5095 
5096 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5097 	    BUS_DMASYNC_POSTREAD);
5098 
5099 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5100 
5101 	/*
5102 	 * Process responses
5103 	 */
5104 	while (sc->rxq.cur != hw) {
5105 		struct iwm_rx_ring *ring = &sc->rxq;
5106 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
5107 		struct iwm_rx_packet *pkt;
5108 		struct iwm_cmd_response *cresp;
5109 		int qid, idx, code;
5110 
5111 		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
5112 		    BUS_DMASYNC_POSTREAD);
5113 		pkt = mtod(data->m, struct iwm_rx_packet *);
5114 
5115 		qid = pkt->hdr.qid & ~0x80;
5116 		idx = pkt->hdr.idx;
5117 
5118 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5119 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5120 		    "rx packet qid=%d idx=%d type=%x %d %d\n",
5121 		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, sc->rxq.cur, hw);
5122 
5123 		/*
5124 		 * We randomly get these from the firmware; no idea why.
5125 		 * They at least seem harmless, so just ignore them for now.
5126 		 */
5127 		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5128 		    || pkt->len_n_flags == htole32(0x55550000))) {
5129 			ADVANCE_RXQ(sc);
5130 			continue;
5131 		}
5132 
5133 		switch (code) {
5134 		case IWM_REPLY_RX_PHY_CMD:
5135 			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5136 			break;
5137 
5138 		case IWM_REPLY_RX_MPDU_CMD:
5139 			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5140 			break;
5141 
5142 		case IWM_TX_CMD:
5143 			iwm_mvm_rx_tx_cmd(sc, pkt, data);
5144 			break;
5145 
5146 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5147 			struct iwm_missed_beacons_notif *resp;
5148 			int missed;
5149 
5150 			/* XXX look at mac_id to determine interface ID */
5151 			struct ieee80211com *ic = &sc->sc_ic;
5152 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5153 
5154 			SYNC_RESP_STRUCT(resp, pkt);
5155 			missed = le32toh(resp->consec_missed_beacons);
5156 
5157 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5158 			    "%s: MISSED_BEACON: mac_id=%d, "
5159 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5160 			    "num_rx=%d\n",
5161 			    __func__,
5162 			    le32toh(resp->mac_id),
5163 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5164 			    le32toh(resp->consec_missed_beacons),
5165 			    le32toh(resp->num_expected_beacons),
5166 			    le32toh(resp->num_recvd_beacons));
5167 
5168 			/* Be paranoid */
5169 			if (vap == NULL)
5170 				break;
5171 
5172 			/* XXX no net80211 locking? */
5173 			if (vap->iv_state == IEEE80211_S_RUN &&
5174 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5175 				if (missed > vap->iv_bmissthreshold) {
5176 					/* XXX bad locking; turn into task */
5177 					IWM_UNLOCK(sc);
5178 					ieee80211_beacon_miss(ic);
5179 					IWM_LOCK(sc);
5180 				}
5181 			}
5182 
5183 			break; }
5184 
5185 		case IWM_MFUART_LOAD_NOTIFICATION:
5186 			break;
5187 
5188 		case IWM_MVM_ALIVE: {
5189 			struct iwm_mvm_alive_resp_v1 *resp1;
5190 			struct iwm_mvm_alive_resp_v2 *resp2;
5191 			struct iwm_mvm_alive_resp_v3 *resp3;
5192 
5193 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
5194 				SYNC_RESP_STRUCT(resp1, pkt);
5195 				sc->sc_uc.uc_error_event_table
5196 				    = le32toh(resp1->error_event_table_ptr);
5197 				sc->sc_uc.uc_log_event_table
5198 				    = le32toh(resp1->log_event_table_ptr);
5199 				sc->sched_base = le32toh(resp1->scd_base_ptr);
5200 				if (resp1->status == IWM_ALIVE_STATUS_OK)
5201 					sc->sc_uc.uc_ok = 1;
5202 				else
5203 					sc->sc_uc.uc_ok = 0;
5204 			}
5205 
5206 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
5207 				SYNC_RESP_STRUCT(resp2, pkt);
5208 				sc->sc_uc.uc_error_event_table
5209 				    = le32toh(resp2->error_event_table_ptr);
5210 				sc->sc_uc.uc_log_event_table
5211 				    = le32toh(resp2->log_event_table_ptr);
5212 				sc->sched_base = le32toh(resp2->scd_base_ptr);
5213 				sc->sc_uc.uc_umac_error_event_table
5214 				    = le32toh(resp2->error_info_addr);
5215 				if (resp2->status == IWM_ALIVE_STATUS_OK)
5216 					sc->sc_uc.uc_ok = 1;
5217 				else
5218 					sc->sc_uc.uc_ok = 0;
5219 			}
5220 
5221 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
5222 				SYNC_RESP_STRUCT(resp3, pkt);
5223 				sc->sc_uc.uc_error_event_table
5224 				    = le32toh(resp3->error_event_table_ptr);
5225 				sc->sc_uc.uc_log_event_table
5226 				    = le32toh(resp3->log_event_table_ptr);
5227 				sc->sched_base = le32toh(resp3->scd_base_ptr);
5228 				sc->sc_uc.uc_umac_error_event_table
5229 				    = le32toh(resp3->error_info_addr);
5230 				if (resp3->status == IWM_ALIVE_STATUS_OK)
5231 					sc->sc_uc.uc_ok = 1;
5232 				else
5233 					sc->sc_uc.uc_ok = 0;
5234 			}
5235 
5236 			sc->sc_uc.uc_intr = 1;
5237 			wakeup(&sc->sc_uc);
5238 			break; }
5239 
5240 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
5241 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
5242 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
5243 
5244 			iwm_phy_db_set_section(sc, phy_db_notif);
5245 
5246 			break; }
5247 
5248 		case IWM_STATISTICS_NOTIFICATION: {
5249 			struct iwm_notif_statistics *stats;
5250 			SYNC_RESP_STRUCT(stats, pkt);
5251 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5252 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
5253 			break; }
5254 
5255 		case IWM_NVM_ACCESS_CMD:
5256 		case IWM_MCC_UPDATE_CMD:
5257 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
5258 				bus_dmamap_sync(sc->rxq.data_dmat, data->map,
5259 				    BUS_DMASYNC_POSTREAD);
5260 				memcpy(sc->sc_cmd_resp,
5261 				    pkt, sizeof(sc->sc_cmd_resp));
5262 			}
5263 			break;
5264 
5265 		case IWM_MCC_CHUB_UPDATE_CMD: {
5266 			struct iwm_mcc_chub_notif *notif;
5267 			SYNC_RESP_STRUCT(notif, pkt);
5268 
5269 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5270 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5271 			sc->sc_fw_mcc[2] = '\0';
5272 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5273 			    "fw source %d sent CC '%s'\n",
5274 			    notif->source_id, sc->sc_fw_mcc);
5275 			break; }
5276 
5277 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5278 			break;
5279 
5280 		case IWM_PHY_CONFIGURATION_CMD:
5281 		case IWM_TX_ANT_CONFIGURATION_CMD:
5282 		case IWM_ADD_STA:
5283 		case IWM_MAC_CONTEXT_CMD:
5284 		case IWM_REPLY_SF_CFG_CMD:
5285 		case IWM_POWER_TABLE_CMD:
5286 		case IWM_PHY_CONTEXT_CMD:
5287 		case IWM_BINDING_CONTEXT_CMD:
5288 		case IWM_TIME_EVENT_CMD:
5289 		case IWM_SCAN_REQUEST_CMD:
5290 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5291 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5292 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5293 		case IWM_REPLY_BEACON_FILTERING_CMD:
5294 		case IWM_MAC_PM_POWER_TABLE:
5295 		case IWM_TIME_QUOTA_CMD:
5296 		case IWM_REMOVE_STA:
5297 		case IWM_TXPATH_FLUSH:
5298 		case IWM_LQ_CMD:
5299 		case IWM_BT_CONFIG:
5300 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5301 			SYNC_RESP_STRUCT(cresp, pkt);
5302 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
5303 				memcpy(sc->sc_cmd_resp,
5304 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5305 			}
5306 			break;
5307 
5308 		/* ignore */
5309 		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5310 			break;
5311 
5312 		case IWM_INIT_COMPLETE_NOTIF:
5313 			sc->sc_init_complete = 1;
5314 			wakeup(&sc->sc_init_complete);
5315 			break;
5316 
5317 		case IWM_SCAN_OFFLOAD_COMPLETE: {
5318 			struct iwm_periodic_scan_complete *notif;
5319 			SYNC_RESP_STRUCT(notif, pkt);
5320 
5321 			break; }
5322 
5323 		case IWM_SCAN_ITERATION_COMPLETE: {
5324 			struct iwm_lmac_scan_complete_notif *notif;
5325 			SYNC_RESP_STRUCT(notif, pkt);
5326 			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5327 			break; }
5328 
5329 		case IWM_SCAN_COMPLETE_UMAC: {
5330 			struct iwm_umac_scan_complete *notif;
5331 			SYNC_RESP_STRUCT(notif, pkt);
5332 
5333 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5334 			    "UMAC scan complete, status=0x%x\n",
5335 			    notif->status);
5336 #if 0	/* XXX This would be a duplicate scan end call */
5337 			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5338 #endif
5339 			break;
5340 		}
5341 
5342 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5343 			struct iwm_umac_scan_iter_complete_notif *notif;
5344 			SYNC_RESP_STRUCT(notif, pkt);
5345 
5346 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5347 			    "complete, status=0x%x, %d channels scanned\n",
5348 			    notif->status, notif->scanned_channels);
5349 			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5350 			break;
5351 		}
5352 
5353 		case IWM_REPLY_ERROR: {
5354 			struct iwm_error_resp *resp;
5355 			SYNC_RESP_STRUCT(resp, pkt);
5356 
5357 			device_printf(sc->sc_dev,
5358 			    "firmware error 0x%x, cmd 0x%x\n",
5359 			    le32toh(resp->error_type),
5360 			    resp->cmd_id);
5361 			break; }
5362 
5363 		case IWM_TIME_EVENT_NOTIFICATION: {
5364 			struct iwm_time_event_notif *notif;
5365 			SYNC_RESP_STRUCT(notif, pkt);
5366 
5367 			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5368 			    "TE notif status = 0x%x action = 0x%x\n",
5369 			    notif->status, notif->action);
5370 			break; }
5371 
5372 		case IWM_MCAST_FILTER_CMD:
5373 			break;
5374 
5375 		case IWM_SCD_QUEUE_CFG: {
5376 			struct iwm_scd_txq_cfg_rsp *rsp;
5377 			SYNC_RESP_STRUCT(rsp, pkt);
5378 
5379 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5380 			    "queue cfg token=0x%x sta_id=%d "
5381 			    "tid=%d scd_queue=%d\n",
5382 			    rsp->token, rsp->sta_id, rsp->tid,
5383 			    rsp->scd_queue);
5384 			break;
5385 		}
5386 
5387 		default:
5388 			device_printf(sc->sc_dev,
5389 			    "frame %d/%d %x UNHANDLED (this should "
5390 			    "not happen)\n", qid, idx,
5391 			    pkt->len_n_flags);
5392 			break;
5393 		}
5394 
5395 		/*
5396 		 * Why test bit 0x80?  The Linux driver:
5397 		 *
5398 		 * There is one exception:  uCode sets bit 15 when it
5399 		 * originates the response/notification, i.e. when the
5400 		 * response/notification is not a direct response to a
5401 		 * command sent by the driver.  For example, uCode issues
5402 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5403 		 * it is not a direct response to any driver command.
5404 		 *
5405 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5406 		 * uses a slightly different format for pkt->hdr, and "qid"
5407 		 * is actually the upper byte of a two-byte field.
5408 		 */
5409 		if (!(pkt->hdr.qid & (1 << 7))) {
5410 			iwm_cmd_done(sc, pkt);
5411 		}
5412 
5413 		ADVANCE_RXQ(sc);
5414 	}
5415 
5416 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
5417 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5418 
5419 	/*
5420 	 * Tell the firmware what we have processed.  The write pointer
5421 	 * is rounded down to a multiple of 8, since the hardware seems
5422 	 * to get upset otherwise.
5423 	 */
5424 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5425 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5426 }
5427 
5428 static void
5429 iwm_intr(void *arg)
5430 {
5431 	struct iwm_softc *sc = arg;
5432 	int handled = 0;
5433 	int r1, r2, rv = 0;
5434 	int isperiodic = 0;
5435 
5436 #if defined(__DragonFly__)
5437 	if (sc->sc_mem == NULL) {
5438 		kprintf("iwm_intr: detached\n");
5439 		return;
5440 	}
5441 #endif
5442 	IWM_LOCK(sc);
5443 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5444 
5445 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5446 		uint32_t *ict = sc->ict_dma.vaddr;
5447 		int tmp;
5448 
5449 		tmp = le32toh(ict[sc->ict_cur]);
5450 		if (!tmp)
5451 			goto out_ena;
5452 
5453 		/*
5454 		 * ok, there was something.  keep plowing until we have all.
5455 		 */
5456 		r1 = r2 = 0;
5457 		while (tmp) {
5458 			r1 |= tmp;
5459 			ict[sc->ict_cur] = 0;
5460 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5461 			tmp = le32toh(ict[sc->ict_cur]);
5462 		}
5463 
5464 		/* this is where the fun begins.  don't ask */
5465 		if (r1 == 0xffffffff)
5466 			r1 = 0;
5467 
5468 		/* i am not expected to understand this */
5469 		if (r1 & 0xc0000)
5470 			r1 |= 0x8000;
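		/*
		 * Expansion sketch, following the Linux driver's layout:
		 * an ICT entry packs the low interrupt byte in bits 0-7
		 * and the high byte in bits 8-15; the line below expands
		 * that back into the CSR_INT layout, where the high byte
		 * occupies bits 24-31.
		 */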
5471 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5472 	} else {
5473 		r1 = IWM_READ(sc, IWM_CSR_INT);
5474 		/* "hardware gone" (where, fishing?) */
5475 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5476 			goto out;
5477 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5478 	}
5479 	if (r1 == 0 && r2 == 0) {
5480 		goto out_ena;
5481 	}
5482 
5483 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5484 
5485 	/* ignored */
5486 	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
5487 
5488 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5489 		int i;
5490 		struct ieee80211com *ic = &sc->sc_ic;
5491 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5492 
5493 #ifdef IWM_DEBUG
5494 		iwm_nic_error(sc);
5495 #endif
5496 		/* Dump driver status (TX and RX rings) while we're here. */
5497 		device_printf(sc->sc_dev, "driver status:\n");
5498 		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5499 			struct iwm_tx_ring *ring = &sc->txq[i];
5500 			device_printf(sc->sc_dev,
5501 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5502 			    "queued=%-3d\n",
5503 			    i, ring->qid, ring->cur, ring->queued);
5504 		}
5505 		device_printf(sc->sc_dev,
5506 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5507 		device_printf(sc->sc_dev,
5508 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5509 
5510 		/* Don't stop the device; just do a VAP restart */
5511 		IWM_UNLOCK(sc);
5512 
5513 		if (vap == NULL) {
5514 			kprintf("%s: null vap\n", __func__);
5515 			return;
5516 		}
5517 
5518 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5519 		    "restarting\n", __func__, vap->iv_state);
5520 
5521 		/* XXX TODO: turn this into a callout/taskqueue */
5522 		ieee80211_restart_all(ic);
5523 		return;
5524 	}
5525 
5526 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5527 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5528 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5529 		iwm_stop(sc);
5530 		rv = 1;
5531 		goto out;
5532 	}
5533 
5534 	/* firmware chunk loaded */
5535 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5536 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5537 		handled |= IWM_CSR_INT_BIT_FH_TX;
5538 		sc->sc_fw_chunk_done = 1;
5539 		wakeup(&sc->sc_fw);
5540 	}
5541 
5542 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5543 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5544 		if (iwm_check_rfkill(sc)) {
5545 			device_printf(sc->sc_dev,
5546 			    "%s: rfkill switch, disabling interface\n",
5547 			    __func__);
5548 			iwm_stop(sc);
5549 		}
5550 	}
5551 
5552 	/*
5553 	 * The Linux driver uses periodic interrupts to avoid races.
5554 	 * We cargo-cult like it's going out of fashion.
5555 	 */
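	/*
	 * (The race, per comments in the Linux driver: with ICT an RX
	 * interrupt can arrive while the previous batch is still being
	 * serviced, so a periodic interrupt is armed to re-poll the RX
	 * queue until it is known to be drained.)
	 */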
5556 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5557 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5558 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5559 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5560 			IWM_WRITE_1(sc,
5561 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5562 		isperiodic = 1;
5563 	}
5564 
5565 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5566 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5567 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5568 
5569 		iwm_notif_intr(sc);
5570 
5571 		/* enable periodic interrupt, see above */
5572 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5573 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5574 			    IWM_CSR_INT_PERIODIC_ENA);
5575 	}
5576 
5577 	if (__predict_false(r1 & ~handled))
5578 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5579 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5580 	rv = 1;
5581 
5582  out_ena:
5583 	iwm_restore_interrupts(sc);
5584  out:
5585 	IWM_UNLOCK(sc);
5586 	return;
5587 }
5588 
5589 /*
5590  * Autoconf glue-sniffing
5591  */
5592 #define	PCI_VENDOR_INTEL		0x8086
5593 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5594 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5595 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5596 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5597 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5598 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5599 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5600 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5601 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5602 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5603 
5604 static const struct iwm_devices {
5605 	uint16_t	device;
5606 	const char	*name;
5607 } iwm_devices[] = {
5608 	{ PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
5609 	{ PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
5610 	{ PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
5611 	{ PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
5612 	{ PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
5613 	{ PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
5614 	{ PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
5615 	{ PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
5616 	{ PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
5617 	{ PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
5618 };
5619 
5620 static int
5621 iwm_probe(device_t dev)
5622 {
5623 	int i;
5624 
5625 	for (i = 0; i < nitems(iwm_devices); i++) {
5626 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5627 		    pci_get_device(dev) == iwm_devices[i].device) {
5628 			device_set_desc(dev, iwm_devices[i].name);
5629 			return (BUS_PROBE_DEFAULT);
5630 		}
5631 	}
5632 
5633 	return (ENXIO);
5634 }
5635 
5636 static int
5637 iwm_dev_check(device_t dev)
5638 {
5639 	struct iwm_softc *sc;
5640 
5641 	sc = device_get_softc(dev);
5642 
5643 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5644 	switch (pci_get_device(dev)) {
5645 	case PCI_PRODUCT_INTEL_WL_3160_1:
5646 	case PCI_PRODUCT_INTEL_WL_3160_2:
5647 		sc->sc_fwname = "iwm3160fw";
5648 		sc->host_interrupt_operation_mode = 1;
5649 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5650 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5651 		return (0);
5652 	case PCI_PRODUCT_INTEL_WL_3165_1:
5653 	case PCI_PRODUCT_INTEL_WL_3165_2:
5654 		sc->sc_fwname = "iwm7265fw";
5655 		sc->host_interrupt_operation_mode = 0;
5656 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5657 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5658 		return (0);
5659 	case PCI_PRODUCT_INTEL_WL_7260_1:
5660 	case PCI_PRODUCT_INTEL_WL_7260_2:
5661 		sc->sc_fwname = "iwm7260fw";
5662 		sc->host_interrupt_operation_mode = 1;
5663 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5664 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5665 		return (0);
5666 	case PCI_PRODUCT_INTEL_WL_7265_1:
5667 	case PCI_PRODUCT_INTEL_WL_7265_2:
5668 		sc->sc_fwname = "iwm7265fw";
5669 		sc->host_interrupt_operation_mode = 0;
5670 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5671 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5672 		return (0);
5673 	case PCI_PRODUCT_INTEL_WL_8260_1:
5674 	case PCI_PRODUCT_INTEL_WL_8260_2:
5675 		sc->sc_fwname = "iwm8000Cfw";
5676 		sc->host_interrupt_operation_mode = 0;
5677 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
5678 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
5679 		return (0);
5680 	default:
5681 		device_printf(dev, "unknown adapter type\n");
5682 		return ENXIO;
5683 	}
5684 }
5685 
5686 static int
5687 iwm_pci_attach(device_t dev)
5688 {
5689 	struct iwm_softc *sc;
5690 	int count, error, rid;
5691 	uint16_t reg;
5692 #if defined(__DragonFly__)
5693 	int irq_flags;
5694 #endif
5695 
5696 	sc = device_get_softc(dev);
5697 
5698 	/* Clear device-specific "PCI retry timeout" register (41h). */
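	/*
	 * The 16-bit access at offset 0x40 spans 0x40-0x41; masking off
	 * 0xff00 zeroes the retry-timeout byte at 0x41.
	 */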
5699 	reg = pci_read_config(dev, 0x40, sizeof(reg));
5700 	pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5701 
5702 	/* Enable bus-mastering and hardware bug workaround. */
5703 	pci_enable_busmaster(dev);
5704 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5705 	/* if !MSI */
5706 	if (reg & PCIM_STATUS_INTxSTATE) {
5707 		reg &= ~PCIM_STATUS_INTxSTATE;
5708 	}
5709 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5710 
5711 	rid = PCIR_BAR(0);
5712 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5713 	    RF_ACTIVE);
5714 	if (sc->sc_mem == NULL) {
5715 		device_printf(sc->sc_dev, "can't map mem space\n");
5716 		return (ENXIO);
5717 	}
5718 	sc->sc_st = rman_get_bustag(sc->sc_mem);
5719 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5720 
5721 	/* Install interrupt handler. */
5722 	count = 1;
5723 	rid = 0;
5724 #if defined(__DragonFly__)
5725 	pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
5726 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
5727 #else
5728 	if (pci_alloc_msi(dev, &count) == 0)
5729 		rid = 1;
5730 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5731 	    (rid != 0 ? 0 : RF_SHAREABLE));
5732 #endif
5733 	if (sc->sc_irq == NULL) {
5734 		device_printf(dev, "can't map interrupt\n");
5735 		return (ENXIO);
5736 	}
5737 #if defined(__DragonFly__)
5738 	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
5739 			       iwm_intr, sc, &sc->sc_ih,
5740 			       &wlan_global_serializer);
5741 #else
5742 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5743 	    NULL, iwm_intr, sc, &sc->sc_ih);
5744 #endif
5745 	if (sc->sc_ih == NULL) {
5746 		device_printf(dev, "can't establish interrupt\n");
5747 #if defined(__DragonFly__)
5748 		pci_release_msi(dev);
5749 #endif
5750 		return (ENXIO);
5751 	}
5752 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5753 
5754 	return (0);
5755 }
5756 
5757 static void
5758 iwm_pci_detach(device_t dev)
5759 {
5760 	struct iwm_softc *sc = device_get_softc(dev);
5761 
5762 	if (sc->sc_irq != NULL) {
5763 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5764 		bus_release_resource(dev, SYS_RES_IRQ,
5765 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5766 		pci_release_msi(dev);
5767 #if defined(__DragonFly__)
5768 		sc->sc_irq = NULL;
5769 #endif
5770 	}
5771 	if (sc->sc_mem != NULL) {
5772 		bus_release_resource(dev, SYS_RES_MEMORY,
5773 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5774 #if defined(__DragonFly__)
5775 		sc->sc_mem = NULL;
5776 #endif
5777 	}
5778 }
5779 
5780 
5781 
5782 static int
5783 iwm_attach(device_t dev)
5784 {
5785 	struct iwm_softc *sc = device_get_softc(dev);
5786 	struct ieee80211com *ic = &sc->sc_ic;
5787 	int error;
5788 	int txq_i, i;
5789 
5790 	sc->sc_dev = dev;
5791 	IWM_LOCK_INIT(sc);
5792 	mbufq_init(&sc->sc_snd, ifqmaxlen);
5793 #if defined(__DragonFly__)
5794 	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
5795 #else
5796 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5797 #endif
5798 	callout_init(&sc->sc_led_blink_to);
5799 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5800 	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
5801 	    taskqueue_thread_enqueue, &sc->sc_tq);
5802 #if defined(__DragonFly__)
5803 	error = taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON,
5804 					-1, "iwm_taskq");
5805 #else
5806 	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
5807 #endif
5808 	if (error != 0) {
5809 		device_printf(dev, "can't start threads, error %d\n",
5810 		    error);
5811 		goto fail;
5812 	}
5813 
5814 	/* PCI attach */
5815 	error = iwm_pci_attach(dev);
5816 	if (error != 0)
5817 		goto fail;
5818 
5819 	sc->sc_wantresp = -1;
5820 
5821 	/* Check device type */
5822 	error = iwm_dev_check(dev);
5823 	if (error != 0)
5824 		goto fail;
5825 
5826 	/*
5827 	 * We now start fiddling with the hardware
5828 	 */
5829 	/*
5830 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
5831 	 * changed, and now the revision step also includes bit 0-1 (no more
5832 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
5833 	 * in the old format.
5834 	 */
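	/*
	 * Worked example (assumption: IWM_CSR_HW_REV_STEP extracts bits
	 * 2-3): a raw value with step bits 0-1 == 0x3 is repacked so the
	 * step lands in the legacy "dash" position, i.e. 0x3 << 2 == 0xc
	 * in the low nibble of sc_hw_rev.
	 */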
5835 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
5836 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5837 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5838 
5839 	if (iwm_prepare_card_hw(sc) != 0) {
5840 		device_printf(dev, "could not initialize hardware\n");
5841 		goto fail;
5842 	}
5843 
5844 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
5845 		int ret;
5846 		uint32_t hw_step;
5847 
5848 		/*
5849 		 * In order to recognize C step the driver should read the
5850 		 * chip version id located at the AUX bus MISC address.
5851 		 */
5852 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5853 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5854 		DELAY(2);
5855 
5856 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5857 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5858 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5859 				   25000);
5860 		if (ret < 0) {
5861 			device_printf(sc->sc_dev,
5862 			    "Failed to wake up the nic\n");
5863 			goto fail;
5864 		}
5865 
5866 		if (iwm_nic_lock(sc)) {
5867 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5868 			hw_step |= IWM_ENABLE_WFPM;
5869 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5870 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5871 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5872 			if (hw_step == 0x3)
5873 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5874 						(IWM_SILICON_C_STEP << 2);
5875 			iwm_nic_unlock(sc);
5876 		} else {
5877 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
5878 			goto fail;
5879 		}
5880 	}
5881 
5882 	/* Allocate DMA memory for firmware transfers. */
5883 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
5884 		device_printf(dev, "could not allocate memory for firmware\n");
5885 		goto fail;
5886 	}
5887 
5888 	/* Allocate "Keep Warm" page. */
5889 	if ((error = iwm_alloc_kw(sc)) != 0) {
5890 		device_printf(dev, "could not allocate keep warm page\n");
5891 		goto fail;
5892 	}
5893 
5894 	/* We use ICT interrupts */
5895 	if ((error = iwm_alloc_ict(sc)) != 0) {
5896 		device_printf(dev, "could not allocate ICT table\n");
5897 		goto fail;
5898 	}
5899 
5900 	/* Allocate TX scheduler "rings". */
5901 	if ((error = iwm_alloc_sched(sc)) != 0) {
5902 		device_printf(dev, "could not allocate TX scheduler rings\n");
5903 		goto fail;
5904 	}
5905 
5906 	/* Allocate TX rings */
5907 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
5908 		if ((error = iwm_alloc_tx_ring(sc,
5909 		    &sc->txq[txq_i], txq_i)) != 0) {
5910 			device_printf(dev,
5911 			    "could not allocate TX ring %d\n",
5912 			    txq_i);
5913 			goto fail;
5914 		}
5915 	}
5916 
5917 	/* Allocate RX ring. */
5918 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
5919 		device_printf(dev, "could not allocate RX ring\n");
5920 		goto fail;
5921 	}
5922 
5923 	/* Clear pending interrupts. */
5924 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
5925 
5926 	ic->ic_softc = sc;
5927 	ic->ic_name = device_get_nameunit(sc->sc_dev);
5928 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
5929 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
5930 
5931 	/* Set device capabilities. */
5932 	ic->ic_caps =
5933 	    IEEE80211_C_STA |
5934 	    IEEE80211_C_WPA |		/* WPA/RSN */
5935 	    IEEE80211_C_WME |
5936 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
5937 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
5938 	    /* | IEEE80211_C_BGSCAN	   capable of bg scanning */
5939 	    ;
5940 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
5941 		sc->sc_phyctxt[i].id = i;
5942 		sc->sc_phyctxt[i].color = 0;
5943 		sc->sc_phyctxt[i].ref = 0;
5944 		sc->sc_phyctxt[i].channel = NULL;
5945 	}
5946 
5947 	/* Max RSSI */
5948 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
5949 	sc->sc_preinit_hook.ich_func = iwm_preinit;
5950 	sc->sc_preinit_hook.ich_arg = sc;
5951 	sc->sc_preinit_hook.ich_desc = "iwm";
5952 	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
5953 		device_printf(dev, "config_intrhook_establish failed\n");
5954 		goto fail;
5955 	}
5956 
5957 #ifdef IWM_DEBUG
5958 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
5959 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
5960 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
5961 #endif
5962 
5963 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5964 	    "<-%s\n", __func__);
5965 
5966 	return 0;
5967 
5968 	/* Free allocated memory if something failed during attachment. */
5969 fail:
5970 	iwm_detach_local(sc, 0);
5971 
5972 	return ENXIO;
5973 }
5974 
5975 static int
5976 iwm_is_valid_ether_addr(uint8_t *addr)
5977 {
5978 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
5979 
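	/* Reject multicast/broadcast (I/G bit set) and all-zero addresses. */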
5980 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
5981 		return (FALSE);
5982 
5983 	return (TRUE);
5984 }
5985 
5986 static int
5987 iwm_update_edca(struct ieee80211com *ic)
5988 {
5989 	struct iwm_softc *sc = ic->ic_softc;
5990 
5991 	device_printf(sc->sc_dev, "%s: called\n", __func__);
5992 	return (0);
5993 }
5994 
5995 static void
5996 iwm_preinit(void *arg)
5997 {
5998 	struct iwm_softc *sc = arg;
5999 	device_t dev = sc->sc_dev;
6000 	struct ieee80211com *ic = &sc->sc_ic;
6001 	int error;
6002 
6003 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6004 	    "->%s\n", __func__);
6005 
6006 	IWM_LOCK(sc);
6007 	if ((error = iwm_start_hw(sc)) != 0) {
6008 		device_printf(dev, "could not initialize hardware\n");
6009 		IWM_UNLOCK(sc);
6010 		goto fail;
6011 	}
6012 
6013 	error = iwm_run_init_mvm_ucode(sc, 1);
6014 	iwm_stop_device(sc);
6015 	if (error) {
6016 		IWM_UNLOCK(sc);
6017 		goto fail;
6018 	}
6019 	device_printf(dev,
6020 	    "hw rev 0x%x, fw ver %s, address %s\n",
6021 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6022 	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
6023 
6024 	/* not all hardware can do 5GHz band */
6025 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
6026 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6027 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6028 	IWM_UNLOCK(sc);
6029 
6030 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6031 	    ic->ic_channels);
6032 
6033 	/*
6034 	 * At this point we've committed - if we fail to do setup,
6035 	 * we now also have to tear down the net80211 state.
6036 	 */
6037 	ieee80211_ifattach(ic);
6038 	ic->ic_vap_create = iwm_vap_create;
6039 	ic->ic_vap_delete = iwm_vap_delete;
6040 	ic->ic_raw_xmit = iwm_raw_xmit;
6041 	ic->ic_node_alloc = iwm_node_alloc;
6042 	ic->ic_scan_start = iwm_scan_start;
6043 	ic->ic_scan_end = iwm_scan_end;
6044 	ic->ic_update_mcast = iwm_update_mcast;
6045 	ic->ic_getradiocaps = iwm_init_channel_map;
6046 	ic->ic_set_channel = iwm_set_channel;
6047 	ic->ic_scan_curchan = iwm_scan_curchan;
6048 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6049 	ic->ic_wme.wme_update = iwm_update_edca;
6050 	ic->ic_parent = iwm_parent;
6051 	ic->ic_transmit = iwm_transmit;
6052 	iwm_radiotap_attach(sc);
6053 	if (bootverbose)
6054 		ieee80211_announce(ic);
6055 
6056 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6057 	    "<-%s\n", __func__);
6058 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6059 
6060 	return;
6061 fail:
6062 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6063 	iwm_detach_local(sc, 0);
6064 }
6065 
6066 /*
6067  * Attach the interface to 802.11 radiotap.
6068  */
6069 static void
6070 iwm_radiotap_attach(struct iwm_softc *sc)
6071 {
6072 	struct ieee80211com *ic = &sc->sc_ic;
6073 
6074 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6075 	    "->%s begin\n", __func__);
6076 	ieee80211_radiotap_attach(ic,
6077 	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6078 	    IWM_TX_RADIOTAP_PRESENT,
6079 	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6080 	    IWM_RX_RADIOTAP_PRESENT);
6081 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6082 	    "<-%s end\n", __func__);
6083 }
6084 
6085 static struct ieee80211vap *
6086 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6087     enum ieee80211_opmode opmode, int flags,
6088     const uint8_t bssid[IEEE80211_ADDR_LEN],
6089     const uint8_t mac[IEEE80211_ADDR_LEN])
6090 {
6091 	struct iwm_vap *ivp;
6092 	struct ieee80211vap *vap;
6093 
6094 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6095 		return NULL;
6096 	ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
6097 	vap = &ivp->iv_vap;
6098 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6099 	vap->iv_bmissthreshold = 10;            /* override default */
6100 	/* Override with driver methods. */
6101 	ivp->iv_newstate = vap->iv_newstate;
6102 	vap->iv_newstate = iwm_newstate;
6103 
6104 	ieee80211_ratectl_init(vap);
6105 	/* Complete setup. */
6106 	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6107 	    mac);
6108 	ic->ic_opmode = opmode;
6109 
6110 	return vap;
6111 }
6112 
6113 static void
6114 iwm_vap_delete(struct ieee80211vap *vap)
6115 {
6116 	struct iwm_vap *ivp = IWM_VAP(vap);
6117 
6118 	ieee80211_ratectl_deinit(vap);
6119 	ieee80211_vap_detach(vap);
6120 	kfree(ivp, M_80211_VAP);
6121 }
6122 
6123 static void
6124 iwm_scan_start(struct ieee80211com *ic)
6125 {
6126 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6127 	struct iwm_softc *sc = ic->ic_softc;
6128 	int error;
6129 
6130 	IWM_LOCK(sc);
6131 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6132 		error = iwm_mvm_umac_scan(sc);
6133 	else
6134 		error = iwm_mvm_lmac_scan(sc);
6135 	if (error != 0) {
6136 		device_printf(sc->sc_dev, "could not initiate scan\n");
6137 		IWM_UNLOCK(sc);
6138 		ieee80211_cancel_scan(vap);
6139 	} else {
6140 		iwm_led_blink_start(sc);
6141 		IWM_UNLOCK(sc);
6142 	}
6143 }
6144 
6145 static void
6146 iwm_scan_end(struct ieee80211com *ic)
6147 {
6148 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6149 	struct iwm_softc *sc = ic->ic_softc;
6150 
6151 	IWM_LOCK(sc);
6152 	iwm_led_blink_stop(sc);
6153 	if (vap->iv_state == IEEE80211_S_RUN)
6154 		iwm_mvm_led_enable(sc);
6155 	IWM_UNLOCK(sc);
6156 }
6157 
6158 static void
6159 iwm_update_mcast(struct ieee80211com *ic)
6160 {
6161 }
6162 
6163 static void
6164 iwm_set_channel(struct ieee80211com *ic)
6165 {
6166 }
6167 
6168 static void
6169 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6170 {
6171 }
6172 
6173 static void
6174 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6175 {
6176 	return;
6177 }
6178 
6179 void
6180 iwm_init_task(void *arg1)
6181 {
6182 	struct iwm_softc *sc = arg1;
6183 
6184 	IWM_LOCK(sc);
6185 	while (sc->sc_flags & IWM_FLAG_BUSY) {
6186 #if defined(__DragonFly__)
6187 		lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6188 #else
6189 		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6190 #endif
6191 	}
6192 	sc->sc_flags |= IWM_FLAG_BUSY;
6193 	iwm_stop(sc);
6194 	if (sc->sc_ic.ic_nrunning > 0)
6195 		iwm_init(sc);
6196 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6197 	wakeup(&sc->sc_flags);
6198 	IWM_UNLOCK(sc);
6199 }
6200 
6201 static int
6202 iwm_resume(device_t dev)
6203 {
6204 	struct iwm_softc *sc = device_get_softc(dev);
6205 	int do_reinit = 0;
6206 	uint16_t reg;
6207 
6208 	/* Clear device-specific "PCI retry timeout" register (41h). */
6209 	reg = pci_read_config(dev, 0x40, sizeof(reg));
6210 	pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
6211 	iwm_init_task(sc);
6212 
6213 	IWM_LOCK(sc);
6214 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6215 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6216 		do_reinit = 1;
6217 	}
6218 	IWM_UNLOCK(sc);
6219 
6220 	if (do_reinit)
6221 		ieee80211_resume_all(&sc->sc_ic);
6222 
6223 	return 0;
6224 }
6225 
6226 static int
6227 iwm_suspend(device_t dev)
6228 {
6229 	int do_stop = 0;
6230 	struct iwm_softc *sc = device_get_softc(dev);
6231 
6232 	do_stop = (sc->sc_ic.ic_nrunning > 0);
6233 
6234 	ieee80211_suspend_all(&sc->sc_ic);
6235 
6236 	if (do_stop) {
6237 		IWM_LOCK(sc);
6238 		iwm_stop(sc);
6239 		sc->sc_flags |= IWM_FLAG_SCANNING;
6240 		IWM_UNLOCK(sc);
6241 	}
6242 
6243 	return (0);
6244 }
6245 
6246 static int
6247 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6248 {
6249 	struct iwm_fw_info *fw = &sc->sc_fw;
6250 	device_t dev = sc->sc_dev;
6251 	int i;
6252 
6253 	if (sc->sc_tq) {
6254 #if defined(__DragonFly__)
6255 		/* no taskqueue_drain_all() on DragonFly; tasks are drained on free */
6256 #else
6257 		taskqueue_drain_all(sc->sc_tq);
6258 #endif
6259 		taskqueue_free(sc->sc_tq);
6260 #if defined(__DragonFly__)
6261 		sc->sc_tq = NULL;
6262 #endif
6263 	}
6264 	callout_drain(&sc->sc_led_blink_to);
6265 	callout_drain(&sc->sc_watchdog_to);
6266 	iwm_stop_device(sc);
6267 	if (do_net80211) {
6268 		ieee80211_ifdetach(&sc->sc_ic);
6269 	}
6270 
6271 	iwm_phy_db_free(sc);
6272 
6273 	/* Free descriptor rings */
6274 	iwm_free_rx_ring(sc, &sc->rxq);
6275 	for (i = 0; i < nitems(sc->txq); i++)
6276 		iwm_free_tx_ring(sc, &sc->txq[i]);
6277 
6278 	/* Free firmware */
6279 	if (fw->fw_fp != NULL)
6280 		iwm_fw_info_free(fw);
6281 
6282 	/* Free scheduler */
6283 	iwm_free_sched(sc);
6284 	if (sc->ict_dma.vaddr != NULL)
6285 		iwm_free_ict(sc);
6286 	if (sc->kw_dma.vaddr != NULL)
6287 		iwm_free_kw(sc);
6288 	if (sc->fw_dma.vaddr != NULL)
6289 		iwm_free_fwmem(sc);
6290 
6291 	/* Finished with the hardware - detach things */
6292 	iwm_pci_detach(dev);
6293 
6294 	mbufq_drain(&sc->sc_snd);
6295 	IWM_LOCK_DESTROY(sc);
6296 
6297 	return (0);
6298 }
6299 
6300 static int
6301 iwm_detach(device_t dev)
6302 {
6303 	struct iwm_softc *sc = device_get_softc(dev);
6304 
6305 	return (iwm_detach_local(sc, 1));
6306 }
6307 
6308 static device_method_t iwm_pci_methods[] = {
6309         /* Device interface */
6310         DEVMETHOD(device_probe,         iwm_probe),
6311         DEVMETHOD(device_attach,        iwm_attach),
6312         DEVMETHOD(device_detach,        iwm_detach),
6313         DEVMETHOD(device_suspend,       iwm_suspend),
6314         DEVMETHOD(device_resume,        iwm_resume),
6315 
6316         DEVMETHOD_END
6317 };
6318 
6319 static driver_t iwm_pci_driver = {
6320         "iwm",
6321         iwm_pci_methods,
6322         sizeof (struct iwm_softc)
6323 };
6324 
6325 static devclass_t iwm_devclass;
6326 
6327 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6328 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6329 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6330 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6331