/*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 *                             DragonFly work
 *
 * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
 *      changes to remove per-device network interface (DragonFly has not
 *      caught up to that yet on the WLAN side).
 *
 * Comprehensive list of adjustments for DragonFly not #ifdef'd:
 *     malloc -> kmalloc       (in particular, changing improper M_NOWAIT
 *                             specifications to M_INTWAIT.  We still don't
 *                             understand why FreeBSD uses M_NOWAIT for
 *                             critical must-not-fail kmalloc()s).
 *     free -> kfree
 *     printf -> kprintf
 *     (bug fix) memset in iwm_reset_rx_ring.
 *     (debug)   added several kprintf()s on error
 *
 *     header file paths (DFly allows localized path specifications).
 *     minor header file differences.
 *
 * Comprehensive list of adjustments for DragonFly #ifdef'd:
 *     (safety)  added register read-back serialization in iwm_reset_rx_ring().
 *     packet counters
 *     msleep -> lksleep
 *     mtx -> lk  (mtx functions -> lockmgr functions)
 *     callout differences
 *     taskqueue differences
 *     MSI differences
 *     bus_setup_intr() differences
 *     minor PCI config register naming differences
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/endian.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#include <netproto/802_11/ieee80211_ratectl.h>
#include <netproto/802_11/ieee80211_radiotap.h>

#include "if_iwmreg.h"
#include "if_iwmvar.h"
#include "if_iwm_config.h"
#include "if_iwm_debug.h"
#include "if_iwm_notif_wait.h"
#include "if_iwm_util.h"
#include "if_iwm_binding.h"
#include "if_iwm_phy_db.h"
#include "if_iwm_mac_ctxt.h"
#include "if_iwm_phy_ctxt.h"
#include "if_iwm_time_event.h"
#include "if_iwm_power.h"
#include "if_iwm_scan.h"
#include "if_iwm_sf.h"
#include "if_iwm_sta.h"

#include "if_iwm_pcie_trans.h"
#include "if_iwm_led.h"
#include "if_iwm_fw.h"

#if defined(__DragonFly__)
#define mtodo(m, off)	mtodoff((m), void *, (off))
#endif

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
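
/*
 * The .rate field above is in 500 kbps units (2 == 1 Mbit/s), matching
 * net80211's rate representation.  A minimal sketch of a rate-to-PLCP
 * lookup over this table, assuming the IWM_RATE_INVM_PLCP sentinel from
 * if_iwmreg.h; the driver itself does the equivalent inline in
 * iwm_tx_fill_cmd() and iwm_rate_from_ucode_rate():
 */
#if 0
static uint8_t
iwm_rate2plcp_example(uint8_t rate)
{
	int i;

	for (i = 0; i <= IWM_RIDX_MAX; i++) {
		if (iwm_rates[i].rate == rate)
			return iwm_rates[i].plcp;
	}
	return IWM_RATE_INVM_PLCP;	/* assumed "invalid" sentinel */
}
#endif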

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

#define IWM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_UCODE_CALIB_TIMEOUT	(2*hz)

struct iwm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
                                           enum iwm_ucode_type,
                                           const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *);
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
                                  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
                                   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
					       struct iwm_nvm_data *,
					       const uint16_t *,
					       const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
				      const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
					     bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
						const struct iwm_fw_img *,
						int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
					   const struct iwm_fw_img *,
					   int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
					       const struct iwm_fw_img *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
					  const struct iwm_fw_img *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
                                              enum iwm_ucode_type);
static int	iwm_run_init_ucode(struct iwm_softc *, int);
static int	iwm_config_ltr(struct iwm_softc *sc);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
                                      struct iwm_rx_packet *);
static int	iwm_get_noise(struct iwm_softc *,
		    const struct iwm_statistics_rx_non_phy *);
static void	iwm_handle_rx_statistics(struct iwm_softc *,
		    struct iwm_rx_packet *);
static bool	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *,
		    uint32_t, bool);
static int	iwm_rx_tx_cmd_single(struct iwm_softc *,
                                         struct iwm_rx_packet *,
				         struct iwm_node *);
static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
                                 uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct mbuf *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
                       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static struct ieee80211_node *
		iwm_node_alloc(struct ieee80211vap *,
		               const uint8_t[IEEE80211_ADDR_LEN]);
static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_media_change(struct ifnet *);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static boolean_t iwm_is_lar_supported(struct iwm_softc *);
static boolean_t iwm_is_wifi_mcc_supported(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
		iwm_vap_create(struct ieee80211com *,
		               const char [IFNAMSIZ], int,
		               enum ieee80211_opmode, int,
		               const uint8_t [IEEE80211_ADDR_LEN],
		               const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_xmit_queue_drain(struct iwm_softc *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

#if defined(__DragonFly__)
static int	iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
#endif

static int	iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
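
/*
 * Both knobs are boot-time tunables; e.g. to disable LAR (location-aware
 * regulatory) and fall back to line-based interrupts, one would set in
 * /boot/loader.conf:
 *
 *	hw.iwm.lar.disable="1"
 *	hw.iwm.msi.enable="0"
 */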

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_img *fws;
	struct iwm_fw_desc *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.img[type];
	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
		return EINVAL;

	fwone = &fws->sec[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->offset, data, sizeof(uint32_t));

	/* rest is data */
	fwone->data = data + sizeof(uint32_t);
	fwone->len = dlen - sizeof(uint32_t);

	fws->fw_count++;

	return 0;
}
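
/*
 * For reference, each IWM_UCODE_TLV_SEC_* payload stored above is a
 * 32-bit device load offset followed by the section image; a purely
 * illustrative view of the wire layout (not a real driver type):
 */
#if 0
struct iwm_fw_section_wire {
	uint32_t offset;	/* device address the section is loaded at */
	uint8_t	 data[];	/* remaining dlen - sizeof(uint32_t) bytes */
};
#endif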

#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
			struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_api *ucode_api = (const void *)data;
	uint32_t api_index = le32toh(ucode_api->api_index);
	uint32_t api_flags = le32toh(ucode_api->api_flags);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
		device_printf(sc->sc_dev,
		    "api flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_api, i + 32 * api_index);
	}

	return 0;
}
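
/*
 * enabled_api/enabled_capa are plain bitmaps, so TLV word api_index
 * covers bits [32 * api_index, 32 * api_index + 31].  A sketch of how a
 * consumer might test a bit later with the companion isset() macro
 * (IWM_UCODE_TLV_API_LQ_SS_PARAMS is just an assumed example flag):
 */
#if 0
	if (isset(sc->sc_fw.ucode_capa.enabled_api,
	    IWM_UCODE_TLV_API_LQ_SS_PARAMS)) {
		/* firmware accepts the extended link-quality command */
	}
#endif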

static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
			   struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
	uint32_t api_index = le32toh(ucode_capa->api_index);
	uint32_t api_flags = le32toh(ucode_capa->api_capa);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
		device_printf(sc->sc_dev,
		    "capa flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_capa, i + 32 * api_index);
	}

	return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	memset(fw->img, 0, sizeof(fw->img));
}

static int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	const struct iwm_ucode_tlv *tlv;
	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	uint32_t tlv_len;
	uint32_t usniffer_img;
	const uint8_t *tlv_data;
	uint32_t paging_mem_size;
	int num_of_cpus;
	int error = 0;
	size_t len;

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	fwp = firmware_get(sc->cfg->fw_name);
	if (fwp == NULL) {
		error = ENOENT;
		device_printf(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->cfg->fw_name, error);
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	capa->flags = 0;
	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}

	ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);
		tlv_data = tlv->data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}
		len -= roundup2(tlv_len, 4);
		data += sizeof(*tlv) + roundup2(tlv_len, 4);

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->max_probe_length =
			    le32_to_cpup((const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (capa->max_probe_length >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
			if (num_of_cpus == 2) {
				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_INIT].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
					TRUE;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%u) != sizeof(iwm_tlv_calib_data) (%zu)\n",
				    __func__, tlv_len,
				    sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				goto parse_out;
			}
			sc->sc_fw.phy_config =
			    le32_to_cpup((const uint32_t *)tlv_data);
			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_TX_CHAIN) >>
						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_RX_CHAIN) >>
						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			if (tlv_len != sizeof(struct iwm_ucode_api)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_CMD_VERSIONS:
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "%s: Paging: paging enabled (size = %u bytes)\n",
			    __func__, paging_mem_size);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				device_printf(sc->sc_dev,
					"%s: Paging: driver supports up to %u bytes for paging image\n",
					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
				error = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				device_printf(sc->sc_dev,
				    "%s: Paging: image size isn't a multiple of %u\n",
				    __func__, IWM_FW_PAGING_SIZE);
				error = EINVAL;
				goto out;
			}

			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
			sc->sc_fw.img[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			capa->n_scan_channels =
			    le32_to_cpup((const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%u.%u.%u",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

 out:
	if (error) {
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	}

	return error;
}
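
/*
 * For reference, the stream parsed above is the iwlwifi TLV firmware
 * format: an iwm_tlv_ucode_header followed by a sequence of records,
 * each a 32-bit LE type, a 32-bit LE length and "length" bytes of data
 * padded to a 4-byte boundary -- hence the loop advancing by
 * sizeof(*tlv) + roundup2(tlv_len, 4).
 */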

/*
 * DMA resource routines
 */

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
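
/*
 * Alignment recap for the allocations above: firmware chunks need 16
 * bytes, the TX scheduler byte-count tables 1KB, the keep-warm page
 * 4KB, and the ICT table 1 << IWM_ICT_PADDR_SHIFT (4KB with the stock
 * shift of 12), since iwm_ict_reset() programs only
 * paddr >> IWM_ICT_PADDR_SHIFT into IWM_CSR_DRAM_INT_TBL_REG.
 */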

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	size_t descsz;
	int count, i, error;

	ring->cur = 0;
	if (sc->cfg->mqrx_supported) {
		count = IWM_RX_MQ_RING_COUNT;
		descsz = sizeof(uint64_t);
	} else {
		count = IWM_RX_LEGACY_RING_COUNT;
		descsz = sizeof(uint32_t);
	}

	/* Allocate RX descriptors (256-byte aligned). */
	size = count * descsz;
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size,
	    256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->free_desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	if (sc->cfg->mqrx_supported) {
		size = count * sizeof(uint32_t);
		error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
		    size, 256);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not allocate RX ring DMA memory\n");
			goto fail;
		}
	}

	/* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	if (sc->rxq.stat)
		memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int count, i;

	iwm_dma_contig_free(&ring->free_desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);
	iwm_dma_contig_free(&ring->used_desc_dma);

	count = sc->cfg->mqrx_supported ? IWM_RX_MQ_RING_COUNT :
	    IWM_RX_LEGACY_RING_COUNT;

	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   maxsize, nsegments, maxsize,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
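
/*
 * The per-slot addresses computed above slice cmd_dma into
 * IWM_TX_RING_COUNT consecutive struct iwm_device_cmd blocks:
 *
 *	slot i: cmd_paddr     = cmd_dma.paddr + i * sizeof(struct iwm_device_cmd)
 *	        scratch_paddr = cmd_paddr + sizeof(struct iwm_cmd_header)
 *	                                  + offsetof(struct iwm_tx_cmd, scratch)
 *
 * The closing KASSERT checks exactly this arithmetic.
 */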

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

/* iwlwifi pcie/trans.c */

1303  * Since this .. hard-resets things, it's time to actually
1304  * mark the first vap (if any) as having no mac context.
1305  * It's annoying, but since the driver is potentially being
1306  * stop/start'ed whilst active (thanks openbsd port!) we
1307  * have to correctly track this.
1308  */
1309 static void
1310 iwm_stop_device(struct iwm_softc *sc)
1311 {
1312 	struct ieee80211com *ic = &sc->sc_ic;
1313 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1314 	int chnl, qid;
1315 	uint32_t mask = 0;
1316 
1317 	/* tell the device to stop sending interrupts */
1318 	iwm_disable_interrupts(sc);
1319 
1320 	/*
1321 	 * FreeBSD-local: mark the first vap as not-uploaded,
1322 	 * so the next transition through auth/assoc
1323 	 * will correctly populate the MAC context.
1324 	 */
1325 	if (vap) {
1326 		struct iwm_vap *iv = IWM_VAP(vap);
1327 		iv->phy_ctxt = NULL;
1328 		iv->is_uploaded = 0;
1329 	}
1330 	sc->sc_firmware_state = 0;
1331 	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
1332 
1333 	/* device going down, Stop using ICT table */
1334 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1335 
1336 	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1337 
1338 	if (iwm_nic_lock(sc)) {
1339 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1340 
1341 		/* Stop each Tx DMA channel */
1342 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1343 			IWM_WRITE(sc,
1344 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1345 			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1346 		}
1347 
1348 		/* Wait for DMA channels to be idle */
1349 		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1350 		    5000)) {
1351 			device_printf(sc->sc_dev,
1352 			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1353 			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1354 		}
1355 		iwm_nic_unlock(sc);
1356 	}
1357 	iwm_pcie_rx_stop(sc);
1358 
1359 	/* Stop RX ring. */
1360 	iwm_reset_rx_ring(sc, &sc->rxq);
1361 
1362 	/* Reset all TX rings. */
1363 	for (qid = 0; qid < nitems(sc->txq); qid++)
1364 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1365 
1366 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1367 		/* Power-down device's busmaster DMA clocks */
1368 		if (iwm_nic_lock(sc)) {
1369 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1370 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1371 			iwm_nic_unlock(sc);
1372 		}
1373 		DELAY(5);
1374 	}
1375 
1376 	/* Make sure (redundant) we've released our request to stay awake */
1377 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1378 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1379 
1380 	/* Stop the device, and put it in low power state */
1381 	iwm_apm_stop(sc);
1382 
1383 	/* stop and reset the on-board processor */
1384 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1385 	DELAY(5000);
1386 
1387 	/*
1388 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1389 	 */
1390 	iwm_disable_interrupts(sc);
1391 
1392 	/*
1393 	 * Even if we stop the HW, we still want the RF kill
1394 	 * interrupt
1395 	 */
1396 	iwm_enable_rfkill_int(sc);
1397 	iwm_check_rfkill(sc);
1398 
1399 	iwm_prepare_card_hw(sc);
1400 }
1401 
1402 /* iwlwifi: mvm/ops.c */
1403 static void
1404 iwm_nic_config(struct iwm_softc *sc)
1405 {
1406 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1407 	uint32_t reg_val = 0;
1408 	uint32_t phy_config = iwm_get_phy_config(sc);
1409 
1410 	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1411 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1412 	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1413 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1414 	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1415 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1416 
1417 	/* SKU control */
1418 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1419 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1420 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1421 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1422 
1423 	/* radio configuration */
1424 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1425 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1426 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1427 
1428 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG,
1429 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
1430 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
1431 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
1432 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
1433 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
1434 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
1435 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
1436 	    reg_val);
1437 
1438 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1439 	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1440 	    radio_cfg_step, radio_cfg_dash);
1441 
1442 	/*
1443 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
1444 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
1445 	 * to lose ownership and not being able to obtain it back.
1446 	 */
1447 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1448 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1449 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1450 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1451 	}
1452 }
1453 
1454 static int
1455 iwm_nic_rx_mq_init(struct iwm_softc *sc)
1456 {
1457 	int enabled;
1458 
1459 	if (!iwm_nic_lock(sc))
1460 		return EBUSY;
1461 
1462 	/* Stop RX DMA. */
1463 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
1464 	/* Disable RX used and free queue operation. */
1465 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);
1466 
1467 	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
1468 	    sc->rxq.free_desc_dma.paddr);
1469 	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
1470 	    sc->rxq.used_desc_dma.paddr);
1471 	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
1472 	    sc->rxq.stat_dma.paddr);
1473 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
1474 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
1475 	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);
1476 
1477 	/* We configure only queue 0 for now. */
1478 	enabled = ((1 << 0) << 16) | (1 << 0);
1479 
1480 	/* Enable RX DMA, 4KB buffer size. */
1481 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
1482 	    IWM_RFH_DMA_EN_ENABLE_VAL |
1483 	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
1484 	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
1485 	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
1486 	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);
1487 
1488 	/* Enable RX DMA snooping. */
1489 	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
1490 	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
1491 	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
1492 	    (sc->cfg->integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
1493 	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));
1494 
1495 	/* Enable the configured queue(s). */
1496 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);
1497 
1498 	iwm_nic_unlock(sc);
1499 
1500 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1501 
1502 	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);
1503 
1504 	return (0);
1505 }
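
/*
 * In IWM_RFH_RXF_RXQ_ACTIVE the driver sets both bit q and bit q + 16
 * for every queue it brings up; the high half appears to be a second
 * per-queue enable bank, which is why the mask above is written as
 * ((1 << 0) << 16) | (1 << 0) for queue 0.
 */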

static int
iwm_nic_rx_legacy_init(struct iwm_softc *sc)
{

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG,
	    sc->rxq.free_desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	iwm_nic_unlock(sc);

	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	return 0;
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (sc->cfg->mqrx_supported)
		return iwm_nic_rx_mq_init(sc);
	else
		return iwm_nic_rx_legacy_init(sc);
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	int qmsk;

	qmsk = 1 << qid;

	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev, "%s: cannot enable txq %d\n",
		    __func__, qid);
		return EBUSY;
	}

	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_CMD_QUEUE) {
		/* Disable the scheduler. */
		iwm_write_prph(sc, IWM_SCD_EN_CTRL, 0);

		/* Stop the TX queue prior to configuration. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		/* Disable aggregations for this queue. */
		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, qmsk);

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);

		/* Enable the scheduler for this queue. */
		iwm_write_prph(sc, IWM_SCD_EN_CTRL, qmsk);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}
1735 
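/*
 * Finish PCIe-side setup once the firmware has reported alive: reset the
 * ICT table, clear the scheduler's SRAM context area, point the scheduler
 * at our DMA memory, and enable the command queue and the FH TX DMA
 * channels.
 */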
1736 static int
1737 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1738 {
1739 	int error, chnl;
1740 
1741 	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1742 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1743 
1744 	if (!iwm_nic_lock(sc))
1745 		return EBUSY;
1746 
1747 	iwm_ict_reset(sc);
1748 
1749 	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1750 	if (scd_base_addr != 0 &&
1751 	    scd_base_addr != sc->scd_base_addr) {
1752 		device_printf(sc->sc_dev,
1753 		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1754 		    __func__, sc->scd_base_addr, scd_base_addr);
1755 	}
1756 
1757 	iwm_nic_unlock(sc);
1758 
1759 	/* reset context data, TX status and translation data */
1760 	error = iwm_write_mem(sc,
1761 	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1762 	    NULL, clear_dwords);
1763 	if (error)
1764 		return EBUSY;
1765 
1766 	if (!iwm_nic_lock(sc))
1767 		return EBUSY;
1768 
1769 	/* Set physical address of TX scheduler rings (1KB aligned). */
1770 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1771 
1772 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1773 
1774 	iwm_nic_unlock(sc);
1775 
1776 	/* enable command channel */
1777 	error = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
1778 	if (error)
1779 		return error;
1780 
1781 	if (!iwm_nic_lock(sc))
1782 		return EBUSY;
1783 
1784 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1785 
1786 	/* Enable DMA channels. */
1787 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1788 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1789 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1790 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1791 	}
1792 
1793 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1794 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1795 
1796 	iwm_nic_unlock(sc);
1797 
1798 	/* Enable L1-Active */
1799 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
1800 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1801 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1802 	}
1803 
1804 	return error;
1805 }
1806 
1807 /*
1808  * NVM read access and content parsing.  We do not support
1809  * external NVM or writing NVM.
1810  * iwlwifi/mvm/nvm.c
1811  */
1812 
1813 /* Default NVM size to read */
1814 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1815 
1816 #define IWM_NVM_WRITE_OPCODE 1
1817 #define IWM_NVM_READ_OPCODE 0
1818 
1819 /* load nvm chunk response */
1820 enum {
1821 	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1822 	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1823 };
1824 
1825 static int
1826 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1827 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1828 {
1829 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1830 		.offset = htole16(offset),
1831 		.length = htole16(length),
1832 		.type = htole16(section),
1833 		.op_code = IWM_NVM_READ_OPCODE,
1834 	};
1835 	struct iwm_nvm_access_resp *nvm_resp;
1836 	struct iwm_rx_packet *pkt;
1837 	struct iwm_host_cmd cmd = {
1838 		.id = IWM_NVM_ACCESS_CMD,
1839 		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1840 		.data = { &nvm_access_cmd, },
1841 	};
1842 	int ret, bytes_read, offset_read;
1843 	uint8_t *resp_data;
1844 
1845 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1846 
1847 	ret = iwm_send_cmd(sc, &cmd);
1848 	if (ret) {
1849 		device_printf(sc->sc_dev,
1850 		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1851 		return ret;
1852 	}
1853 
1854 	pkt = cmd.resp_pkt;
1855 
1856 	/* Extract NVM response */
1857 	nvm_resp = (void *)pkt->data;
1858 	ret = le16toh(nvm_resp->status);
1859 	bytes_read = le16toh(nvm_resp->length);
1860 	offset_read = le16toh(nvm_resp->offset);
1861 	resp_data = nvm_resp->data;
1862 	if (ret) {
1863 		if ((offset != 0) &&
1864 		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1865 			/*
1866 			 * meaning of NOT_VALID_ADDRESS:
1867 			 * The driver tried to read a chunk from an address
1868 			 * that is a multiple of 2K and got an error because
1869 			 * that address is empty.
1870 			 * meaning of (offset != 0): the driver has already
1871 			 * read valid data from another chunk, so this case
1872 			 * is not an error.
1873 			 */
1874 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1875 				    "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
1876 				    offset);
1876 			*len = 0;
1877 			ret = 0;
1878 		} else {
1879 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1880 				    "NVM access command failed with status %d\n", ret);
1881 			ret = EIO;
1882 		}
1883 		goto exit;
1884 	}
1885 
1886 	if (offset_read != offset) {
1887 		device_printf(sc->sc_dev,
1888 		    "NVM ACCESS response with invalid offset %d\n",
1889 		    offset_read);
1890 		ret = EINVAL;
1891 		goto exit;
1892 	}
1893 
1894 	if (bytes_read > length) {
1895 		device_printf(sc->sc_dev,
1896 		    "NVM ACCESS response with too much data "
1897 		    "(%d bytes requested, %d bytes received)\n",
1898 		    length, bytes_read);
1899 		ret = EINVAL;
1900 		goto exit;
1901 	}
1902 
1903 	/* Copy the NVM chunk into the caller's buffer. */
1904 	memcpy(data + offset, resp_data, bytes_read);
1905 	*len = bytes_read;
1906 
1907  exit:
1908 	iwm_free_resp(sc, &cmd);
1909 	return ret;
1910 }
1911 
1912 /*
1913  * Reads an NVM section completely.
1914  * NICs prior to the 7000 family don't have a real NVM, but just read
1915  * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
1916  * by the uCode, we need to manually check in this case that we don't
1917  * overflow and try to read more than the EEPROM size.
1918  * For 7000-family NICs, we supply the maximal size we can read, and
1919  * the uCode fills the response with as much data as fits without
1920  * overflowing, so no check is needed.
1921  */
1922 static int
1923 iwm_nvm_read_section(struct iwm_softc *sc,
1924 	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1925 {
1926 	uint16_t seglen, length, offset = 0;
1927 	int ret;
1928 
1929 	/* Set nvm section read length */
1930 	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1931 
1932 	seglen = length;
1933 
1934 	/* Read the NVM until exhausted (reading less than requested) */
1935 	while (seglen == length) {
1936 		/* Make sure we don't read past the end of the EEPROM buffer. */
1937 		if ((size_read + offset + length) >
1938 		    sc->cfg->eeprom_size) {
1939 			device_printf(sc->sc_dev,
1940 			    "EEPROM size is too small for NVM\n");
1941 			return ENOBUFS;
1942 		}
1943 
1944 		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1945 		if (ret) {
1946 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1947 				    "Cannot read NVM from section %d offset %d, length %d\n",
1948 				    section, offset, length);
1949 			return ret;
1950 		}
1951 		offset += seglen;
1952 	}
1953 
1954 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1955 		    "NVM section %d read completed\n", section);
1956 	*len = offset;
1957 	return 0;
1958 }
1959 
1960 /*
1961  * BEGIN IWM_NVM_PARSE
1962  */
1963 
1964 /* iwlwifi/iwl-nvm-parse.c */
1965 
1966 /* NVM offsets (in words) definitions */
1967 enum iwm_nvm_offsets {
1968 	/* NVM HW-Section offset (in words) definitions */
1969 	IWM_HW_ADDR = 0x15,
1970 
1971 	/* NVM SW-Section offset (in words) definitions */
1972 	IWM_NVM_SW_SECTION = 0x1C0,
1973 	IWM_NVM_VERSION = 0,
1974 	IWM_RADIO_CFG = 1,
1975 	IWM_SKU = 2,
1976 	IWM_N_HW_ADDRS = 3,
1977 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1978 
1979 	/* NVM calibration section offset (in words) definitions */
1980 	IWM_NVM_CALIB_SECTION = 0x2B8,
1981 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1982 };
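/*
 * Note: these offsets are in 16-bit words, and the per-section entries
 * (e.g. IWM_NVM_CHANNELS) are relative to the start of their section,
 * hence the subtraction of the section base above.
 */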
1983 
1984 enum iwm_8000_nvm_offsets {
1985 	/* NVM HW-Section offset (in words) definitions */
1986 	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1987 	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1988 	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1989 	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1990 	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1991 
1992 	/* NVM SW-Section offset (in words) definitions */
1993 	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1994 	IWM_NVM_VERSION_8000 = 0,
1995 	IWM_RADIO_CFG_8000 = 0,
1996 	IWM_SKU_8000 = 2,
1997 	IWM_N_HW_ADDRS_8000 = 3,
1998 
1999 	/* NVM REGULATORY-Section offset (in words) definitions */
2000 	IWM_NVM_CHANNELS_8000 = 0,
2001 	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
2002 	IWM_NVM_LAR_OFFSET_8000 = 0x507,
2003 	IWM_NVM_LAR_ENABLED_8000 = 0x7,
2004 
2005 	/* NVM calibration section offset (in words) definitions */
2006 	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
2007 	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
2008 };
2009 
2010 /* SKU Capabilities (actual values from NVM definition) */
2011 enum nvm_sku_bits {
2012 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
2013 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
2014 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
2015 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
2016 };
2017 
2018 /* radio config bits (actual values from NVM definition) */
2019 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
2020 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
2021 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
2022 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
2023 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
2024 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
2025 
2026 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
2027 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
2028 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
2029 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
2030 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
2031 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
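/*
 * Worked example (illustrative value only): a pre-8000 radio_cfg word of
 * 0x1234 decodes as
 *	DASH   = 0x1234 & 0x3          = 0
 *	STEP   = (0x1234 >> 2)  & 0x3  = 1
 *	TYPE   = (0x1234 >> 4)  & 0x3  = 3
 *	PNUM   = (0x1234 >> 6)  & 0x3  = 0
 *	TX_ANT = (0x1234 >> 8)  & 0xF  = 2
 *	RX_ANT = (0x1234 >> 12) & 0xF  = 1
 */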
2032 
2033 /**
2034  * enum iwm_nvm_channel_flags - channel flags in NVM
2035  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
2036  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
2037  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
2038  * @IWM_NVM_CHANNEL_RADAR: radar detection required
2039  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
2040  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
2041  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
2042  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
2043  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
2044  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
2045  */
2046 enum iwm_nvm_channel_flags {
2047 	IWM_NVM_CHANNEL_VALID = (1 << 0),
2048 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
2049 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
2050 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
2051 	IWM_NVM_CHANNEL_DFS = (1 << 7),
2052 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
2053 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
2054 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
2055 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
2056 };
2057 
2058 /*
2059  * Translate EEPROM flags to net80211.
2060  */
2061 static uint32_t
2062 iwm_eeprom_channel_flags(uint16_t ch_flags)
2063 {
2064 	uint32_t nflags;
2065 
2066 	nflags = 0;
2067 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2068 		nflags |= IEEE80211_CHAN_PASSIVE;
2069 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2070 		nflags |= IEEE80211_CHAN_NOADHOC;
2071 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2072 		nflags |= IEEE80211_CHAN_DFS;
2073 		/* Just in case. */
2074 		nflags |= IEEE80211_CHAN_NOADHOC;
2075 	}
2076 
2077 	return (nflags);
2078 }
2079 
2080 static void
2081 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2082     int maxchans, int *nchans, int ch_idx, size_t ch_num,
2083     const uint8_t bands[])
2084 {
2085 	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2086 	uint32_t nflags;
2087 	uint16_t ch_flags;
2088 	uint8_t ieee;
2089 	int error;
2090 
2091 	for (; ch_idx < ch_num; ch_idx++) {
2092 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2093 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2094 			ieee = iwm_nvm_channels[ch_idx];
2095 		else
2096 			ieee = iwm_nvm_channels_8000[ch_idx];
2097 
2098 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2099 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2100 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
2101 			    ieee, ch_flags,
2102 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2103 			    "5.2" : "2.4");
2104 			continue;
2105 		}
2106 
2107 		nflags = iwm_eeprom_channel_flags(ch_flags);
2108 		error = ieee80211_add_channel(chans, maxchans, nchans,
2109 		    ieee, 0, 0, nflags, bands);
2110 		if (error != 0)
2111 			break;
2112 
2113 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2114 		    "Ch. %d Flags %x [%sGHz] - Added\n",
2115 		    ieee, ch_flags,
2116 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2117 		    "5.2" : "2.4");
2118 	}
2119 }
2120 
2121 static void
2122 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2123     struct ieee80211_channel chans[])
2124 {
2125 	struct iwm_softc *sc = ic->ic_softc;
2126 	struct iwm_nvm_data *data = sc->nvm_data;
2127 	uint8_t bands[IEEE80211_MODE_BYTES];
2128 	size_t ch_num;
2129 
2130 	memset(bands, 0, sizeof(bands));
2131 	/* 1-13: 11b/g channels. */
2132 	setbit(bands, IEEE80211_MODE_11B);
2133 	setbit(bands, IEEE80211_MODE_11G);
2134 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2135 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2136 
2137 	/* 14: 11b channel only. */
2138 	clrbit(bands, IEEE80211_MODE_11G);
2139 	iwm_add_channel_band(sc, chans, maxchans, nchans,
2140 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2141 
2142 	if (data->sku_cap_band_52GHz_enable) {
2143 		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2144 			ch_num = nitems(iwm_nvm_channels);
2145 		else
2146 			ch_num = nitems(iwm_nvm_channels_8000);
2147 		memset(bands, 0, sizeof(bands));
2148 		setbit(bands, IEEE80211_MODE_11A);
2149 		iwm_add_channel_band(sc, chans, maxchans, nchans,
2150 		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2151 	}
2152 }
2153 
2154 static void
2155 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2156 	const uint16_t *mac_override, const uint16_t *nvm_hw)
2157 {
2158 	const uint8_t *hw_addr;
2159 
2160 	if (mac_override) {
2161 		static const uint8_t reserved_mac[] = {
2162 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2163 		};
2164 
2165 		hw_addr = (const uint8_t *)(mac_override +
2166 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2167 
2168 		/*
2169 		 * Store the MAC address from MAO section.
2170 		 * No byte swapping is required in MAO section
2171 		 */
2172 		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2173 
2174 		/*
2175 		 * Force the use of the OTP MAC address in case of reserved MAC
2176 		 * address in the NVM, or if address is given but invalid.
2177 		 */
2178 		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2179 		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2180 		    iwm_is_valid_ether_addr(data->hw_addr) &&
2181 		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2182 			return;
2183 
2184 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2185 		    "%s: mac address from nvm override section invalid\n",
2186 		    __func__);
2187 	}
2188 
2189 	if (nvm_hw) {
2190 		/* read the mac address from WFMP registers */
2191 		uint32_t mac_addr0 =
2192 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2193 		uint32_t mac_addr1 =
2194 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2195 
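		/*
		 * The first four address bytes live in WFMP_MAC_ADDR_0 and
		 * the last two in the low half of WFMP_MAC_ADDR_1, stored
		 * most-significant byte first; hence the reversed indexing
		 * of the htole32()-normalized words below.
		 */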
2196 		hw_addr = (const uint8_t *)&mac_addr0;
2197 		data->hw_addr[0] = hw_addr[3];
2198 		data->hw_addr[1] = hw_addr[2];
2199 		data->hw_addr[2] = hw_addr[1];
2200 		data->hw_addr[3] = hw_addr[0];
2201 
2202 		hw_addr = (const uint8_t *)&mac_addr1;
2203 		data->hw_addr[4] = hw_addr[1];
2204 		data->hw_addr[5] = hw_addr[0];
2205 
2206 		return;
2207 	}
2208 
2209 	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2210 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2211 }
2212 
2213 static int
2214 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2215 	    const uint16_t *phy_sku)
2216 {
2217 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2218 		return le16_to_cpup(nvm_sw + IWM_SKU);
2219 
2220 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2221 }
2222 
2223 static int
2224 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2225 {
2226 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2227 		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2228 	else
2229 		return le32_to_cpup((const uint32_t *)(nvm_sw +
2230 						IWM_NVM_VERSION_8000));
2231 }
2232 
2233 static int
2234 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2235 		  const uint16_t *phy_sku)
2236 {
2237 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2238 		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2239 
2240 	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2241 }
2242 
2243 static int
2244 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2245 {
2246 	int n_hw_addr;
2247 
2248 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2249 		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2250 
2251 	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2252 
2253 	return n_hw_addr & IWM_N_HW_ADDR_MASK;
2254 }
2255 
2256 static void
2257 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2258 		  uint32_t radio_cfg)
2259 {
2260 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2261 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2262 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2263 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2264 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2265 		return;
2266 	}
2267 
2268 	/* set the radio configuration for family 8000 */
2269 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2270 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2271 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2272 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2273 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2274 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2275 }
2276 
2277 static int
2278 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2279 		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2280 {
2281 #ifdef notyet /* for FAMILY 9000 */
2282 	if (cfg->mac_addr_from_csr) {
2283 		iwm_set_hw_address_from_csr(sc, data);
2284 	} else
2285 #endif
2286 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2287 		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2288 
2289 		/* The byte order is little endian 16 bit, meaning 214365 */
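		/* e.g. stored bytes 11 22 33 44 55 66 yield MAC 22:11:44:33:66:55 */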
2290 		data->hw_addr[0] = hw_addr[1];
2291 		data->hw_addr[1] = hw_addr[0];
2292 		data->hw_addr[2] = hw_addr[3];
2293 		data->hw_addr[3] = hw_addr[2];
2294 		data->hw_addr[4] = hw_addr[5];
2295 		data->hw_addr[5] = hw_addr[4];
2296 	} else {
2297 		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2298 	}
2299 
2300 	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2301 		device_printf(sc->sc_dev, "no valid mac address was found\n");
2302 		return EINVAL;
2303 	}
2304 
2305 	return 0;
2306 }
2307 
2308 static struct iwm_nvm_data *
2309 iwm_parse_nvm_data(struct iwm_softc *sc,
2310 		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2311 		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2312 		   const uint16_t *phy_sku, const uint16_t *regulatory)
2313 {
2314 	struct iwm_nvm_data *data;
2315 	uint32_t sku, radio_cfg;
2316 	uint16_t lar_config;
2317 
2318 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2319 		data = kmalloc(sizeof(*data) +
2320 		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2321 		    M_DEVBUF, M_WAITOK | M_ZERO);
2322 	} else {
2323 		data = kmalloc(sizeof(*data) +
2324 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2325 		    M_DEVBUF, M_WAITOK | M_ZERO);
2326 	}
2327 	if (!data)
2328 		return NULL;
2329 
2330 	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2331 
2332 	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2333 	iwm_set_radio_cfg(sc, data, radio_cfg);
2334 
2335 	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2336 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2337 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2338 	data->sku_cap_11n_enable = 0;
2339 
2340 	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2341 
2342 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2343 		/* TODO: use IWL_NVM_EXT */
2344 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
2345 				       IWM_NVM_LAR_OFFSET_8000_OLD :
2346 				       IWM_NVM_LAR_OFFSET_8000;
2347 
2348 		lar_config = le16_to_cpup(regulatory + lar_offset);
2349 		data->lar_enabled = !!(lar_config &
2350 				       IWM_NVM_LAR_ENABLED_8000);
2351 	}
2352 
2353 	/* If no valid mac address was found - bail out */
2354 	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2355 		kfree(data, M_DEVBUF);
2356 		return NULL;
2357 	}
2358 
2359 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2360 		memcpy(data->nvm_ch_flags, sc->cfg->nvm_type == IWM_NVM_SDP ?
2361 		    &regulatory[0] : &nvm_sw[IWM_NVM_CHANNELS],
2362 		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2363 	} else {
2364 		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2365 		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2366 	}
2367 
2368 	return data;
2369 }
2370 
2371 static void
2372 iwm_free_nvm_data(struct iwm_nvm_data *data)
2373 {
2374 	if (data != NULL)
2375 		kfree(data, M_DEVBUF);
2376 }
2377 
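/*
 * Assemble a parsed NVM image from the raw sections read off the device.
 * Which sections are mandatory depends on the device family; a missing
 * mandatory section makes the parse fail.
 */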
2378 static struct iwm_nvm_data *
2379 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2380 {
2381 	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2382 
2383 	/* Checking for required sections */
2384 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2385 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2386 		    !sections[sc->cfg->nvm_hw_section_num].data) {
2387 			device_printf(sc->sc_dev,
2388 			    "Can't parse empty OTP/NVM sections\n");
2389 			return NULL;
2390 		}
2391 	} else if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2392 		/* SW and REGULATORY sections are mandatory */
2393 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2394 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2395 			device_printf(sc->sc_dev,
2396 			    "Can't parse empty OTP/NVM sections\n");
2397 			return NULL;
2398 		}
2399 		/* MAC_OVERRIDE or at least HW section must exist */
2400 		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2401 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2402 			device_printf(sc->sc_dev,
2403 			    "Can't parse mac_address, empty sections\n");
2404 			return NULL;
2405 		}
2406 
2407 		/* PHY_SKU section is mandatory in B0 */
2408 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2409 			device_printf(sc->sc_dev,
2410 			    "Can't parse phy_sku in B0, empty sections\n");
2411 			return NULL;
2412 		}
2413 	} else {
2414 		panic("unknown device family %d\n", sc->cfg->device_family);
2415 	}
2416 
2417 	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2418 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2419 	calib = (const uint16_t *)
2420 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2421 	regulatory = sc->cfg->nvm_type == IWM_NVM_SDP ?
2422 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data :
2423 	    (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2424 	mac_override = (const uint16_t *)
2425 	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2426 	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2427 
2428 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2429 	    phy_sku, regulatory);
2430 }
2431 
2432 static int
2433 iwm_nvm_init(struct iwm_softc *sc)
2434 {
2435 	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2436 	int i, ret, section;
2437 	uint32_t size_read = 0;
2438 	uint8_t *nvm_buffer, *temp;
2439 	uint16_t len;
2440 
2441 	memset(nvm_sections, 0, sizeof(nvm_sections));
2442 
2443 	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2444 		return EINVAL;
2445 
2446 	/* Load the NVM contents from the NIC. */
2448 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2449 
2450 	nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF, M_WAITOK | M_ZERO);
2451 	if (!nvm_buffer)
2452 		return ENOMEM;
2453 	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2454 		/* we override the constness for initial read */
2455 		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2456 					   &len, size_read);
2457 		if (ret)
2458 			continue;
2459 		size_read += len;
2460 		temp = kmalloc(len, M_DEVBUF, M_WAITOK);
2461 		if (!temp) {
2462 			ret = ENOMEM;
2463 			break;
2464 		}
2465 		memcpy(temp, nvm_buffer, len);
2466 
2467 		nvm_sections[section].data = temp;
2468 		nvm_sections[section].length = len;
2469 	}
2470 	if (!size_read)
2471 		device_printf(sc->sc_dev, "OTP is blank\n");
2472 	kfree(nvm_buffer, M_DEVBUF);
2473 
2474 	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2475 	if (!sc->nvm_data)
2476 		return EINVAL;
2477 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2478 		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2479 
2480 	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2481 		if (nvm_sections[i].data != NULL)
2482 			kfree(nvm_sections[i].data, M_DEVBUF);
2483 	}
2484 
2485 	return 0;
2486 }
2487 
2488 static int
2489 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2490 	const struct iwm_fw_desc *section)
2491 {
2492 	struct iwm_dma_info *dma = &sc->fw_dma;
2493 	uint8_t *v_addr;
2494 	bus_addr_t p_addr;
2495 	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2496 	int ret = 0;
2497 
2498 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2499 		    "%s: [%d] uCode section being loaded...\n",
2500 		    __func__, section_num);
2501 
2502 	v_addr = dma->vaddr;
2503 	p_addr = dma->paddr;
2504 
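	/*
	 * Copy the section through the fw_dma bounce buffer in chunks of
	 * at most IWM_FH_MEM_TB_MAX_LENGTH bytes.  Destinations inside the
	 * extended SRAM window additionally require the LMPM_CHICK extended
	 * address bit while the chunk is in flight.
	 */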
2505 	for (offset = 0; offset < section->len; offset += chunk_sz) {
2506 		uint32_t copy_size, dst_addr;
2507 		int extended_addr = FALSE;
2508 
2509 		copy_size = MIN(chunk_sz, section->len - offset);
2510 		dst_addr = section->offset + offset;
2511 
2512 		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2513 		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2514 			extended_addr = TRUE;
2515 
2516 		if (extended_addr)
2517 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2518 					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2519 
2520 		memcpy(v_addr, (const uint8_t *)section->data + offset,
2521 		    copy_size);
2522 		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2523 		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2524 						   copy_size);
2525 
2526 		if (extended_addr)
2527 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2528 					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2529 
2530 		if (ret) {
2531 			device_printf(sc->sc_dev,
2532 			    "%s: Could not load the [%d] uCode section\n",
2533 			    __func__, section_num);
2534 			break;
2535 		}
2536 	}
2537 
2538 	return ret;
2539 }
2540 
2541 /*
2542  * ucode
2543  */
2544 static int
2545 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2546 			     bus_addr_t phy_addr, uint32_t byte_cnt)
2547 {
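	/*
	 * Program the FH service channel for a single-TB DMA transfer:
	 * pause the channel, set the SRAM destination and DRAM source
	 * addresses plus the byte count, mark the buffer valid, then
	 * re-enable the channel.  The interrupt path sets sc_fw_chunk_done
	 * and wakes sc_fw when the transfer completes.
	 */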
2548 	sc->sc_fw_chunk_done = 0;
2549 
2550 	if (!iwm_nic_lock(sc))
2551 		return EBUSY;
2552 
2553 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2554 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2555 
2556 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2557 	    dst_addr);
2558 
2559 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2560 	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2561 
2562 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2563 	    (iwm_get_dma_hi_addr(phy_addr)
2564 	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2565 
2566 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2567 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2568 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2569 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2570 
2571 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2572 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2573 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2574 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2575 
2576 	iwm_nic_unlock(sc);
2577 
2578 	/* wait up to 5s for this segment to load */
2579 	lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz * 5);
2580 
2581 	if (!sc->sc_fw_chunk_done) {
2582 		device_printf(sc->sc_dev,
2583 		    "fw chunk addr 0x%x len %d failed to load\n",
2584 		    dst_addr, byte_cnt);
2585 		return ETIMEDOUT;
2586 	}
2587 
2588 	return 0;
2589 }
2590 
2591 static int
2592 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2593 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2594 {
2595 	int shift_param;
2596 	int i, ret = 0, sec_num = 0x1;
2597 	uint32_t val, last_read_idx = 0;
2598 
2599 	if (cpu == 1) {
2600 		shift_param = 0;
2601 		*first_ucode_section = 0;
2602 	} else {
2603 		shift_param = 16;
2604 		(*first_ucode_section)++;
2605 	}
2606 
2607 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2608 		last_read_idx = i;
2609 
2610 		/*
2611 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2612 		 * CPU1 sections from the CPU2 sections.
2613 		 * The PAGING_SEPARATOR_SECTION delimiter separates the
2614 		 * non-paged CPU2 sections from the CPU2 paging sections.
2615 		 */
2616 		if (!image->sec[i].data ||
2617 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2618 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2619 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2620 				    "Break since Data not valid or Empty section, sec = %d\n",
2621 				    i);
2622 			break;
2623 		}
2624 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2625 		if (ret)
2626 			return ret;
2627 
2628 		/* Notify the ucode of the loaded section number and status */
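		/*
		 * The load-status word accumulates a mask of ones, one more
		 * bit per loaded section (sec_num is shifted left and OR'd
		 * with 1 after each write); CPU2's mask occupies the upper
		 * 16 bits (shift_param == 16).
		 */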
2629 		if (iwm_nic_lock(sc)) {
2630 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2631 			val = val | (sec_num << shift_param);
2632 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2633 			sec_num = (sec_num << 1) | 0x1;
2634 			iwm_nic_unlock(sc);
2635 		}
2636 	}
2637 
2638 	*first_ucode_section = last_read_idx;
2639 
2640 	iwm_enable_interrupts(sc);
2641 
2642 	if (iwm_nic_lock(sc)) {
2643 		if (cpu == 1)
2644 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2645 		else
2646 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2647 		iwm_nic_unlock(sc);
2648 	}
2649 
2650 	return 0;
2651 }
2652 
2653 static int
2654 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2655 	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2656 {
2657 	int shift_param;
2658 	int i, ret = 0;
2659 	uint32_t last_read_idx = 0;
2660 
2661 	if (cpu == 1) {
2662 		shift_param = 0;
2663 		*first_ucode_section = 0;
2664 	} else {
2665 		shift_param = 16;
2666 		(*first_ucode_section)++;
2667 	}
2668 
2669 	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2670 		last_read_idx = i;
2671 
2672 		/*
2673 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2674 		 * CPU1 sections from the CPU2 sections.
2675 		 * The PAGING_SEPARATOR_SECTION delimiter separates the
2676 		 * non-paged CPU2 sections from the CPU2 paging sections.
2677 		 */
2678 		if (!image->sec[i].data ||
2679 		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2680 		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2681 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2682 				    "Break since Data not valid or Empty section, sec = %d\n",
2683 				     i);
2684 			break;
2685 		}
2686 
2687 		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2688 		if (ret)
2689 			return ret;
2690 	}
2691 
2692 	*first_ucode_section = last_read_idx;
2693 
2694 	return 0;
2696 }
2697 
2698 static int
2699 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2700 {
2701 	int ret = 0;
2702 	int first_ucode_section;
2703 
2704 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2705 		     image->is_dual_cpus ? "Dual" : "Single");
2706 
2707 	/* load to FW the binary non secured sections of CPU1 */
2708 	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2709 	if (ret)
2710 		return ret;
2711 
2712 	if (image->is_dual_cpus) {
2713 		/* set CPU2 header address */
2714 		if (iwm_nic_lock(sc)) {
2715 			iwm_write_prph(sc,
2716 				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2717 				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2718 			iwm_nic_unlock(sc);
2719 		}
2720 
2721 		/* load to FW the binary sections of CPU2 */
2722 		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2723 						 &first_ucode_section);
2724 		if (ret)
2725 			return ret;
2726 	}
2727 
2728 	iwm_enable_interrupts(sc);
2729 
2730 	/* release CPU reset */
2731 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2732 
2733 	return 0;
2734 }
2735 
2736 int
2737 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2738 	const struct iwm_fw_img *image)
2739 {
2740 	int ret = 0;
2741 	int first_ucode_section;
2742 
2743 	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2744 		    image->is_dual_cpus ? "Dual" : "Single");
2745 
2746 	/* configure the ucode to be ready to get the secured image */
2747 	/* release CPU reset */
2748 	if (iwm_nic_lock(sc)) {
2749 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2750 		    IWM_RELEASE_CPU_RESET_BIT);
2751 		iwm_nic_unlock(sc);
2752 	}
2753 
2754 	/* load to FW the binary Secured sections of CPU1 */
2755 	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2756 	    &first_ucode_section);
2757 	if (ret)
2758 		return ret;
2759 
2760 	/* load to FW the binary sections of CPU2 */
2761 	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2762 	    &first_ucode_section);
2763 }
2764 
2765 /* XXX Get rid of this definition */
2766 static inline void
2767 iwm_enable_fw_load_int(struct iwm_softc *sc)
2768 {
2769 	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2770 	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2771 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2772 }
2773 
2774 /* XXX Add proper rfkill support code */
2775 static int
2776 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2777 {
2778 	int ret;
2779 
2780 	/* This may fail if AMT took ownership of the device */
2781 	if (iwm_prepare_card_hw(sc)) {
2782 		device_printf(sc->sc_dev,
2783 		    "%s: Exit HW not ready\n", __func__);
2784 		ret = EIO;
2785 		goto out;
2786 	}
2787 
2788 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2789 
2790 	iwm_disable_interrupts(sc);
2791 
2792 	/* make sure rfkill handshake bits are cleared */
2793 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2794 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2795 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2796 
2797 	/* clear (again), then enable host interrupts */
2798 	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2799 
2800 	ret = iwm_nic_init(sc);
2801 	if (ret) {
2802 		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2803 		goto out;
2804 	}
2805 
2806 	/*
2807 	 * Now, we load the firmware and don't want to be interrupted, even
2808 	 * by the RF-Kill interrupt (hence mask all interrupts besides the
2809 	 * FH_TX interrupt, which is needed to load the firmware).  If the
2810 	 * RF-Kill switch is toggled, we will find out after having loaded
2811 	 * the firmware and return the proper value to the caller.
2812 	 */
2813 	iwm_enable_fw_load_int(sc);
2814 
2815 	/* really make sure rfkill handshake bits are cleared */
2816 	/* maybe we should write a few times more?  just to make sure */
2817 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2818 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2819 
2820 	/* Load the given image to the HW */
2821 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
2822 		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2823 	else
2824 		ret = iwm_pcie_load_given_ucode(sc, fw);
2825 
2826 	/* XXX re-check RF-Kill state */
2827 
2828 out:
2829 	return ret;
2830 }
2831 
2832 static int
2833 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2834 {
2835 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2836 		.valid = htole32(valid_tx_ant),
2837 	};
2838 
2839 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2840 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2841 }
2842 
2843 /* iwlwifi: mvm/fw.c */
2844 static int
2845 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2846 {
2847 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2848 	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2849 
2850 	/* Set parameters */
2851 	phy_cfg_cmd.phy_cfg = htole32(iwm_get_phy_config(sc));
2852 	phy_cfg_cmd.calib_control.event_trigger =
2853 	    sc->sc_default_calib[ucode_type].event_trigger;
2854 	phy_cfg_cmd.calib_control.flow_trigger =
2855 	    sc->sc_default_calib[ucode_type].flow_trigger;
2856 
2857 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2858 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2859 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2860 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2861 }
2862 
2863 static int
2864 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2865 {
2866 	struct iwm_alive_data *alive_data = data;
2867 	struct iwm_alive_resp_v3 *palive3;
2868 	struct iwm_alive_resp *palive;
2869 	struct iwm_umac_alive *umac;
2870 	struct iwm_lmac_alive *lmac1;
2871 	struct iwm_lmac_alive *lmac2 = NULL;
2872 	uint16_t status;
2873 
2874 	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2875 		palive = (void *)pkt->data;
2876 		umac = &palive->umac_data;
2877 		lmac1 = &palive->lmac_data[0];
2878 		lmac2 = &palive->lmac_data[1];
2879 		status = le16toh(palive->status);
2880 	} else {
2881 		palive3 = (void *)pkt->data;
2882 		umac = &palive3->umac_data;
2883 		lmac1 = &palive3->lmac_data;
2884 		status = le16toh(palive3->status);
2885 	}
2886 
2887 	sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
2888 	if (lmac2)
2889 		sc->error_event_table[1] =
2890 			le32toh(lmac2->error_event_table_ptr);
2891 	sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
2892 	sc->umac_error_event_table = le32toh(umac->error_info_addr);
2893 	alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
2894 	alive_data->valid = status == IWM_ALIVE_STATUS_OK;
2895 	if (sc->umac_error_event_table)
2896 		sc->support_umac_log = TRUE;
2897 
2898 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2899 		    "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
2900 		    status, lmac1->ver_type, lmac1->ver_subtype);
2901 
2902 	if (lmac2)
2903 		IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");
2904 
2905 	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2906 		    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2907 		    le32toh(umac->umac_major),
2908 		    le32toh(umac->umac_minor));
2909 
2910 	return TRUE;
2911 }
2912 
2913 static int
2914 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2915 	struct iwm_rx_packet *pkt, void *data)
2916 {
2917 	struct iwm_phy_db *phy_db = data;
2918 
2919 	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2920 		if (pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2921 			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2922 			    __func__, pkt->hdr.code);
2923 		}
2924 		return TRUE;
2925 	}
2926 
2927 	if (iwm_phy_db_set_section(phy_db, pkt)) {
2928 		device_printf(sc->sc_dev,
2929 		    "%s: iwm_phy_db_set_section failed\n", __func__);
2930 	}
2931 
2932 	return FALSE;
2933 }
2934 
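/*
 * Load the requested uCode image and block until the firmware's ALIVE
 * notification arrives (or times out).  On success, record the scheduler
 * base address it reports and set up firmware paging if the image uses
 * paging.
 */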
2935 static int
2936 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
2937 	enum iwm_ucode_type ucode_type)
2938 {
2939 	struct iwm_notification_wait alive_wait;
2940 	struct iwm_alive_data alive_data;
2941 	const struct iwm_fw_img *fw;
2942 	enum iwm_ucode_type old_type = sc->cur_ucode;
2943 	int error;
2944 	static const uint16_t alive_cmd[] = { IWM_ALIVE };
2945 
2946 	fw = &sc->sc_fw.img[ucode_type];
2947 	sc->cur_ucode = ucode_type;
2948 	sc->ucode_loaded = FALSE;
2949 
2950 	memset(&alive_data, 0, sizeof(alive_data));
2951 	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2952 				   alive_cmd, nitems(alive_cmd),
2953 				   iwm_alive_fn, &alive_data);
2954 
2955 	error = iwm_start_fw(sc, fw);
2956 	if (error) {
2957 		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2958 		sc->cur_ucode = old_type;
2959 		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2960 		return error;
2961 	}
2962 
2963 	/*
2964 	 * Some things may run in the background now, but we
2965 	 * just wait for the ALIVE notification here.
2966 	 */
2967 	IWM_UNLOCK(sc);
2968 	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2969 				      IWM_UCODE_ALIVE_TIMEOUT);
2970 	IWM_LOCK(sc);
2971 	if (error) {
2972 		if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2973 			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2974 			if (iwm_nic_lock(sc)) {
2975 				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2976 				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2977 				iwm_nic_unlock(sc);
2978 			}
2979 			device_printf(sc->sc_dev,
2980 			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2981 			    a, b);
2982 		}
2983 		sc->cur_ucode = old_type;
2984 		return error;
2985 	}
2986 
2987 	if (!alive_data.valid) {
2988 		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2989 		    __func__);
2990 		sc->cur_ucode = old_type;
2991 		return EIO;
2992 	}
2993 
2994 	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2995 
2996 	/*
2997 	 * Configure and operate the firmware paging mechanism.
2998 	 * The driver configures the paging flow only once; the CPU2 paging
2999 	 * image is included in the IWM_UCODE_INIT image.
3000 	 */
3001 	if (fw->paging_mem_size) {
3002 		error = iwm_save_fw_paging(sc, fw);
3003 		if (error) {
3004 			device_printf(sc->sc_dev,
3005 			    "%s: failed to save the FW paging image\n",
3006 			    __func__);
3007 			return error;
3008 		}
3009 
3010 		error = iwm_send_paging_cmd(sc, fw);
3011 		if (error) {
3012 			device_printf(sc->sc_dev,
3013 			    "%s: failed to send the paging cmd\n", __func__);
3014 			iwm_free_fw_paging(sc);
3015 			return error;
3016 		}
3017 	}
3018 
3019 	if (!error)
3020 		sc->ucode_loaded = TRUE;
3021 	return error;
3022 }
3023 
3024 /*
3025  * mvm misc bits
3026  */
3027 
3028 /*
3029  * follows iwlwifi/fw.c
3030  */
3031 static int
3032 iwm_run_init_ucode(struct iwm_softc *sc, int justnvm)
3033 {
3034 	struct iwm_notification_wait calib_wait;
3035 	static const uint16_t init_complete[] = {
3036 		IWM_INIT_COMPLETE_NOTIF,
3037 		IWM_CALIB_RES_NOTIF_PHY_DB
3038 	};
3039 	int ret;
3040 
3041 	/* do not operate with rfkill switch turned on */
3042 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3043 		device_printf(sc->sc_dev,
3044 		    "radio is disabled by hardware switch\n");
3045 		return EPERM;
3046 	}
3047 
3048 	iwm_init_notification_wait(sc->sc_notif_wait,
3049 				   &calib_wait,
3050 				   init_complete,
3051 				   nitems(init_complete),
3052 				   iwm_wait_phy_db_entry,
3053 				   sc->sc_phy_db);
3054 
3055 	/* Will also start the device */
3056 	ret = iwm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
3057 	if (ret) {
3058 		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
3059 		    ret);
3060 		goto error;
3061 	}
3062 
3063 	if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
3064 		ret = iwm_send_bt_init_conf(sc);
3065 		if (ret) {
3066 			device_printf(sc->sc_dev,
3067 			    "failed to send bt coex configuration: %d\n", ret);
3068 			goto error;
3069 		}
3070 	}
3071 
3072 	if (justnvm) {
3073 		/* Read nvm */
3074 		ret = iwm_nvm_init(sc);
3075 		if (ret) {
3076 			device_printf(sc->sc_dev, "failed to read nvm\n");
3077 			goto error;
3078 		}
3079 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
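		/*
		 * NVM-only bring-up: nothing left to wait for, so take the
		 * error path to drop the calib_wait notification waiter.
		 * This is not an error.
		 */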
3080 		goto error;
3081 	}
3082 
3083 	/* Send TX valid antennas before triggering calibrations */
3084 	ret = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
3085 	if (ret) {
3086 		device_printf(sc->sc_dev,
3087 		    "failed to send antennas before calibration: %d\n", ret);
3088 		goto error;
3089 	}
3090 
3091 	/*
3092 	 * Send phy configurations command to init uCode
3093 	 * to start the 16.0 uCode init image internal calibrations.
3094 	 */
3095 	ret = iwm_send_phy_cfg_cmd(sc);
3096 	if (ret) {
3097 		device_printf(sc->sc_dev,
3098 		    "%s: Failed to run INIT calibrations: %d\n",
3099 		    __func__, ret);
3100 		goto error;
3101 	}
3102 
3103 	/*
3104 	 * Nothing to do but wait for the init complete notification
3105 	 * from the firmware.
3106 	 */
3107 	IWM_UNLOCK(sc);
3108 	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3109 	    IWM_UCODE_CALIB_TIMEOUT);
3110 	IWM_LOCK(sc);
3111 
3113 	goto out;
3114 
3115 error:
3116 	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3117 out:
3118 	return ret;
3119 }
3120 
3121 static int
3122 iwm_config_ltr(struct iwm_softc *sc)
3123 {
3124 	struct iwm_ltr_config_cmd cmd = {
3125 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
3126 	};
3127 
3128 	if (!sc->sc_ltr_enabled)
3129 		return 0;
3130 
3131 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3132 }
3133 
3134 /*
3135  * receive side
3136  */
3137 
3138 /* (re)stock rx ring, called at init-time and at runtime */
3139 static int
3140 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3141 {
3142 	struct iwm_rx_ring *ring = &sc->rxq;
3143 	struct iwm_rx_data *data = &ring->data[idx];
3144 	struct mbuf *m;
3145 	bus_dmamap_t dmamap;
3146 	bus_dma_segment_t seg;
3147 	int nsegs, error;
3148 
3149 	m = m_getjcl(M_WAITOK, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3150 	if (m == NULL)
3151 		return ENOBUFS;
3152 
3153 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3154 #if defined(__DragonFly__)
3155 	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
3156 	    m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
3157 #else
3158 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3159 	    &seg, &nsegs, BUS_DMA_NOWAIT);
3160 #endif
3161 	if (error != 0) {
3162 		device_printf(sc->sc_dev,
3163 		    "%s: can't map mbuf, error %d\n", __func__, error);
3164 		m_freem(m);
3165 		return error;
3166 	}
3167 
3168 	if (data->m != NULL)
3169 		bus_dmamap_unload(ring->data_dmat, data->map);
3170 
3171 	/* Swap ring->spare_map with data->map */
3172 	dmamap = data->map;
3173 	data->map = ring->spare_map;
3174 	ring->spare_map = dmamap;
3175 
3176 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3177 	data->m = m;
3178 
3179 	/* Update RX descriptor. */
3180 	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
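	/*
	 * MQ RX hardware consumes 64-bit DMA addresses from the free
	 * descriptor ring; legacy hardware consumes 32-bit words holding
	 * the 256-byte-aligned address shifted right by 8.
	 */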
3181 	if (sc->cfg->mqrx_supported)
3182 		((uint64_t *)ring->desc)[idx] = htole64(seg.ds_addr);
3183 	else
3184 		((uint32_t *)ring->desc)[idx] = htole32(seg.ds_addr >> 8);
3185 	bus_dmamap_sync(ring->free_desc_dma.tag, ring->free_desc_dma.map,
3186 	    BUS_DMASYNC_PREWRITE);
3187 
3188 	return 0;
3189 }
3190 
3191 static void
3192 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3193 {
3194 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3195 
3196 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3197 
3198 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3199 }
3200 
3201 /*
3202  * Retrieve the average noise (in dBm) among receivers.
3203  */
3204 static int
3205 iwm_get_noise(struct iwm_softc *sc,
3206     const struct iwm_statistics_rx_non_phy *stats)
3207 {
3208 	int i, total, nbant, noise;
3209 
3210 	total = nbant = noise = 0;
3211 	for (i = 0; i < 3; i++) {
3212 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3213 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3214 		    __func__,
3215 		    i,
3216 		    noise);
3217 
3218 		if (noise) {
3219 			total += noise;
3220 			nbant++;
3221 		}
3222 	}
3223 
3224 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3225 	    __func__, nbant, total);
3226 #if 0
3227 	/* There should be at least one antenna but check anyway. */
3228 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3229 #else
3230 	/* For now, just hard-code it to -96 to be safe */
3231 	return (-96);
3232 #endif
3233 }
3234 
3235 static void
3236 iwm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3237 {
3238 	struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3239 
3240 	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3241 	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3242 }
3243 
3244 /* iwlwifi: mvm/rx.c */
3245 /*
3246  * iwm_rx_get_signal_strength - use the new RX PHY INFO API.
3247  * Values are reported by the fw as positive and need to be negated to
3248  * obtain their dBm.  Account for missing antennas by replacing 0
3249  * values with -256 dBm: practically zero power and an infeasible 8-bit
3250  * value.
3251  */
3251 static int
3252 iwm_rx_get_signal_strength(struct iwm_softc *sc,
3253     struct iwm_rx_phy_info *phy_info)
3254 {
3255 	int energy_a, energy_b, energy_c, max_energy;
3256 	uint32_t val;
3257 
3258 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3259 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3260 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3261 	energy_a = energy_a ? -energy_a : -256;
3262 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3263 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3264 	energy_b = energy_b ? -energy_b : -256;
3265 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3266 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3267 	energy_c = energy_c ? -energy_c : -256;
3268 	max_energy = MAX(energy_a, energy_b);
3269 	max_energy = MAX(max_energy, energy_c);
3270 
3271 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3272 	    "energy In A %d B %d C %d , and max %d\n",
3273 	    energy_a, energy_b, energy_c, max_energy);
3274 
3275 	return max_energy;
3276 }
3277 
3278 static int
3279 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
3280     struct iwm_rx_mpdu_desc *desc)
3281 {
3282 	int energy_a, energy_b;
3283 
3284 	energy_a = desc->v1.energy_a;
3285 	energy_b = desc->v1.energy_b;
3286 	energy_a = energy_a ? -energy_a : -256;
3287 	energy_b = energy_b ? -energy_b : -256;
3288 	return MAX(energy_a, energy_b);
3289 }
3290 
3291 /*
3292  * iwm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3293  *
3294  * Handles the actual data of the Rx packet from the fw
3295  */
3296 static bool
3297 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3298     bool stolen)
3299 {
3300 	struct ieee80211com *ic = &sc->sc_ic;
3301 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3302 	struct ieee80211_frame *wh;
3303 	struct ieee80211_rx_stats rxs;
3304 	struct iwm_rx_phy_info *phy_info;
3305 	struct iwm_rx_mpdu_res_start *rx_res;
3306 	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3307 	uint32_t len;
3308 	uint32_t rx_pkt_status;
3309 	int rssi;
3310 
3311 	phy_info = &sc->sc_last_phy_info;
3312 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3313 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3314 	len = le16toh(rx_res->byte_count);
3315 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3316 
3317 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3318 		device_printf(sc->sc_dev,
3319 		    "dsp size out of range [0,20]: %d\n",
3320 		    phy_info->cfg_phy_cnt);
3321 		return false;
3322 	}
3323 
3324 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3325 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3326 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3327 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3328 		return false;
3329 	}
3330 
3331 	rssi = iwm_rx_get_signal_strength(sc, phy_info);
3332 
3333 	/* Map it to relative value */
3334 	rssi = rssi - sc->sc_noise;
3335 
3336 	/* replenish ring for the buffer we're going to feed to the sharks */
3337 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3338 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3339 		    __func__);
3340 		return false;
3341 	}
3342 
3343 	m->m_data = pkt->data + sizeof(*rx_res);
3344 	m->m_pkthdr.len = m->m_len = len;
3345 
3346 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3347 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3348 
3349 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3350 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3351 	    __func__,
3352 	    le16toh(phy_info->channel),
3353 	    le16toh(phy_info->phy_flags));
3354 
3355 	/*
3356 	 * Populate an RX state struct with the provided information.
3357 	 */
3358 	bzero(&rxs, sizeof(rxs));
3359 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3360 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3361 	rxs.c_ieee = le16toh(phy_info->channel);
3362 	if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3363 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3364 	} else {
3365 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3366 	}
3367 
3368 	/* rssi is in 1/2db units */
3369 #if !defined(__DragonFly__)
3370 	rxs.c_rssi = rssi * 2;
3371 	rxs.c_nf = sc->sc_noise;
3372 #else
3373 	/* old DFly ieee80211 ABI does not have c_rssi */
3374 #endif
3375 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3376 		return false;
3377 
3378 	if (ieee80211_radiotap_active_vap(vap)) {
3379 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3380 
3381 		tap->wr_flags = 0;
3382 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3383 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3384 		tap->wr_chan_freq = htole16(rxs.c_freq);
3385 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3386 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3387 		tap->wr_dbm_antsignal = (int8_t)rssi;
3388 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3389 		tap->wr_tsft = phy_info->system_timestamp;
3390 		switch (phy_info->rate) {
3391 		/* CCK rates. */
3392 		case  10: tap->wr_rate =   2; break;
3393 		case  20: tap->wr_rate =   4; break;
3394 		case  55: tap->wr_rate =  11; break;
3395 		case 110: tap->wr_rate =  22; break;
3396 		/* OFDM rates. */
3397 		case 0xd: tap->wr_rate =  12; break;
3398 		case 0xf: tap->wr_rate =  18; break;
3399 		case 0x5: tap->wr_rate =  24; break;
3400 		case 0x7: tap->wr_rate =  36; break;
3401 		case 0x9: tap->wr_rate =  48; break;
3402 		case 0xb: tap->wr_rate =  72; break;
3403 		case 0x1: tap->wr_rate =  96; break;
3404 		case 0x3: tap->wr_rate = 108; break;
3405 		/* Unknown rate: should not happen. */
3406 		default:  tap->wr_rate =   0;
3407 		}
3408 	}
3409 
3410 	return true;
3411 }
3412 
3413 static bool
3414 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3415     bool stolen)
3416 {
3417 	struct ieee80211com *ic = &sc->sc_ic;
3418 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3419 	struct ieee80211_frame *wh;
3420 	struct ieee80211_rx_stats rxs;
3421 	struct iwm_rx_mpdu_desc *desc;
3422 	struct iwm_rx_packet *pkt;
3423 	int rssi;
3424 	uint32_t hdrlen, len, rate_n_flags;
3425 	uint16_t phy_info;
3426 	uint8_t channel;
3427 
3428 	pkt = mtodo(m, offset);
3429 	desc = (void *)pkt->data;
3430 
3431 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
3432 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
		    "Bad CRC or FIFO: 0x%04X.\n", le16toh(desc->status));
3435 		return false;
3436 	}
3437 
3438 	channel = desc->v1.channel;
3439 	len = le16toh(desc->mpdu_len);
3440 	phy_info = le16toh(desc->phy_info);
	rate_n_flags = le32toh(desc->v1.rate_n_flags);
3442 
	m->m_data = pkt->data + sizeof(*desc);
	m->m_pkthdr.len = m->m_len = len;
	wh = mtod(m, struct ieee80211_frame *);
3447 
3448 	/* Account for padding following the frame header. */
3449 	if ((desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD)) {
3450 		hdrlen = ieee80211_anyhdrsize(wh);
3451 		memmove(mtodo(m, 2), mtodo(m, 0), hdrlen);
3452 		m->m_data = mtodo(m, 2);
3453 		wh = mtod(m, struct ieee80211_frame *);
3454 	}
3455 
	rssi = iwm_rxmq_get_signal_strength(sc, desc);
	/* Map it to relative value */
	rssi = rssi - sc->sc_noise;
3459 
3460 	/* replenish ring for the buffer we're going to feed to the sharks */
3461 	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3462 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3463 		    __func__);
3464 		return false;
3465 	}
3466 
3467 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3468 	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3469 
3470 	/*
3471 	 * Populate an RX state struct with the provided information.
3472 	 */
3473 	bzero(&rxs, sizeof(rxs));
3474 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3475 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3476 	rxs.c_ieee = channel;
3477 	rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
3478 	    channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
3479 
	/* rssi is in 1/2 dB units */
3481 #if !defined(__DragonFly__)
3482 	rxs.c_rssi = rssi * 2;
3483 	rxs.c_nf = sc->sc_noise;
3484 #else
3485 	/* old DFly ieee80211 ABI does not have c_rssi */
3486 #endif
3487 	if (ieee80211_add_rx_params(m, &rxs) == 0)
3488 		return false;
3489 
3490 	if (ieee80211_radiotap_active_vap(vap)) {
3491 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3492 
3493 		tap->wr_flags = 0;
3494 		if ((phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE) != 0)
3495 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3496 		tap->wr_chan_freq = htole16(rxs.c_freq);
3497 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3498 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3499 		tap->wr_dbm_antsignal = (int8_t)rssi;
3500 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3501 		tap->wr_tsft = desc->v1.gp2_on_air_rise;
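		/* Same PLCP-to-radiotap rate mapping as in iwm_rx_rx_mpdu(). */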
3502 		switch ((rate_n_flags & 0xff)) {
3503 		/* CCK rates. */
3504 		case  10: tap->wr_rate =   2; break;
3505 		case  20: tap->wr_rate =   4; break;
3506 		case  55: tap->wr_rate =  11; break;
3507 		case 110: tap->wr_rate =  22; break;
3508 		/* OFDM rates. */
3509 		case 0xd: tap->wr_rate =  12; break;
3510 		case 0xf: tap->wr_rate =  18; break;
3511 		case 0x5: tap->wr_rate =  24; break;
3512 		case 0x7: tap->wr_rate =  36; break;
3513 		case 0x9: tap->wr_rate =  48; break;
3514 		case 0xb: tap->wr_rate =  72; break;
3515 		case 0x1: tap->wr_rate =  96; break;
3516 		case 0x3: tap->wr_rate = 108; break;
3517 		/* Unknown rate: should not happen. */
3518 		default:  tap->wr_rate =   0;
3519 		}
3520 	}
3521 
3522 	return true;
3523 }
3524 
3525 static bool
3526 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3527     bool stolen)
3528 {
3529 	struct ieee80211com *ic;
3530 	struct ieee80211_frame *wh;
3531 	struct ieee80211_node *ni;
3532 	bool ret;
3533 
3534 	ic = &sc->sc_ic;
3535 
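	/*
	 * Devices with multi-queue RX hardware use a different MPDU
	 * descriptor layout, handled by iwm_rx_mpdu_mq() above.
	 */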
3536 	ret = sc->cfg->mqrx_supported ?
3537 	    iwm_rx_mpdu_mq(sc, m, offset, stolen) :
3538 	    iwm_rx_rx_mpdu(sc, m, offset, stolen);
3539 	if (!ret) {
3540 #if !defined(__DragonFly__)
3541 		counter_u64_add(ic->ic_ierrors, 1);
3542 #else
3543 		++sc->sc_ic.ic_ierrors;
3544 #endif
3545 		return (ret);
3546 	}
3547 
3548 	wh = mtod(m, struct ieee80211_frame *);
3549 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3550 
3551 	IWM_UNLOCK(sc);
3552 	if (ni != NULL) {
3553 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3554 #if !defined(__DragonFly__)
3555 		ieee80211_input_mimo(ni, m);
3556 #else
3557 		ieee80211_input_mimo(ni, m, NULL);
3558 #endif
3559 		ieee80211_free_node(ni);
3560 	} else {
3561 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3562 #if !defined(__DragonFly__)
3563 		ieee80211_input_mimo_all(ic, m);
3564 #else
3565 		ieee80211_input_mimo_all(ic, m, NULL);
3566 #endif
3567 	}
3568 	IWM_LOCK(sc);
3569 
3570 	return true;
3571 }
3572 
3573 static int
3574 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3575 	struct iwm_node *in)
3576 {
3577 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3578 #if !defined(__DragonFly__)
3579 	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3580 #endif
3581 	struct ieee80211_node *ni = &in->in_ni;
3582 	struct ieee80211vap *vap = ni->ni_vap;
3583 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3584 	int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3585 	boolean_t rate_matched;
3586 	uint8_t tx_resp_rate;
3587 
3588 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3589 
3590 	/* Update rate control statistics. */
	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, "
	    "irate=%08x, wmt=%d\n",
3592 	    __func__,
3593 	    (int) le16toh(tx_resp->status.status),
3594 	    (int) le16toh(tx_resp->status.sequence),
3595 	    tx_resp->frame_count,
3596 	    tx_resp->bt_kill_count,
3597 	    tx_resp->failure_rts,
3598 	    tx_resp->failure_frame,
3599 	    le32toh(tx_resp->initial_rate),
3600 	    (int) le16toh(tx_resp->wireless_media_time));
3601 
3602 	tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3603 
3604 	/* For rate control, ignore frames sent at different initial rate */
3605 	rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3606 
3607 	if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3608 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3609 		    "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3610 		    "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3611 	}
3612 
3613 #if !defined(__DragonFly__)
3614 	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3615 		     IEEE80211_RATECTL_STATUS_LONG_RETRY;
3616 	txs->short_retries = tx_resp->failure_rts;
3617 	txs->long_retries = tx_resp->failure_frame;
3618 	if (status != IWM_TX_STATUS_SUCCESS &&
3619 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3620 		switch (status) {
3621 		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3622 			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3623 			break;
3624 		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3625 			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3626 			break;
3627 		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3628 			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3629 			break;
3630 		default:
3631 			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3632 			break;
3633 		}
3634 	} else {
3635 		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3636 	}
3637 
3638 	if (rate_matched) {
3639 		ieee80211_ratectl_tx_complete(ni, txs);
3640 
3641 		int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3642 		new_rate = vap->iv_bss->ni_txrate;
3643 		if (new_rate != 0 && new_rate != cur_rate) {
3644 			struct iwm_node *in = IWM_NODE(vap->iv_bss);
3645 			iwm_setrates(sc, in, rix);
3646 			iwm_send_lq_cmd(sc, &in->in_lq, FALSE);
3647 		}
3648 	}
3649 
3650 	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3651 #else
3652 	/*
3653 	 * XXX try to use old ieee80211 ABI, the new one isn't incorporated
3654 	 * into our ieee80211 yet.
3655 	 */
3656 	int failack = tx_resp->failure_frame;
3657 	int ret;
3658 
3659 	if (status != IWM_TX_STATUS_SUCCESS &&
3660 	    status != IWM_TX_STATUS_DIRECT_DONE) {
3661 		if (rate_matched) {
3662 			ieee80211_ratectl_tx_complete(vap, ni,
3663 			    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3664 		}
3665 		ret = 1;
3666 	} else {
3667 		if (rate_matched) {
3668 			ieee80211_ratectl_tx_complete(vap, ni,
3669 			    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3670 		}
3671 		ret = 0;
3672 	}
3673 
3674 	if (rate_matched) {
3675 		int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3676 		new_rate = vap->iv_bss->ni_txrate;
3677 		if (new_rate != 0 && new_rate != cur_rate) {
3678 			struct iwm_node *in = IWM_NODE(vap->iv_bss);
3679 			iwm_setrates(sc, in, rix);
3680 		}
3681 	}
3682 
3683 	return ret;
3684 
3685 #endif
3686 }
3687 
3688 static void
3689 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3690 {
3691 	struct iwm_cmd_header *cmd_hdr;
3692 	struct iwm_tx_ring *ring;
3693 	struct iwm_tx_data *txd;
3694 	struct iwm_node *in;
3695 	struct mbuf *m;
3696 	int idx, qid, qmsk, status;
3697 
3698 	cmd_hdr = &pkt->hdr;
3699 	idx = cmd_hdr->idx;
3700 	qid = cmd_hdr->qid;
3701 
3702 	ring = &sc->txq[qid];
3703 	txd = &ring->data[idx];
3704 	in = txd->in;
3705 	m = txd->m;
3706 
3707 	KASSERT(txd->done == 0, ("txd not done"));
3708 	KASSERT(txd->in != NULL, ("txd without node"));
3709 	KASSERT(txd->m != NULL, ("txd without mbuf"));
3710 
3711 	sc->sc_tx_timer = 0;
3712 
3713 	status = iwm_rx_tx_cmd_single(sc, pkt, in);
3714 
3715 	/* Unmap and free mbuf. */
3716 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3717 	bus_dmamap_unload(ring->data_dmat, txd->map);
3718 
3719 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3720 	    "free txd %p, in %p\n", txd, txd->in);
3721 	txd->done = 1;
3722 	txd->m = NULL;
3723 	txd->in = NULL;
3724 
3725 	ieee80211_tx_complete(&in->in_ni, m, status);
3726 
3727 	qmsk = 1 << qid;
3728 	if (--ring->queued < IWM_TX_RING_LOMARK && (sc->qfullmsk & qmsk) != 0) {
3729 		sc->qfullmsk &= ~qmsk;
3730 		if (sc->qfullmsk == 0)
3731 			iwm_start(sc);
3732 	}
3733 }
3734 
3735 /*
3736  * transmit side
3737  */
3738 
3739 /*
 * Process a "command done" firmware notification.  This is where we wake up
 * processes waiting for a synchronous command completion.
 * (Adapted from if_iwn.)
3743  */
3744 static void
3745 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3746 {
3747 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3748 	struct iwm_tx_data *data;
3749 
3750 	if (pkt->hdr.qid != IWM_CMD_QUEUE) {
3751 		return;	/* Not a command ack. */
3752 	}
3753 
3754 	/* XXX wide commands? */
3755 	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3756 	    "cmd notification type 0x%x qid %d idx %d\n",
3757 	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3758 
3759 	data = &ring->data[pkt->hdr.idx];
3760 
3761 	/* If the command was mapped in an mbuf, free it. */
3762 	if (data->m != NULL) {
3763 		bus_dmamap_sync(ring->data_dmat, data->map,
3764 		    BUS_DMASYNC_POSTWRITE);
3765 		bus_dmamap_unload(ring->data_dmat, data->map);
3766 		m_freem(data->m);
3767 		data->m = NULL;
3768 	}
3769 	wakeup(&ring->desc[pkt->hdr.idx]);
3770 
3771 	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3772 		device_printf(sc->sc_dev,
3773 		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3774 		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3775 		/* XXX call iwm_force_nmi() */
3776 	}
3777 
3778 	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3779 	ring->queued--;
3780 	if (ring->queued == 0)
3781 		iwm_pcie_clear_cmd_in_flight(sc);
3782 }
3783 
3784 #if 0
3785 /*
3786  * necessary only for block ack mode
3787  */
3788 void
3789 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3790 	uint16_t len)
3791 {
3792 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3793 	uint16_t w_val;
3794 
3795 	scd_bc_tbl = sc->sched_dma.vaddr;
3796 
3797 	len += 8; /* magic numbers came naturally from paris */
3798 	len = roundup(len, 4) / 4;
3799 
3800 	w_val = htole16(sta_id << 12 | len);
3801 
3802 	/* Update TX scheduler. */
3803 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3804 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3805 	    BUS_DMASYNC_PREWRITE);
3806 
	/*
	 * The first IWM_TFD_QUEUE_SIZE_BC_DUP entries are mirrored past
	 * the end of the table, apparently so the scheduler can read a
	 * window that wraps around the ring without special-casing it.
	 */
3808 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3809 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3810 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3811 		    BUS_DMASYNC_PREWRITE);
3812 	}
3813 }
3814 #endif
3815 
3816 static int
3817 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3818 {
3819 	int i;
3820 
3821 	for (i = 0; i < nitems(iwm_rates); i++) {
3822 		if (iwm_rates[i].rate == rate)
3823 			return (i);
3824 	}
3825 	/* XXX error? */
3826 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3827 	    "%s: couldn't find an entry for rate=%d\n",
3828 	    __func__,
3829 	    rate);
3830 	return (0);
3831 }
3832 
3833 /*
3834  * Fill in the rate related information for a transmit command.
3835  */
3836 static const struct iwm_rate *
3837 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3838 	struct mbuf *m, struct iwm_tx_cmd *tx)
3839 {
3840 	struct ieee80211_node *ni = &in->in_ni;
3841 	struct ieee80211_frame *wh;
3842 	const struct ieee80211_txparam *tp = ni->ni_txparms;
3843 	const struct iwm_rate *rinfo;
3844 	int type;
3845 	int ridx, rate_flags;
3846 
3847 	wh = mtod(m, struct ieee80211_frame *);
3848 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3849 
3850 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3851 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3852 
3853 	if (type == IEEE80211_FC0_TYPE_MGT ||
3854 	    type == IEEE80211_FC0_TYPE_CTL ||
3855 	    (m->m_flags & M_EAPOL) != 0) {
3856 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3857 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3858 		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3859 	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3860 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3861 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3862 		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3863 	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3864 		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3865 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3866 		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3867 	} else {
3868 		/* for data frames, use RS table */
3869 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3870 		ridx = iwm_rate2ridx(sc, ni->ni_txrate);
3871 		if (ridx == -1)
3872 			ridx = 0;
3873 
3874 		/* This is the index into the programmed table */
3875 		tx->initial_rate_index = 0;
3876 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3877 	}
3878 
3879 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3880 	    "%s: frame type=%d txrate %d\n",
3881 	        __func__, type, iwm_rates[ridx].rate);
3882 
3883 	rinfo = &iwm_rates[ridx];
3884 
3885 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3886 	    __func__, ridx,
3887 	    rinfo->rate,
3888 	    !! (IWM_RIDX_IS_CCK(ridx))
3889 	    );
3890 
3891 	/* XXX TODO: hard-coded TX antenna? */
3892 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_9000)
3893 		rate_flags = IWM_RATE_MCS_ANT_B_MSK;
3894 	else
3895 		rate_flags = IWM_RATE_MCS_ANT_A_MSK;
3896 	if (IWM_RIDX_IS_CCK(ridx))
3897 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3898 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3899 
3900 	return rinfo;
3901 }
3902 
3903 #define TB0_SIZE 16
3904 static int
3905 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3906 {
3907 	struct ieee80211com *ic = &sc->sc_ic;
3908 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3909 	struct iwm_node *in = IWM_NODE(ni);
3910 	struct iwm_tx_ring *ring;
3911 	struct iwm_tx_data *data;
3912 	struct iwm_tfd *desc;
3913 	struct iwm_device_cmd *cmd;
3914 	struct iwm_tx_cmd *tx;
3915 	struct ieee80211_frame *wh;
3916 	struct ieee80211_key *k = NULL;
3917 	struct mbuf *m1;
3918 	const struct iwm_rate *rinfo;
3919 	uint32_t flags;
3920 	u_int hdrlen;
3921 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3922 	int nsegs;
3923 	uint8_t tid, type;
3924 	int i, totlen, error, pad;
3925 
3926 	wh = mtod(m, struct ieee80211_frame *);
3927 	hdrlen = ieee80211_anyhdrsize(wh);
3928 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3929 	tid = 0;
3930 	ring = &sc->txq[ac];
3931 	desc = &ring->desc[ring->cur];
3932 	data = &ring->data[ring->cur];
3933 
3934 	/* Fill out iwm_tx_cmd to send to the firmware */
3935 	cmd = &ring->cmd[ring->cur];
3936 	cmd->hdr.code = IWM_TX_CMD;
3937 	cmd->hdr.flags = 0;
3938 	cmd->hdr.qid = ring->qid;
3939 	cmd->hdr.idx = ring->cur;
3940 
3941 	tx = (void *)cmd->data;
3942 	memset(tx, 0, sizeof(*tx));
3943 
3944 	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3945 
3946 	/* Encrypt the frame if need be. */
3947 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3948 		/* Retrieve key for TX && do software encryption. */
3949 		k = ieee80211_crypto_encap(ni, m);
3950 		if (k == NULL) {
3951 			m_freem(m);
3952 			return (ENOBUFS);
3953 		}
3954 		/* 802.11 header may have moved. */
3955 		wh = mtod(m, struct ieee80211_frame *);
3956 	}
3957 
3958 	if (ieee80211_radiotap_active_vap(vap)) {
3959 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3960 
3961 		tap->wt_flags = 0;
3962 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3963 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3964 		tap->wt_rate = rinfo->rate;
3965 		if (k != NULL)
3966 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3967 		ieee80211_radiotap_tx(vap, m);
3968 	}
3969 
3970 	flags = 0;
3971 	totlen = m->m_pkthdr.len;
3972 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3973 		flags |= IWM_TX_CMD_FLG_ACK;
3974 	}
3975 
3976 	if (type == IEEE80211_FC0_TYPE_DATA &&
3977 	    totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold &&
3978 	    !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3979 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3980 	}
3981 
3982 	tx->sta_id = IWM_STATION_ID;
3983 
3984 	if (type == IEEE80211_FC0_TYPE_MGT) {
3985 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3986 
3987 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3988 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3989 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3990 		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3991 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3992 		} else {
3993 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3994 		}
3995 	} else {
3996 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3997 	}
3998 
3999 	if (hdrlen & 3) {
4000 		/* First segment length must be a multiple of 4. */
4001 		flags |= IWM_TX_CMD_FLG_MH_PAD;
4002 		tx->offload_assist |= htole16(1 << IWM_TX_CMD_OFFLD_PAD);
4003 		pad = 4 - (hdrlen & 3);
4004 	} else {
4005 		tx->offload_assist = 0;
4006 		pad = 0;
4007 	}
4008 
4009 	tx->len = htole16(totlen);
4010 	tx->tid_tspec = tid;
4011 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
4012 
4013 	/* Set physical address of "scratch area". */
4014 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
4015 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
4016 
4017 	/* Copy 802.11 header in TX command. */
4018 	memcpy((uint8_t *)tx + sizeof(*tx), wh, hdrlen);
4019 
4020 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
4021 
4022 	tx->sec_ctl = 0;
4023 	tx->tx_flags |= htole32(flags);
4024 
4025 	/* Trim 802.11 header. */
4026 	m_adj(m, hdrlen);
4027 #if !defined(__DragonFly__)
4028 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
4029 	    segs, &nsegs, BUS_DMA_NOWAIT);
4030 #else
4031 	error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
4032 	    segs, IWM_MAX_SCATTER - 2,
4033 	    &nsegs, BUS_DMA_NOWAIT);
4034 #endif
4035 	if (error != 0) {
4036 		if (error != EFBIG) {
4037 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
4038 			    error);
4039 			m_freem(m);
4040 			return error;
4041 		}
4042 		/* Too many DMA segments, linearize mbuf. */
4043 #if !defined(__DragonFly__)
4044 		m1 = m_collapse(m, M_WAITOK, IWM_MAX_SCATTER - 2);
4045 #else
4046 		m1 = m_defrag(m, M_NOWAIT);
4047 #endif
4048 		if (m1 == NULL) {
4049 			device_printf(sc->sc_dev,
4050 			    "%s: could not defrag mbuf\n", __func__);
4051 			m_freem(m);
4052 			return (ENOBUFS);
4053 		}
4054 		m = m1;
4055 
4056 #if !defined(__DragonFly__)
4057 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
4058 		    segs, &nsegs, BUS_DMA_NOWAIT);
4059 #else
4060 		error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map,
4061 		    &m, segs, IWM_MAX_SCATTER - 2, &nsegs, BUS_DMA_NOWAIT);
4062 #endif
4063 		if (error != 0) {
4064 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
4065 			    error);
4066 			m_freem(m);
4067 			return error;
4068 		}
4069 	}
4070 	data->m = m;
4071 	data->in = in;
4072 	data->done = 0;
4073 
4074 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4075 	    "sending txd %p, in %p\n", data, data->in);
4076 	KASSERT(data->in != NULL, ("node is NULL"));
4077 
4078 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4079 	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
4080 	    ring->qid, ring->cur, totlen, nsegs,
4081 	    le32toh(tx->tx_flags),
4082 	    le32toh(tx->rate_n_flags),
4083 	    tx->initial_rate_index
4084 	    );
4085 
4086 	/* Fill TX descriptor. */
4087 	memset(desc, 0, sizeof(*desc));
4088 	desc->num_tbs = 2 + nsegs;
4089 
4090 	desc->tbs[0].lo = htole32(data->cmd_paddr);
4091 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
4092 	    (TB0_SIZE << 4));
4093 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
4094 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
4095 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx) +
4096 	    hdrlen + pad - TB0_SIZE) << 4));
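	/*
	 * TB0 covers the first TB0_SIZE bytes of the TX command; TB1
	 * covers the rest of the command plus the (possibly padded)
	 * 802.11 header.  The frame payload goes into the data segments
	 * filled in below.
	 */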
4097 
4098 	/* Other DMA segments are for data payload. */
4099 	for (i = 0; i < nsegs; i++) {
4100 		seg = &segs[i];
4101 		desc->tbs[i + 2].lo = htole32(seg->ds_addr);
		desc->tbs[i + 2].hi_n_len =
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr) |
		    (seg->ds_len << 4));
4105 	}
4106 
4107 	bus_dmamap_sync(ring->data_dmat, data->map,
4108 	    BUS_DMASYNC_PREWRITE);
4109 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
4110 	    BUS_DMASYNC_PREWRITE);
4111 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4112 	    BUS_DMASYNC_PREWRITE);
4113 
4114 #if 0
4115 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
4116 #endif
4117 
4118 	/* Kick TX ring. */
4119 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4120 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4121 
4122 	/* Mark TX ring as full if we reach a certain threshold. */
4123 	if (++ring->queued > IWM_TX_RING_HIMARK) {
4124 		sc->qfullmsk |= 1 << ring->qid;
4125 	}
4126 
4127 	return 0;
4128 }
4129 
4130 static int
4131 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
4132     const struct ieee80211_bpf_params *params)
4133 {
4134 	struct ieee80211com *ic = ni->ni_ic;
4135 	struct iwm_softc *sc = ic->ic_softc;
4136 	int error = 0;
4137 
4138 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4139 	    "->%s begin\n", __func__);
4140 
4141 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4142 		m_freem(m);
4143 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4144 		    "<-%s not RUNNING\n", __func__);
4145 		return (ENETDOWN);
4146         }
4147 
4148 	IWM_LOCK(sc);
4149 	/* XXX fix this */
	if (params == NULL) {
4151 		error = iwm_tx(sc, m, ni, 0);
4152 	} else {
4153 		error = iwm_tx(sc, m, ni, 0);
4154 	}
4155 	if (sc->sc_tx_timer == 0)
4156 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4157 	sc->sc_tx_timer = 5;
4158 	IWM_UNLOCK(sc);
4159 
	return (error);
4161 }
4162 
4163 /*
4164  * mvm/tx.c
4165  */
4166 
4167 /*
4168  * Note that there are transports that buffer frames before they reach
4169  * the firmware. This means that after flush_tx_path is called, the
4170  * queue might not be empty. The race-free way to handle this is to:
4171  * 1) set the station as draining
4172  * 2) flush the Tx path
4173  * 3) wait for the transport queues to be empty
4174  */
4175 int
4176 iwm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
4177 {
4178 	int ret;
4179 	struct iwm_tx_path_flush_cmd flush_cmd = {
4180 		.queues_ctl = htole32(tfd_msk),
4181 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4182 	};
4183 
4184 	ret = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
4185 	    sizeof(flush_cmd), &flush_cmd);
4186 	if (ret)
		device_printf(sc->sc_dev,
4188 		    "Flushing tx queue failed: %d\n", ret);
4189 	return ret;
4190 }
4191 
4192 /*
4193  * BEGIN mvm/quota.c
4194  */
4195 
4196 static int
4197 iwm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
4198 {
4199 	struct iwm_time_quota_cmd cmd;
4200 	int i, idx, ret, num_active_macs, quota, quota_rem;
4201 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4202 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
4203 	uint16_t id;
4204 
4205 	memset(&cmd, 0, sizeof(cmd));
4206 
4207 	/* currently, PHY ID == binding ID */
4208 	if (ivp) {
4209 		id = ivp->phy_ctxt->id;
4210 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4211 		colors[id] = ivp->phy_ctxt->color;
4212 
		n_ifs[id] = 1;
4215 	}
4216 
4217 	/*
4218 	 * The FW's scheduling session consists of
4219 	 * IWM_MAX_QUOTA fragments. Divide these fragments
4220 	 * equally between all the bindings that require quota
4221 	 */
4222 	num_active_macs = 0;
4223 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4224 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4225 		num_active_macs += n_ifs[i];
4226 	}
4227 
4228 	quota = 0;
4229 	quota_rem = 0;
4230 	if (num_active_macs) {
4231 		quota = IWM_MAX_QUOTA / num_active_macs;
4232 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
4233 	}
4234 
4235 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4236 		if (colors[i] < 0)
4237 			continue;
4238 
4239 		cmd.quotas[idx].id_and_color =
4240 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4241 
4242 		if (n_ifs[i] <= 0) {
4243 			cmd.quotas[idx].quota = htole32(0);
4244 			cmd.quotas[idx].max_duration = htole32(0);
4245 		} else {
4246 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4247 			cmd.quotas[idx].max_duration = htole32(0);
4248 		}
4249 		idx++;
4250 	}
4251 
4252 	/* Give the remainder of the session to the first binding */
4253 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4254 
4255 	ret = iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4256 	    sizeof(cmd), &cmd);
4257 	if (ret)
4258 		device_printf(sc->sc_dev,
4259 		    "%s: Failed to send quota: %d\n", __func__, ret);
4260 	return ret;
4261 }
4262 
4263 /*
4264  * END mvm/quota.c
4265  */
4266 
4267 /*
4268  * ieee80211 routines
4269  */
4270 
4271 /*
4272  * Change to AUTH state in 80211 state machine.  Roughly matches what
4273  * Linux does in bss_info_changed().
4274  */
4275 static int
4276 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4277 {
4278 	struct ieee80211_node *ni;
4279 	struct iwm_node *in;
4280 	struct iwm_vap *iv = IWM_VAP(vap);
4281 	uint32_t duration;
4282 	int error;
4283 
4284 	/*
4285 	 * XXX i have a feeling that the vap node is being
4286 	 * freed from underneath us. Grr.
4287 	 */
4288 	ni = ieee80211_ref_node(vap->iv_bss);
4289 	in = IWM_NODE(ni);
4290 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4291 	    "%s: called; vap=%p, bss ni=%p\n",
4292 	    __func__,
4293 	    vap,
4294 	    ni);
4295 	IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
4296 	    __func__, ether_sprintf(ni->ni_bssid));
4297 
4298 	in->in_assoc = 0;
4299 	iv->iv_auth = 1;
4300 
4301 	/*
4302 	 * Firmware bug - it'll crash if the beacon interval is less
4303 	 * than 16. We can't avoid connecting at all, so refuse the
	 * station state change; this will cause net80211 to abandon
4305 	 * attempts to connect to this AP, and eventually wpa_s will
4306 	 * blacklist the AP...
4307 	 */
4308 	if (ni->ni_intval < 16) {
4309 		device_printf(sc->sc_dev,
4310 		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
4311 		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
4312 		error = EINVAL;
4313 		goto out;
4314 	}
4315 
4316 	error = iwm_allow_mcast(vap, sc);
4317 	if (error) {
4318 		device_printf(sc->sc_dev,
4319 		    "%s: failed to set multicast\n", __func__);
4320 		goto out;
4321 	}
4322 
4323 	/*
4324 	 * This is where it deviates from what Linux does.
4325 	 *
4326 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4327 	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4328 	 * and always does a mac_ctx_changed().
4329 	 *
4330 	 * The openbsd port doesn't attempt to do that - it reset things
4331 	 * at odd states and does the add here.
4332 	 *
4333 	 * So, until the state handling is fixed (ie, we never reset
4334 	 * the NIC except for a firmware failure, which should drag
4335 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4336 	 * contexts that are required), let's do a dirty hack here.
4337 	 */
4338 	if (iv->is_uploaded) {
4339 		if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
4340 			device_printf(sc->sc_dev,
4341 			    "%s: failed to update MAC\n", __func__);
4342 			goto out;
4343 		}
4344 	} else {
4345 		if ((error = iwm_mac_ctxt_add(sc, vap)) != 0) {
4346 			device_printf(sc->sc_dev,
4347 			    "%s: failed to add MAC\n", __func__);
4348 			goto out;
4349 		}
4350 	}
4351 	sc->sc_firmware_state = 1;
4352 
4353 	if ((error = iwm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4354 	    in->in_ni.ni_chan, 1, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed to update PHY context\n", __func__);
4357 		goto out;
4358 	}
4359 	iv->phy_ctxt = &sc->sc_phyctxt[0];
4360 
4361 	if ((error = iwm_binding_add_vif(sc, iv)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed to add binding: %d\n", __func__, error);
4364 		goto out;
4365 	}
4366 	sc->sc_firmware_state = 2;
4367 	/*
4368 	 * Authentication becomes unreliable when powersaving is left enabled
4369 	 * here. Powersaving will be activated again when association has
4370 	 * finished or is aborted.
4371 	 */
4372 	iv->ps_disabled = TRUE;
4373 	error = iwm_power_update_mac(sc);
4374 	iv->ps_disabled = FALSE;
4375 	if (error != 0) {
4376 		device_printf(sc->sc_dev,
4377 		    "%s: failed to update power management\n",
4378 		    __func__);
4379 		goto out;
4380 	}
4381 	if ((error = iwm_add_sta(sc, in)) != 0) {
4382 		device_printf(sc->sc_dev,
4383 		    "%s: failed to add sta\n", __func__);
4384 		goto out;
4385 	}
4386 	sc->sc_firmware_state = 3;
4387 
4388 	/*
4389 	 * Prevent the FW from wandering off channel during association
4390 	 * by "protecting" the session with a time event.
4391 	 */
4392 	/* XXX duration is in units of TU, not MS */
4393 	duration = IWM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4394 	iwm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);
4395 
4396 	error = 0;
4397 out:
4398 	if (error != 0)
4399 		iv->iv_auth = 0;
4400 	ieee80211_free_node(ni);
4401 	return (error);
4402 }
4403 
4404 static struct ieee80211_node *
4405 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4406 {
4407 	return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4408 	    M_WAITOK | M_ZERO);
4409 }
4410 
4411 static uint8_t
4412 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4413 {
4414 	uint8_t plcp = rate_n_flags & 0xff;
4415 	int i;
4416 
4417 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4418 		if (iwm_rates[i].plcp == plcp)
4419 			return iwm_rates[i].rate;
4420 	}
4421 	return 0;
4422 }
4423 
4424 uint8_t
4425 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4426 {
4427 	int i;
4428 	uint8_t rval;
4429 
4430 	for (i = 0; i < rs->rs_nrates; i++) {
4431 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4432 		if (rval == iwm_rates[ridx].rate)
4433 			return rs->rs_rates[i];
4434 	}
4435 
4436 	return 0;
4437 }
4438 
4439 static int
4440 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4441 {
4442 	int i;
4443 
4444 	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4445 		if (iwm_rates[i].rate == rate)
4446 			return i;
4447 	}
4448 
4449 	device_printf(sc->sc_dev,
4450 	    "%s: WARNING: device rate for %u not found!\n",
4451 	    __func__, rate);
4452 
4453 	return -1;
4454 }
4455 
4456 
4457 static void
4458 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4459 {
4460 	struct ieee80211_node *ni = &in->in_ni;
4461 	struct iwm_lq_cmd *lq = &in->in_lq;
4462 	struct ieee80211_rateset *rs = &ni->ni_rates;
4463 	int nrates = rs->rs_nrates;
4464 	int i, ridx, tab = 0;
4465 //	int txant = 0;
4466 
4467 	KASSERT(rix >= 0 && rix < nrates, ("invalid rix"));
4468 
4469 	if (nrates > nitems(lq->rs_table)) {
4470 		device_printf(sc->sc_dev,
4471 		    "%s: node supports %d rates, driver handles "
4472 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4473 		return;
4474 	}
4475 	if (nrates == 0) {
4476 		device_printf(sc->sc_dev,
4477 		    "%s: node supports 0 rates, odd!\n", __func__);
4478 		return;
4479 	}
4480 	nrates = imin(rix + 1, nrates);
4481 
4482 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4483 	    "%s: nrates=%d\n", __func__, nrates);
4484 
4485 	/* then construct a lq_cmd based on those */
4486 	memset(lq, 0, sizeof(*lq));
4487 	lq->sta_id = IWM_STATION_ID;
4488 
4489 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4490 	if (ni->ni_flags & IEEE80211_NODE_HT)
4491 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4492 
4493 	/*
4494 	 * are these used? (we don't do SISO or MIMO)
4495 	 * need to set them to non-zero, though, or we get an error.
4496 	 */
4497 	lq->single_stream_ant_msk = 1;
4498 	lq->dual_stream_ant_msk = 1;
4499 
4500 	/*
4501 	 * Build the actual rate selection table.
4502 	 * The lowest bits are the rates.  Additionally,
4503 	 * CCK needs bit 9 to be set.  The rest of the bits
4504 	 * we add to the table select the tx antenna
4505 	 * Note that we add the rates in the highest rate first
4506 	 * (opposite of ni_rates).
4507 	 */
4508 	for (i = 0; i < nrates; i++) {
4509 		int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4510 		int nextant;
4511 
4512 		/* Map 802.11 rate to HW rate index. */
4513 		ridx = iwm_rate2ridx(sc, rate);
4514 		if (ridx == -1)
4515 			continue;
4516 
4517 #if 0
4518 		if (txant == 0)
4519 			txant = iwm_get_valid_tx_ant(sc);
4520 		nextant = 1<<(ffs(txant)-1);
4521 		txant &= ~nextant;
4522 #else
4523 		nextant = iwm_get_valid_tx_ant(sc);
4524 #endif
4525 		tab = iwm_rates[ridx].plcp;
4526 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4527 		if (IWM_RIDX_IS_CCK(ridx))
4528 			tab |= IWM_RATE_MCS_CCK_MSK;
4529 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4530 		    "station rate i=%d, rate=%d, hw=%x\n",
4531 		    i, iwm_rates[ridx].rate, tab);
4532 		lq->rs_table[i] = htole32(tab);
4533 	}
4534 	/* then fill the rest with the lowest possible rate */
4535 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4536 		KASSERT(tab != 0, ("invalid tab"));
4537 		lq->rs_table[i] = htole32(tab);
4538 	}
4539 }
4540 
4541 static int
4542 iwm_media_change(struct ifnet *ifp)
4543 {
4544 	struct ieee80211vap *vap = ifp->if_softc;
4545 	struct ieee80211com *ic = vap->iv_ic;
4546 	struct iwm_softc *sc = ic->ic_softc;
4547 	int error;
4548 
4549 	error = ieee80211_media_change(ifp);
4550 	if (error != ENETRESET)
4551 		return error;
4552 
4553 	IWM_LOCK(sc);
4554 	if (ic->ic_nrunning > 0) {
4555 		iwm_stop(sc);
4556 		iwm_init(sc);
4557 	}
4558 	IWM_UNLOCK(sc);
4559 	return error;
4560 }
4561 
4562 static void
4563 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
4564 {
4565 	struct iwm_vap *ivp = IWM_VAP(vap);
4566 	int error;
4567 
4568 	/* Avoid Tx watchdog triggering, when transfers get dropped here. */
4569 	sc->sc_tx_timer = 0;
4570 
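	/*
	 * sc_firmware_state tracks how far iwm_auth() got: 1 once the
	 * MAC context is added, 2 once the binding is added, 3 once the
	 * station is added.  Tear things down in the reverse order.
	 */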
4571 	ivp->iv_auth = 0;
4572 	if (sc->sc_firmware_state == 3) {
4573 		iwm_xmit_queue_drain(sc);
4574 //		iwm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
4575 		error = iwm_rm_sta(sc, vap, TRUE);
4576 		if (error) {
4577 			device_printf(sc->sc_dev,
4578 			    "%s: Failed to remove station: %d\n",
4579 			    __func__, error);
4580 		}
4581 	}
4582 	if (sc->sc_firmware_state == 3) {
4583 		error = iwm_mac_ctxt_changed(sc, vap);
4584 		if (error) {
4585 			device_printf(sc->sc_dev,
4586 			    "%s: Failed to change mac context: %d\n",
4587 			    __func__, error);
4588 		}
4589 	}
4590 	if (sc->sc_firmware_state == 3) {
4591 		error = iwm_sf_update(sc, vap, FALSE);
4592 		if (error) {
4593 			device_printf(sc->sc_dev,
4594 			    "%s: Failed to update smart FIFO: %d\n",
4595 			    __func__, error);
4596 		}
4597 	}
4598 	if (sc->sc_firmware_state == 3) {
4599 		error = iwm_rm_sta_id(sc, vap);
4600 		if (error) {
4601 			device_printf(sc->sc_dev,
4602 			    "%s: Failed to remove station id: %d\n",
4603 			    __func__, error);
4604 		}
4605 	}
4606 	if (sc->sc_firmware_state == 3) {
4607 		error = iwm_update_quotas(sc, NULL);
4608 		if (error) {
4609 			device_printf(sc->sc_dev,
4610 			    "%s: Failed to update PHY quota: %d\n",
4611 			    __func__, error);
4612 		}
4613 	}
4614 	if (sc->sc_firmware_state == 3) {
4615 		/* XXX Might need to specify bssid correctly. */
4616 		error = iwm_mac_ctxt_changed(sc, vap);
4617 		if (error) {
4618 			device_printf(sc->sc_dev,
4619 			    "%s: Failed to change mac context: %d\n",
4620 			    __func__, error);
4621 		}
4622 	}
4623 	if (sc->sc_firmware_state == 3) {
4624 		sc->sc_firmware_state = 2;
4625 	}
4626 	if (sc->sc_firmware_state > 1) {
4627 		error = iwm_binding_remove_vif(sc, ivp);
4628 		if (error) {
4629 			device_printf(sc->sc_dev,
4630 			    "%s: Failed to remove channel ctx: %d\n",
4631 			    __func__, error);
4632 		}
4633 	}
4634 	if (sc->sc_firmware_state > 1) {
4635 		sc->sc_firmware_state = 1;
4636 	}
4637 	ivp->phy_ctxt = NULL;
4638 	if (sc->sc_firmware_state > 0) {
4639 		error = iwm_mac_ctxt_changed(sc, vap);
4640 		if (error) {
4641 			device_printf(sc->sc_dev,
4642 			    "%s: Failed to change mac context: %d\n",
4643 			    __func__, error);
4644 		}
4645 	}
4646 	if (sc->sc_firmware_state > 0) {
4647 		error = iwm_power_update_mac(sc);
4648 		if (error != 0) {
4649 			device_printf(sc->sc_dev,
4650 			    "%s: failed to update power management\n",
4651 			    __func__);
4652 		}
4653 	}
4654 	sc->sc_firmware_state = 0;
4655 }
4656 
4657 static int
4658 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4659 {
4660 	struct iwm_vap *ivp = IWM_VAP(vap);
4661 	struct ieee80211com *ic = vap->iv_ic;
4662 	struct iwm_softc *sc = ic->ic_softc;
4663 	struct iwm_node *in;
4664 	int error;
4665 
4666 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4667 	    "switching state %s -> %s arg=0x%x\n",
4668 	    ieee80211_state_name[vap->iv_state],
4669 	    ieee80211_state_name[nstate],
4670 	    arg);
4671 
4672 	IEEE80211_UNLOCK(ic);
4673 	IWM_LOCK(sc);
4674 
4675 	if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
4676 	    (nstate == IEEE80211_S_AUTH ||
4677 	     nstate == IEEE80211_S_ASSOC ||
4678 	     nstate == IEEE80211_S_RUN)) {
4679 		/* Stop blinking for a scan, when authenticating. */
4680 		iwm_led_blink_stop(sc);
4681 	}
4682 
4683 	if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
4684 		iwm_led_disable(sc);
4685 		/* disable beacon filtering if we're hopping out of RUN */
4686 		iwm_disable_beacon_filter(sc);
4687 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4688 			in->in_assoc = 0;
4689 	}
4690 
4691 	if ((vap->iv_state == IEEE80211_S_AUTH ||
4692 	     vap->iv_state == IEEE80211_S_ASSOC ||
4693 	     vap->iv_state == IEEE80211_S_RUN) &&
4694 	    (nstate == IEEE80211_S_INIT ||
4695 	     nstate == IEEE80211_S_SCAN ||
4696 	     nstate == IEEE80211_S_AUTH)) {
4697 		iwm_stop_session_protection(sc, ivp);
4698 	}
4699 
4700 	if ((vap->iv_state == IEEE80211_S_RUN ||
4701 	     vap->iv_state == IEEE80211_S_ASSOC) &&
4702 	    nstate == IEEE80211_S_INIT) {
4703 		/*
4704 		 * In this case, iv_newstate() wants to send an 80211 frame on
		 * the network that we are leaving.  So we need to call it
		 * before tearing down all the firmware state.
4707 		 */
4708 		IWM_UNLOCK(sc);
4709 		IEEE80211_LOCK(ic);
4710 		ivp->iv_newstate(vap, nstate, arg);
4711 		IEEE80211_UNLOCK(ic);
4712 		IWM_LOCK(sc);
4713 		iwm_bring_down_firmware(sc, vap);
4714 		IWM_UNLOCK(sc);
4715 		IEEE80211_LOCK(ic);
4716 		return 0;
4717 	}
4718 
4719 	switch (nstate) {
4720 	case IEEE80211_S_INIT:
4721 	case IEEE80211_S_SCAN:
4722 		break;
4723 
4724 	case IEEE80211_S_AUTH:
4725 		iwm_bring_down_firmware(sc, vap);
4726 		if ((error = iwm_auth(vap, sc)) != 0) {
4727 			device_printf(sc->sc_dev,
4728 			    "%s: could not move to auth state: %d\n",
4729 			    __func__, error);
4730 			iwm_bring_down_firmware(sc, vap);
4731 			IWM_UNLOCK(sc);
4732 			IEEE80211_LOCK(ic);
4733 			return 1;
4734 		}
4735 		break;
4736 
4737 	case IEEE80211_S_ASSOC:
4738 		/*
4739 		 * EBS may be disabled due to previous failures reported by FW.
4740 		 * Reset EBS status here assuming environment has been changed.
4741 		 */
4742 		sc->last_ebs_successful = TRUE;
4743 		break;
4744 
4745 	case IEEE80211_S_RUN:
4746 		in = IWM_NODE(vap->iv_bss);
		/*
		 * Update the association state, now that we have it all
		 * (e.g. the associd comes in at this point).
		 */
4749 		error = iwm_update_sta(sc, in);
4750 		if (error != 0) {
4751 			device_printf(sc->sc_dev,
4752 			    "%s: failed to update STA\n", __func__);
4753 			IWM_UNLOCK(sc);
4754 			IEEE80211_LOCK(ic);
4755 			return error;
4756 		}
4757 		in->in_assoc = 1;
4758 		error = iwm_mac_ctxt_changed(sc, vap);
4759 		if (error != 0) {
4760 			device_printf(sc->sc_dev,
4761 			    "%s: failed to update MAC: %d\n", __func__, error);
4762 		}
4763 
4764 		iwm_sf_update(sc, vap, FALSE);
4765 		iwm_enable_beacon_filter(sc, ivp);
4766 		iwm_power_update_mac(sc);
4767 		iwm_update_quotas(sc, ivp);
4768 		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4769 		iwm_setrates(sc, in, rix);
4770 
4771 		if ((error = iwm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4772 			device_printf(sc->sc_dev,
4773 			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4774 		}
4775 
4776 		iwm_led_enable(sc);
4777 		break;
4778 
4779 	default:
4780 		break;
4781 	}
4782 	IWM_UNLOCK(sc);
4783 	IEEE80211_LOCK(ic);
4784 
4785 	return (ivp->iv_newstate(vap, nstate, arg));
4786 }
4787 
4788 void
4789 iwm_endscan_cb(void *arg, int pending)
4790 {
4791 	struct iwm_softc *sc = arg;
4792 	struct ieee80211com *ic = &sc->sc_ic;
4793 
4794 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4795 	    "%s: scan ended\n",
4796 	    __func__);
4797 
4798 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4799 }
4800 
4801 static int
4802 iwm_send_bt_init_conf(struct iwm_softc *sc)
4803 {
4804 	struct iwm_bt_coex_cmd bt_cmd;
4805 
4806 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4807 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4808 
4809 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4810 	    &bt_cmd);
4811 }
4812 
4813 static boolean_t
4814 iwm_is_lar_supported(struct iwm_softc *sc)
4815 {
4816 	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4817 	boolean_t tlv_lar = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4818 
4819 	if (iwm_lar_disable)
4820 		return FALSE;
4821 
4822 	/*
4823 	 * Enable LAR only if it is supported by the FW (TLV) &&
4824 	 * enabled in the NVM
4825 	 */
4826 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
4827 		return nvm_lar && tlv_lar;
4828 	else
4829 		return tlv_lar;
4830 }
4831 
4832 static boolean_t
4833 iwm_is_wifi_mcc_supported(struct iwm_softc *sc)
4834 {
4835 	return iwm_fw_has_api(sc, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4836 	    iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4837 }
4838 
4839 static int
4840 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4841 {
4842 	struct iwm_mcc_update_cmd mcc_cmd;
4843 	struct iwm_host_cmd hcmd = {
4844 		.id = IWM_MCC_UPDATE_CMD,
4845 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4846 		.data = { &mcc_cmd },
4847 	};
4848 	int ret;
4849 #ifdef IWM_DEBUG
4850 	struct iwm_rx_packet *pkt;
4851 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4852 	struct iwm_mcc_update_resp *mcc_resp;
4853 	int n_channels;
4854 	uint16_t mcc;
4855 #endif
4856 	int resp_v2 = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4857 
4858 	if (!iwm_is_lar_supported(sc)) {
4859 		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4860 		    __func__);
4861 		return 0;
4862 	}
4863 
4864 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4865 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
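	/*
	 * The MCC is the two ASCII country-code characters packed into
	 * 16 bits, e.g. "ZZ" -> 0x5a5a.
	 */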
4866 	if (iwm_is_wifi_mcc_supported(sc))
4867 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4868 	else
4869 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4870 
4871 	if (resp_v2)
4872 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4873 	else
4874 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4875 
4876 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4877 	    "send MCC update to FW with '%c%c' src = %d\n",
4878 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4879 
4880 	ret = iwm_send_cmd(sc, &hcmd);
4881 	if (ret)
4882 		return ret;
4883 
4884 #ifdef IWM_DEBUG
4885 	pkt = hcmd.resp_pkt;
4886 
4887 	/* Extract MCC response */
4888 	if (resp_v2) {
4889 		mcc_resp = (void *)pkt->data;
4890 		mcc = mcc_resp->mcc;
4891 		n_channels =  le32toh(mcc_resp->n_channels);
4892 	} else {
4893 		mcc_resp_v1 = (void *)pkt->data;
4894 		mcc = mcc_resp_v1->mcc;
4895 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4896 	}
4897 
4898 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4899 	if (mcc == 0)
4900 		mcc = 0x3030;  /* "00" - world */
4901 
4902 	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4903 	    "regulatory domain '%c%c' (%d channels available)\n",
4904 	    mcc >> 8, mcc & 0xff, n_channels);
4905 #endif
4906 	iwm_free_resp(sc, &hcmd);
4907 
4908 	return 0;
4909 }
4910 
4911 static void
4912 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4913 {
4914 	struct iwm_host_cmd cmd = {
4915 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4916 		.len = { sizeof(uint32_t), },
4917 		.data = { &backoff, },
4918 	};
4919 
4920 	if (iwm_send_cmd(sc, &cmd) != 0) {
4921 		device_printf(sc->sc_dev,
4922 		    "failed to change thermal tx backoff\n");
4923 	}
4924 }
4925 
4926 static int
4927 iwm_init_hw(struct iwm_softc *sc)
4928 {
4929 	struct ieee80211com *ic = &sc->sc_ic;
4930 	int error, i, ac;
4931 
4932 	sc->sf_state = IWM_SF_UNINIT;
4933 
4934 	if ((error = iwm_start_hw(sc)) != 0) {
4935 		kprintf("iwm_start_hw: failed %d\n", error);
4936 		return error;
4937 	}
4938 
4939 	if ((error = iwm_run_init_ucode(sc, 0)) != 0) {
4940 		kprintf("iwm_run_init_ucode: failed %d\n", error);
4941 		return error;
4942 	}
4943 
4944 	/*
4945 	 * should stop and start HW since that INIT
4946 	 * image just loaded
4947 	 */
4948 	iwm_stop_device(sc);
4949 	sc->sc_ps_disabled = FALSE;
4950 	if ((error = iwm_start_hw(sc)) != 0) {
4951 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4952 		return error;
4953 	}
4954 
	/* Restart, this time with the regular firmware. */
4956 	error = iwm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4957 	if (error) {
4958 		device_printf(sc->sc_dev, "could not load firmware\n");
4959 		goto error;
4960 	}
4961 
4962 	error = iwm_sf_update(sc, NULL, FALSE);
4963 	if (error)
4964 		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4965 
4966 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4967 		device_printf(sc->sc_dev, "bt init conf failed\n");
4968 		goto error;
4969 	}
4970 
4971 	error = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
4972 	if (error != 0) {
4973 		device_printf(sc->sc_dev, "antenna config failed\n");
4974 		goto error;
4975 	}
4976 
4977 	/* Send phy db control command and then phy db calibration */
4978 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4979 		goto error;
4980 
4981 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4982 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4983 		goto error;
4984 	}
4985 
4986 	/* Add auxiliary station for scanning */
4987 	if ((error = iwm_add_aux_sta(sc)) != 0) {
4988 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4989 		goto error;
4990 	}
4991 
4992 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4993 		/*
4994 		 * The channel used here isn't relevant as it's
4995 		 * going to be overwritten in the other flows.
4996 		 * For now use the first channel we have.
4997 		 */
4998 		if ((error = iwm_phy_ctxt_add(sc,
4999 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
5000 			goto error;
5001 	}
5002 
5003 	/* Initialize tx backoffs to the minimum. */
5004 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
5005 		iwm_tt_tx_backoff(sc, 0);
5006 
5007 	if (iwm_config_ltr(sc) != 0)
5008 		device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
5009 
5010 	error = iwm_power_update_device(sc);
5011 	if (error)
5012 		goto error;
5013 
5014 	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
5015 		goto error;
5016 
5017 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
5018 		if ((error = iwm_config_umac_scan(sc)) != 0)
5019 			goto error;
5020 	}
5021 
5022 	/* Enable Tx queues. */
5023 	for (ac = 0; ac < WME_NUM_AC; ac++) {
5024 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
5025 		    iwm_ac_to_tx_fifo[ac]);
5026 		if (error)
5027 			goto error;
5028 	}
5029 
5030 	if ((error = iwm_disable_beacon_filter(sc)) != 0) {
5031 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
5032 		goto error;
5033 	}
5034 
5035 	return 0;
5036 
5037  error:
5038 	iwm_stop_device(sc);
5039 	return error;
5040 }
5041 
5042 /* Allow multicast from our BSSID. */
5043 static int
5044 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
5045 {
5046 	struct ieee80211_node *ni = vap->iv_bss;
5047 	struct iwm_mcast_filter_cmd *cmd;
5048 	size_t size;
5049 	int error;
5050 
5051 	size = roundup(sizeof(*cmd), 4);
5052 	cmd = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
5053 	if (cmd == NULL)
5054 		return ENOMEM;
5055 	cmd->filter_own = 1;
5056 	cmd->port_id = 0;
5057 	cmd->count = 0;
5058 	cmd->pass_all = 1;
5059 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
5060 
5061 	error = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
5062 	    IWM_CMD_SYNC, size, cmd);
5063 	kfree(cmd, M_DEVBUF);
5064 
5065 	return (error);
5066 }
5067 
5068 /*
5069  * ifnet interfaces
5070  */
5071 
5072 static void
5073 iwm_init(struct iwm_softc *sc)
5074 {
5075 	int error;
5076 
5077 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
5078 		return;
5079 	}
5080 	sc->sc_generation++;
5081 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
5082 
5083 	if ((error = iwm_init_hw(sc)) != 0) {
5084 		kprintf("iwm_init_hw failed %d\n", error);
5085 		iwm_stop(sc);
5086 		return;
5087 	}
5088 
5089 	/*
5090 	 * Ok, firmware loaded and we are jogging
5091 	 */
5092 	sc->sc_flags |= IWM_FLAG_HW_INITED;
5093 }
5094 
5095 static int
5096 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
5097 {
5098 	struct iwm_softc *sc;
5099 	int error;
5100 
5101 	sc = ic->ic_softc;
5102 
5103 	IWM_LOCK(sc);
5104 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
5105 		IWM_UNLOCK(sc);
5106 		return (ENXIO);
5107 	}
5108 	error = mbufq_enqueue(&sc->sc_snd, m);
5109 	if (error) {
5110 		IWM_UNLOCK(sc);
5111 		return (error);
5112 	}
5113 	iwm_start(sc);
5114 	IWM_UNLOCK(sc);
5115 	return (0);
5116 }
5117 
5118 /*
5119  * Dequeue packets from sendq and call send.
5120  */
5121 static void
5122 iwm_start(struct iwm_softc *sc)
5123 {
5124 	struct ieee80211_node *ni;
5125 	struct mbuf *m;
5126 	int ac = 0;
5127 
5128 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5129 	while (sc->qfullmsk == 0 &&
5130 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5131 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5132 		if (iwm_tx(sc, m, ni, ac) != 0) {
5133 			if_inc_counter(ni->ni_vap->iv_ifp,
5134 			    IFCOUNTER_OERRORS, 1);
5135 			ieee80211_free_node(ni);
5136 			continue;
5137 		}
5138 		if (sc->sc_tx_timer == 0) {
5139 			callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
5140 			    sc);
5141 		}
5142 		sc->sc_tx_timer = 15;
5143 	}
5144 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5145 }
5146 
5147 static void
5148 iwm_stop(struct iwm_softc *sc)
5149 {
5150 
5151 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
5152 	sc->sc_flags |= IWM_FLAG_STOPPED;
5153 	sc->sc_generation++;
5154 	iwm_led_blink_stop(sc);
5155 	sc->sc_tx_timer = 0;
5156 	iwm_stop_device(sc);
5157 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5158 }
5159 
5160 static void
5161 iwm_watchdog(void *arg)
5162 {
5163 	struct iwm_softc *sc = arg;
5164 	struct ieee80211com *ic = &sc->sc_ic;
5165 
5166 	if (sc->sc_attached == 0)
5167 		return;
5168 
5169 	if (sc->sc_tx_timer > 0) {
5170 		if (--sc->sc_tx_timer == 0) {
5171 			device_printf(sc->sc_dev, "device timeout\n");
5172 #ifdef IWM_DEBUG
5173 			iwm_nic_error(sc);
5174 #endif
5175 			ieee80211_restart_all(ic);
5176 #if defined(__DragonFly__)
5177 			++sc->sc_ic.ic_oerrors;
5178 #else
5179 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5180 #endif
5181 			return;
5182 		}
5183 		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5184 	}
5185 }
5186 
5187 static void
5188 iwm_parent(struct ieee80211com *ic)
5189 {
5190 	struct iwm_softc *sc = ic->ic_softc;
5191 	int startall = 0;
5192 
5193 	IWM_LOCK(sc);
5194 	if (ic->ic_nrunning > 0) {
5195 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5196 			iwm_init(sc);
5197 			startall = 1;
5198 		}
5199 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5200 		iwm_stop(sc);
5201 	IWM_UNLOCK(sc);
5202 	if (startall)
5203 		ieee80211_start_all(ic);
5204 }
5205 
5206 /*
5207  * The interrupt side of things
5208  */
5209 
5210 /*
5211  * error dumping routines are from iwlwifi/mvm/utils.c
5212  */
5213 
5214 /*
5215  * Note: This structure is read from the device with IO accesses,
5216  * and the reading already does the endian conversion. As it is
5217  * read with uint32_t-sized accesses, any members with a different size
5218  * need to be ordered correctly though!
5219  */
5220 struct iwm_error_event_table {
5221 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5222 	uint32_t error_id;		/* type of error */
5223 	uint32_t trm_hw_status0;	/* TRM HW status */
5224 	uint32_t trm_hw_status1;	/* TRM HW status */
5225 	uint32_t blink2;		/* branch link */
5226 	uint32_t ilink1;		/* interrupt link */
5227 	uint32_t ilink2;		/* interrupt link */
5228 	uint32_t data1;		/* error-specific data */
5229 	uint32_t data2;		/* error-specific data */
5230 	uint32_t data3;		/* error-specific data */
5231 	uint32_t bcon_time;		/* beacon timer */
5232 	uint32_t tsf_low;		/* network timestamp function timer */
5233 	uint32_t tsf_hi;		/* network timestamp function timer */
5234 	uint32_t gp1;		/* GP1 timer register */
5235 	uint32_t gp2;		/* GP2 timer register */
5236 	uint32_t fw_rev_type;	/* firmware revision type */
5237 	uint32_t major;		/* uCode version major */
5238 	uint32_t minor;		/* uCode version minor */
5239 	uint32_t hw_ver;		/* HW Silicon version */
5240 	uint32_t brd_ver;		/* HW board version */
5241 	uint32_t log_pc;		/* log program counter */
5242 	uint32_t frame_ptr;		/* frame pointer */
5243 	uint32_t stack_ptr;		/* stack pointer */
5244 	uint32_t hcmd;		/* last host command header */
5245 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
5246 				 * rxtx_flag */
5247 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
5248 				 * host_flag */
5249 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
5250 				 * enc_flag */
5251 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
5252 				 * time_flag */
5253 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
5254 				 * wico interrupt */
5255 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
5256 	uint32_t wait_event;		/* wait event() caller address */
5257 	uint32_t l2p_control;	/* L2pControlField */
5258 	uint32_t l2p_duration;	/* L2pDurationField */
5259 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
5260 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
5261 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
5262 				 * (LMPM_PMG_SEL) */
5263 	uint32_t u_timestamp;	/* date and time of the
5264 				 * compilation */
5265 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
5266 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5267 
5268 /*
5269  * UMAC error struct - relevant starting with the 8000 chip family.
5270  * Note: This structure is read from the device with IO accesses,
5271  * and the reading already does the endian conversion. As it is
5272  * read with u32-sized accesses, any members with a different size
5273  * need to be ordered correctly though!
5274  */
5275 struct iwm_umac_error_event_table {
5276 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5277 	uint32_t error_id;	/* type of error */
5278 	uint32_t blink1;	/* branch link */
5279 	uint32_t blink2;	/* branch link */
5280 	uint32_t ilink1;	/* interrupt link */
5281 	uint32_t ilink2;	/* interrupt link */
5282 	uint32_t data1;		/* error-specific data */
5283 	uint32_t data2;		/* error-specific data */
5284 	uint32_t data3;		/* error-specific data */
5285 	uint32_t umac_major;
5286 	uint32_t umac_minor;
5287 	uint32_t frame_pointer;	/* core register 27*/
5288 	uint32_t stack_pointer;	/* core register 28 */
5289 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
5290 	uint32_t nic_isr_pref;	/* ISR status register */
5291 } __packed;
5292 
5293 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5294 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5295 
5296 #ifdef IWM_DEBUG
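/* Firmware error ID to name mapping; the last entry is the fallback. */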
5297 struct {
5298 	const char *name;
5299 	uint8_t num;
5300 } advanced_lookup[] = {
5301 	{ "NMI_INTERRUPT_WDG", 0x34 },
5302 	{ "SYSASSERT", 0x35 },
5303 	{ "UCODE_VERSION_MISMATCH", 0x37 },
5304 	{ "BAD_COMMAND", 0x38 },
5305 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5306 	{ "FATAL_ERROR", 0x3D },
5307 	{ "NMI_TRM_HW_ERR", 0x46 },
5308 	{ "NMI_INTERRUPT_TRM", 0x4C },
5309 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5310 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5311 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5312 	{ "NMI_INTERRUPT_HOST", 0x66 },
5313 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5314 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5315 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5316 	{ "ADVANCED_SYSASSERT", 0 },
5317 };
5318 
5319 static const char *
5320 iwm_desc_lookup(uint32_t num)
5321 {
5322 	int i;
5323 
5324 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5325 		if (advanced_lookup[i].num == num)
5326 			return advanced_lookup[i].name;
5327 
5328 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5329 	return advanced_lookup[i].name;
5330 }
5331 
5332 static void
5333 iwm_nic_umac_error(struct iwm_softc *sc)
5334 {
5335 	struct iwm_umac_error_event_table table;
5336 	uint32_t base;
5337 
5338 	base = sc->umac_error_event_table;
5339 
5340 	if (base < 0x800000) {
5341 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5342 		    base);
5343 		return;
5344 	}
5345 
5346 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5347 		device_printf(sc->sc_dev, "reading errlog failed\n");
5348 		return;
5349 	}
5350 
5351 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5352 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5353 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5354 		    sc->sc_flags, table.valid);
5355 	}
5356 
5357 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5358 		iwm_desc_lookup(table.error_id));
5359 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5360 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5361 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5362 	    table.ilink1);
5363 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5364 	    table.ilink2);
5365 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5366 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5367 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5368 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5369 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5370 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5371 	    table.frame_pointer);
5372 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5373 	    table.stack_pointer);
5374 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5375 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5376 	    table.nic_isr_pref);
5377 }
5378 
5379 /*
5380  * Support for dumping the error log seemed like a good idea ...
5381  * but it's mostly hex junk and the only sensible thing is the
5382  * hw/ucode revision (which we know anyway).  Since it's here,
5383  * I'll just leave it in, just in case e.g. the Intel guys want to
5384  * help us decipher some "ADVANCED_SYSASSERT" later.
5385  */
5386 static void
5387 iwm_nic_error(struct iwm_softc *sc)
5388 {
5389 	struct iwm_error_event_table table;
5390 	uint32_t base;
5391 
5392 	device_printf(sc->sc_dev, "dumping device error log\n");
5393 	base = sc->error_event_table[0];
5394 	if (base < 0x800000) {
5395 		device_printf(sc->sc_dev,
5396 		    "Invalid error log pointer 0x%08x\n", base);
5397 		return;
5398 	}
5399 
5400 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5401 		device_printf(sc->sc_dev, "reading errlog failed\n");
5402 		return;
5403 	}
5404 
5405 	if (!table.valid) {
5406 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5407 		return;
5408 	}
5409 
5410 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5411 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5412 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5413 		    sc->sc_flags, table.valid);
5414 	}
5415 
5416 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5417 	    iwm_desc_lookup(table.error_id));
5418 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5419 	    table.trm_hw_status0);
5420 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5421 	    table.trm_hw_status1);
5422 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5423 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5424 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5425 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5426 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5427 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5428 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5429 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5430 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5431 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5432 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5433 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5434 	    table.fw_rev_type);
5435 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5436 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5437 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5438 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5439 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5440 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5441 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5442 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5443 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5444 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5445 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5446 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5447 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5448 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5449 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5450 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5451 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5452 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5453 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5454 
5455 	if (sc->umac_error_event_table)
5456 		iwm_nic_umac_error(sc);
5457 }
5458 #endif
5459 
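/*
 * Walk all packets chained into one RX buffer and dispatch each
 * notification or command response to its handler.
 */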
5460 static void
5461 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5462 {
5463 	struct ieee80211com *ic = &sc->sc_ic;
5464 	struct iwm_cmd_response *cresp;
5465 	struct mbuf *m1;
5466 	uint32_t offset = 0;
5467 	uint32_t maxoff = IWM_RBUF_SIZE;
5468 	uint32_t nextoff;
5469 	boolean_t stolen = FALSE;
5470 
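/* True while another status word plus a command header still fits. */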
5471 #define HAVEROOM(a)	\
5472     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5473 
5474 	while (HAVEROOM(offset)) {
5475 		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5476 		    offset);
5477 		int qid, idx, code, len;
5478 
5479 		qid = pkt->hdr.qid;
5480 		idx = pkt->hdr.idx;
5481 
5482 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5483 
5484 		/*
5485 		 * We randomly get these from the firmware; no idea why.
5486 		 * They at least seem harmless, so just ignore them for now.
5487 		 */
5488 		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5489 		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5490 			break;
5491 		}
5492 
5493 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5494 		    "rx packet qid=%d idx=%d type=%x\n",
5495 		    qid & ~0x80, pkt->hdr.idx, code);
5496 
5497 		len = iwm_rx_packet_len(pkt);
5498 		len += sizeof(uint32_t); /* account for status word */
5499 		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5500 
5501 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5502 
5503 		switch (code) {
5504 		case IWM_REPLY_RX_PHY_CMD:
5505 			iwm_rx_rx_phy_cmd(sc, pkt);
5506 			break;
5507 
5508 		case IWM_REPLY_RX_MPDU_CMD: {
5509 			/*
5510 			 * If this is the last frame in the RX buffer, we
5511 			 * can directly feed the mbuf to the sharks here.
5512 			 */
5513 			struct iwm_rx_packet *nextpkt = mtodoff(m,
5514 			    struct iwm_rx_packet *, nextoff);
5515 			if (!HAVEROOM(nextoff) ||
5516 			    (nextpkt->hdr.code == 0 &&
5517 			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5518 			     nextpkt->hdr.idx == 0) ||
5519 			    (nextpkt->len_n_flags ==
5520 			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5521 				if (iwm_rx_mpdu(sc, m, offset, stolen)) {
5522 					stolen = FALSE;
5523 					/* Make sure we abort the loop */
5524 					nextoff = maxoff;
5525 				}
5526 				break;
5527 			}
5528 
5529 			/*
5530 			 * Use m_copym instead of m_split, because that
5531 			 * makes it easier to keep a valid rx buffer in
5532 		 * the ring when iwm_rx_mpdu() fails.
5533 			 *
5534 			 * We need to start m_copym() at offset 0, to get the
5535 			 * M_PKTHDR flag preserved.
5536 			 */
5537 			m1 = m_copym(m, 0, M_COPYALL, M_WAITOK);
5538 			if (m1) {
5539 				if (iwm_rx_mpdu(sc, m1, offset, stolen))
5540 					stolen = TRUE;
5541 				else
5542 					m_freem(m1);
5543 			}
5544 			break;
5545 		}
5546 
5547 		case IWM_TX_CMD:
5548 			iwm_rx_tx_cmd(sc, pkt);
5549 			break;
5550 
5551 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5552 			struct iwm_missed_beacons_notif *resp;
5553 			int missed;
5554 
5555 			/* XXX look at mac_id to determine interface ID */
5556 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5557 
5558 			resp = (void *)pkt->data;
5559 			missed = le32toh(resp->consec_missed_beacons);
5560 
5561 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5562 			    "%s: MISSED_BEACON: mac_id=%d, "
5563 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5564 			    "num_rx=%d\n",
5565 			    __func__,
5566 			    le32toh(resp->mac_id),
5567 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5568 			    le32toh(resp->consec_missed_beacons),
5569 			    le32toh(resp->num_expected_beacons),
5570 			    le32toh(resp->num_recvd_beacons));
5571 
5572 			/* Be paranoid */
5573 			if (vap == NULL)
5574 				break;
5575 
5576 			/* XXX no net80211 locking? */
5577 			if (vap->iv_state == IEEE80211_S_RUN &&
5578 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5579 				if (missed > vap->iv_bmissthreshold) {
5580 					/* XXX bad locking; turn into task */
5581 					IWM_UNLOCK(sc);
5582 					ieee80211_beacon_miss(ic);
5583 					IWM_LOCK(sc);
5584 				}
5585 			}
5586 
5587 			break;
5588 		}
5589 
5590 		case IWM_MFUART_LOAD_NOTIFICATION:
5591 			break;
5592 
5593 		case IWM_ALIVE:
5594 			break;
5595 
5596 		case IWM_CALIB_RES_NOTIF_PHY_DB:
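			/* Collected by the notification-wait handler during init. */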
5597 			break;
5598 
5599 		case IWM_STATISTICS_NOTIFICATION:
5600 			iwm_handle_rx_statistics(sc, pkt);
5601 			break;
5602 
5603 		case IWM_NVM_ACCESS_CMD:
5604 		case IWM_MCC_UPDATE_CMD:
5605 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5606 				memcpy(sc->sc_cmd_resp,
5607 				    pkt, sizeof(sc->sc_cmd_resp));
5608 			}
5609 			break;
5610 
5611 		case IWM_MCC_CHUB_UPDATE_CMD: {
5612 			struct iwm_mcc_chub_notif *notif;
5613 			notif = (void *)pkt->data;
5614 
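			/* The 16-bit mcc field packs a two-character country code. */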
5615 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5616 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5617 			sc->sc_fw_mcc[2] = '\0';
5618 			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5619 			    "fw source %d sent CC '%s'\n",
5620 			    notif->source_id, sc->sc_fw_mcc);
5621 			break;
5622 		}
5623 
5624 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5625 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5626 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5627 			struct iwm_dts_measurement_notif_v1 *notif;
5628 
5629 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5630 				device_printf(sc->sc_dev,
5631 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5632 				break;
5633 			}
5634 			notif = (void *)pkt->data;
5635 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5636 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5637 			    notif->temp);
5638 			break;
5639 		}
5640 
5641 		case IWM_PHY_CONFIGURATION_CMD:
5642 		case IWM_TX_ANT_CONFIGURATION_CMD:
5643 		case IWM_ADD_STA:
5644 		case IWM_MAC_CONTEXT_CMD:
5645 		case IWM_REPLY_SF_CFG_CMD:
5646 		case IWM_POWER_TABLE_CMD:
5647 		case IWM_LTR_CONFIG:
5648 		case IWM_PHY_CONTEXT_CMD:
5649 		case IWM_BINDING_CONTEXT_CMD:
5650 		case IWM_TIME_EVENT_CMD:
5651 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5652 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5653 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5654 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5655 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5656 		case IWM_REPLY_BEACON_FILTERING_CMD:
5657 		case IWM_MAC_PM_POWER_TABLE:
5658 		case IWM_TIME_QUOTA_CMD:
5659 		case IWM_REMOVE_STA:
5660 		case IWM_TXPATH_FLUSH:
5661 		case IWM_LQ_CMD:
5662 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5663 				 IWM_FW_PAGING_BLOCK_CMD):
5664 		case IWM_BT_CONFIG:
5665 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5666 			cresp = (void *)pkt->data;
5667 			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5668 				memcpy(sc->sc_cmd_resp,
5669 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5670 			}
5671 			break;
5672 
5673 		/* ignore */
5674 		case IWM_PHY_DB_CMD:
5675 			break;
5676 
5677 		case IWM_INIT_COMPLETE_NOTIF:
5678 			break;
5679 
5680 		case IWM_SCAN_OFFLOAD_COMPLETE:
5681 			iwm_rx_lmac_scan_complete_notif(sc, pkt);
5682 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5683 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5684 				ieee80211_runtask(ic, &sc->sc_es_task);
5685 			}
5686 			break;
5687 
5688 		case IWM_SCAN_ITERATION_COMPLETE: {
5689 			struct iwm_lmac_scan_complete_notif *notif;
5690 			notif = (void *)pkt->data;
5691 			break;
5692 		}
5693 
5694 		case IWM_SCAN_COMPLETE_UMAC:
5695 			iwm_rx_umac_scan_complete_notif(sc, pkt);
5696 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5697 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5698 				ieee80211_runtask(ic, &sc->sc_es_task);
5699 			}
5700 			break;
5701 
5702 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5703 			struct iwm_umac_scan_iter_complete_notif *notif;
5704 			notif = (void *)pkt->data;
5705 
5706 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5707 			    "complete, status=0x%x, %d channels scanned\n",
5708 			    notif->status, notif->scanned_channels);
5709 			break;
5710 		}
5711 
5712 		case IWM_REPLY_ERROR: {
5713 			struct iwm_error_resp *resp;
5714 			resp = (void *)pkt->data;
5715 
5716 			device_printf(sc->sc_dev,
5717 			    "firmware error 0x%x, cmd 0x%x\n",
5718 			    le32toh(resp->error_type),
5719 			    resp->cmd_id);
5720 			break;
5721 		}
5722 
5723 		case IWM_TIME_EVENT_NOTIFICATION:
5724 			iwm_rx_time_event_notif(sc, pkt);
5725 			break;
5726 
5727 		/*
5728 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5729 		 * messages. Just ignore them for now.
5730 		 */
5731 		case IWM_DEBUG_LOG_MSG:
5732 			break;
5733 
5734 		case IWM_MCAST_FILTER_CMD:
5735 			break;
5736 
5737 		case IWM_SCD_QUEUE_CFG: {
5738 			struct iwm_scd_txq_cfg_rsp *rsp;
5739 			rsp = (void *)pkt->data;
5740 
5741 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5742 			    "queue cfg token=0x%x sta_id=%d "
5743 			    "tid=%d scd_queue=%d\n",
5744 			    rsp->token, rsp->sta_id, rsp->tid,
5745 			    rsp->scd_queue);
5746 			break;
5747 		}
5748 
5749 		default:
5750 			device_printf(sc->sc_dev,
5751 			    "frame %d/%d %x UNHANDLED (this should "
5752 			    "not happen)\n", qid & ~0x80, idx,
5753 			    pkt->len_n_flags);
5754 			break;
5755 		}
5756 
5757 		/*
5758 		 * Why test bit 0x80?  The Linux driver:
5759 		 *
5760 		 * There is one exception:  uCode sets bit 15 when it
5761 		 * originates the response/notification, i.e. when the
5762 		 * response/notification is not a direct response to a
5763 		 * command sent by the driver.  For example, uCode issues
5764 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5765 		 * it is not a direct response to any driver command.
5766 		 *
5767 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5768 		 * uses a slightly different format for pkt->hdr, and "qid"
5769 		 * is actually the upper byte of a two-byte field.
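		 * Bit 7 of our one-byte qid is thus bit 15 of Linux's
		 * two-byte field.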
5770 		 */
5771 		if (!(qid & (1 << 7)))
5772 			iwm_cmd_done(sc, pkt);
5773 
5774 		offset = nextoff;
5775 	}
5776 	if (stolen)
5777 		m_freem(m);
5778 #undef HAVEROOM
5779 }
5780 
5781 /*
5782  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5783  * Basic structure from if_iwn
5784  */
5785 static void
5786 iwm_notif_intr(struct iwm_softc *sc)
5787 {
5788 	int count;
5789 	uint32_t wreg;
5790 	uint16_t hw;
5791 
5792 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5793 	    BUS_DMASYNC_POSTREAD);
5794 
5795 	if (sc->cfg->mqrx_supported) {
5796 		count = IWM_RX_MQ_RING_COUNT;
5797 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
5798 	} else {
5799 		count = IWM_RX_LEGACY_RING_COUNT;
5800 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
5801 	}
5802 
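	/* Index of the most recently closed receive buffer, from the status page. */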
5803 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5804 
5805 	/*
5806 	 * Process responses
5807 	 */
5808 	while (sc->rxq.cur != hw) {
5809 		struct iwm_rx_ring *ring = &sc->rxq;
5810 		struct iwm_rx_data *data = &ring->data[ring->cur];
5811 
5812 		bus_dmamap_sync(ring->data_dmat, data->map,
5813 		    BUS_DMASYNC_POSTREAD);
5814 
5815 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5816 		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5817 		iwm_handle_rxb(sc, data->m);
5818 
5819 		ring->cur = (ring->cur + 1) % count;
5820 	}
5821 
5822 	/*
5823 	 * Tell the firmware that it can reuse the ring entries that
5824 	 * we have just processed.
5825 	 * Seems like the hardware gets upset unless we align
5826 	 * the write by 8??
5827 	 */
5828 	hw = (hw == 0) ? count - 1 : hw - 1;
5829 	IWM_WRITE(sc, wreg, rounddown2(hw, 8));
5830 }
5831 
5832 static void
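/*
 * Main interrupt handler.  Interrupt causes come either from the ICT
 * table, when enabled, or directly from the INT/FH_INT_STATUS CSRs.
 */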
5833 iwm_intr(void *arg)
5834 {
5835 	struct iwm_softc *sc = arg;
5836 	int handled = 0;
5837 	int r1, r2, rv = 0;
5838 	int isperiodic = 0;
5839 
5840 #if defined(__DragonFly__)
5841 	if (sc->sc_mem == NULL) {
5842 		kprintf("iwm_intr: detached\n");
5843 		return;
5844 	}
5845 #endif
5846 
5847 	IWM_LOCK(sc);
5848 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5849 
5850 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5851 		uint32_t *ict = sc->ict_dma.vaddr;
5852 		int tmp;
5853 
5854 		tmp = htole32(ict[sc->ict_cur]);
5855 		if (!tmp)
5856 			goto out_ena;
5857 
5858 		/*
5859 		 * ok, there was something.  keep plowing until we have all.
5860 		 */
5861 		r1 = r2 = 0;
5862 		while (tmp) {
5863 			r1 |= tmp;
5864 			ict[sc->ict_cur] = 0;
5865 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5866 			tmp = htole32(ict[sc->ict_cur]);
5867 		}
5868 
5869 		/* this is where the fun begins.  don't ask */
5870 		if (r1 == 0xffffffff)
5871 			r1 = 0;
5872 
5873 		/* i am not expected to understand this */
5874 		if (r1 & 0xc0000)
5875 			r1 |= 0x8000;
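		/* Expand the packed ICT format back to the CSR_INT bit layout. */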
5876 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5877 	} else {
5878 		r1 = IWM_READ(sc, IWM_CSR_INT);
5879 		/* "hardware gone" (where, fishing?) */
5880 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5881 			goto out;
5882 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5883 	}
5884 	if (r1 == 0 && r2 == 0) {
5885 		goto out_ena;
5886 	}
5887 
5888 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5889 
5890 	/* Safely ignore these bits for debug checks below */
5891 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5892 
5893 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5894 		int i;
5895 		struct ieee80211com *ic = &sc->sc_ic;
5896 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5897 
5898 #ifdef IWM_DEBUG
5899 		iwm_nic_error(sc);
5900 #endif
5901 		/* Dump driver status (TX and RX rings) while we're here. */
5902 		device_printf(sc->sc_dev, "driver status:\n");
5903 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
5904 			struct iwm_tx_ring *ring = &sc->txq[i];
5905 			device_printf(sc->sc_dev,
5906 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5907 			    "queued=%-3d\n",
5908 			    i, ring->qid, ring->cur, ring->queued);
5909 		}
5910 		device_printf(sc->sc_dev,
5911 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5912 		device_printf(sc->sc_dev,
5913 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5914 
5915 		/* Reset our firmware state tracking. */
5916 		sc->sc_firmware_state = 0;
5917 		/* Don't stop the device; just do a VAP restart */
5918 		IWM_UNLOCK(sc);
5919 
5920 		if (vap == NULL) {
5921 			kprintf("%s: null vap\n", __func__);
5922 			return;
5923 		}
5924 
5925 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5926 		    "restarting\n", __func__, vap->iv_state);
5927 
5928 		ieee80211_restart_all(ic);
5929 		return;
5930 	}
5931 
5932 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5933 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5934 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5935 		iwm_stop(sc);
5936 		rv = 1;
5937 		goto out;
5938 	}
5939 
5940 	/* firmware chunk loaded */
5941 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5942 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5943 		handled |= IWM_CSR_INT_BIT_FH_TX;
5944 		sc->sc_fw_chunk_done = 1;
5945 		wakeup(&sc->sc_fw);
5946 	}
5947 
5948 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5949 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5950 		if (iwm_check_rfkill(sc)) {
5951 			device_printf(sc->sc_dev,
5952 			    "%s: rfkill switch, disabling interface\n",
5953 			    __func__);
5954 			iwm_stop(sc);
5955 		}
5956 	}
5957 
5958 	/*
5959 	 * The Linux driver uses periodic interrupts to avoid races.
5960 	 * We cargo-cult like it's going out of fashion.
5961 	 */
5962 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5963 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5964 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5965 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5966 			IWM_WRITE_1(sc,
5967 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5968 		isperiodic = 1;
5969 	}
5970 
5971 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5972 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5973 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5974 
5975 		iwm_notif_intr(sc);
5976 
5977 		/* enable periodic interrupt, see above */
5978 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5979 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5980 			    IWM_CSR_INT_PERIODIC_ENA);
5981 	}
5982 
5983 	if (__predict_false(r1 & ~handled))
5984 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5985 		    "%s: unhandled interrupts: %x\n", __func__, r1);
5986 	rv = 1;
5987 
5988  out_ena:
5989 	iwm_restore_interrupts(sc);
5990  out:
5991 	IWM_UNLOCK(sc);
5992 	return;
5993 }
5994 
5995 /*
5996  * Autoconf glue-sniffing
5997  */
5998 #define	PCI_VENDOR_INTEL		0x8086
5999 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
6000 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
6001 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
6002 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
6003 #define	PCI_PRODUCT_INTEL_WL_3168_1	0x24fb
6004 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
6005 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
6006 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
6007 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
6008 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
6009 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
6010 #define	PCI_PRODUCT_INTEL_WL_8265_1	0x24fd
6011 #define	PCI_PRODUCT_INTEL_WL_9560_1	0x9df0
6012 #define	PCI_PRODUCT_INTEL_WL_9560_2	0xa370
6013 #define	PCI_PRODUCT_INTEL_WL_9260_1	0x2526
6014 
6015 static const struct iwm_devices {
6016 	uint16_t		device;
6017 	const struct iwm_cfg	*cfg;
6018 } iwm_devices[] = {
6019 	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
6020 	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
6021 	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
6022 	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
6023 	{ PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg },
6024 	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
6025 	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
6026 	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
6027 	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
6028 	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
6029 	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
6030 	{ PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
6031 	{ PCI_PRODUCT_INTEL_WL_9560_1, &iwm9560_cfg },
6032 	{ PCI_PRODUCT_INTEL_WL_9560_2, &iwm9560_cfg },
6033 	{ PCI_PRODUCT_INTEL_WL_9260_1, &iwm9260_cfg },
6034 };
6035 
6036 static int
6037 iwm_probe(device_t dev)
6038 {
6039 	int i;
6040 
6041 	for (i = 0; i < nitems(iwm_devices); i++) {
6042 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
6043 		    pci_get_device(dev) == iwm_devices[i].device) {
6044 			device_set_desc(dev, iwm_devices[i].cfg->name);
6045 			return (BUS_PROBE_DEFAULT);
6046 		}
6047 	}
6048 
6049 	return (ENXIO);
6050 }
6051 
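/*
 * Look up the config for this PCI device ID and cache it in the softc.
 */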
6052 static int
6053 iwm_dev_check(device_t dev)
6054 {
6055 	struct iwm_softc *sc;
6056 	uint16_t devid;
6057 	int i;
6058 
6059 	sc = device_get_softc(dev);
6060 
6061 	devid = pci_get_device(dev);
6062 	for (i = 0; i < nitems(iwm_devices); i++) {
6063 		if (iwm_devices[i].device == devid) {
6064 			sc->cfg = iwm_devices[i].cfg;
6065 			return (0);
6066 		}
6067 	}
6068 	device_printf(dev, "unknown adapter type\n");
6069 	return ENXIO;
6070 }
6071 
6072 /* PCI registers */
6073 #define PCI_CFG_RETRY_TIMEOUT	0x041
6074 
6075 static int
6076 iwm_pci_attach(device_t dev)
6077 {
6078 	struct iwm_softc *sc;
6079 	int count, error, rid;
6080 	uint16_t reg;
6081 #if defined(__DragonFly__)
6082 	int irq_flags;
6083 #endif
6084 
6085 	sc = device_get_softc(dev);
6086 
6087 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
6088 	 * PCI Tx retries from interfering with C3 CPU state */
6089 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6090 
6091 	/* Enable bus-mastering and hardware bug workaround. */
6092 	pci_enable_busmaster(dev);
6093 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
6094 	/* if !MSI */
6095 	if (reg & PCIM_STATUS_INTxSTATE) {
6096 		reg &= ~PCIM_STATUS_INTxSTATE;
6097 	}
6098 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
6099 
6100 	rid = PCIR_BAR(0);
6101 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
6102 	    RF_ACTIVE);
6103 	if (sc->sc_mem == NULL) {
6104 		device_printf(sc->sc_dev, "can't map mem space\n");
6105 		return (ENXIO);
6106 	}
6107 	sc->sc_st = rman_get_bustag(sc->sc_mem);
6108 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
6109 
6110 	/* Install interrupt handler. */
6111 	count = 1;
6112 	rid = 0;
6113 #if defined(__DragonFly__)
6114 	pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
6115 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
6116 #else
6117 	if (pci_alloc_msi(dev, &count) == 0)
6118 		rid = 1;
6119 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
6120 	    (rid != 0 ? 0 : RF_SHAREABLE));
6121 #endif
6122 	if (sc->sc_irq == NULL) {
6123 		device_printf(dev, "can't map interrupt\n");
6124 		return (ENXIO);
6125 	}
6126 #if defined(__DragonFly__)
6127 	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
6128 			       iwm_intr, sc, &sc->sc_ih,
6129 			       &wlan_global_serializer);
6130 #else
6131 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
6132 	    NULL, iwm_intr, sc, &sc->sc_ih);
6133 #endif
6134 	if (sc->sc_ih == NULL) {
6135 		device_printf(dev, "can't establish interrupt\n");
6136 #if defined(__DragonFly__)
6137 		pci_release_msi(dev);
6138 #endif
6139 		return (ENXIO);
6140 	}
6141 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
6142 
6143 	return (0);
6144 }
6145 
6146 static void
6147 iwm_pci_detach(device_t dev)
6148 {
6149 	struct iwm_softc *sc = device_get_softc(dev);
6150 
6151 	if (sc->sc_irq != NULL) {
6152 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
6153 		bus_release_resource(dev, SYS_RES_IRQ,
6154 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
6155 		pci_release_msi(dev);
6156 #if defined(__DragonFly__)
6157 		sc->sc_irq = NULL;
6158 #endif
6159 	}
6160 	if (sc->sc_mem != NULL) {
6161 		bus_release_resource(dev, SYS_RES_MEMORY,
6162 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
6163 #if defined(__DragonFly__)
6164 		sc->sc_mem = NULL;
6165 #endif
6166 	}
6167 }
6168 
6169 static int
6170 iwm_attach(device_t dev)
6171 {
6172 	struct iwm_softc *sc = device_get_softc(dev);
6173 	struct ieee80211com *ic = &sc->sc_ic;
6174 	int error;
6175 	int txq_i, i;
6176 
6177 	sc->sc_dev = dev;
6178 	sc->sc_attached = 1;
6179 	IWM_LOCK_INIT(sc);
6180 	mbufq_init(&sc->sc_snd, ifqmaxlen);
6181 	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
6182 	callout_init_lk(&sc->sc_led_blink_to, &sc->sc_lk);
6183 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
6184 
6185 	error = iwm_dev_check(dev);
6186 	if (error != 0)
6187 		goto fail;
6188 
6189 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
6190 	if (sc->sc_notif_wait == NULL) {
6191 		device_printf(dev, "failed to init notification wait struct\n");
6192 		goto fail;
6193 	}
6194 
6195 	sc->sf_state = IWM_SF_UNINIT;
6196 
6197 	/* Init phy db */
6198 	sc->sc_phy_db = iwm_phy_db_init(sc);
6199 	if (!sc->sc_phy_db) {
6200 		device_printf(dev, "Cannot init phy_db\n");
6201 		goto fail;
6202 	}
6203 
6204 	/* Set EBS as successful as long as not stated otherwise by the FW. */
6205 	sc->last_ebs_successful = TRUE;
6206 
6207 	/* PCI attach */
6208 	error = iwm_pci_attach(dev);
6209 	if (error != 0)
6210 		goto fail;
6211 
6212 	sc->sc_wantresp = -1;
6213 
6214 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6215 	/*
6216 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
6217 	 * changed, and now the revision step also includes bits 0-1 (no more
6218 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
6219 	 * in the old format.
6220 	 */
6221 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
6222 		int ret;
6223 		uint32_t hw_step;
6224 
6225 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
6226 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
6227 
6228 		if (iwm_prepare_card_hw(sc) != 0) {
6229 			device_printf(dev, "could not initialize hardware\n");
6230 			goto fail;
6231 		}
6232 
6233 		/*
6234 		 * In order to recognize C step the driver should read the
6235 		 * chip version id located at the AUX bus MISC address.
6236 		 */
6237 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6238 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6239 		DELAY(2);
6240 
6241 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6242 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6243 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6244 				   25000);
6245 		if (!ret) {
6246 			device_printf(sc->sc_dev,
6247 			    "Failed to wake up the nic\n");
6248 			goto fail;
6249 		}
6250 
6251 		if (iwm_nic_lock(sc)) {
6252 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6253 			hw_step |= IWM_ENABLE_WFPM;
6254 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6255 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6256 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6257 			if (hw_step == 0x3)
6258 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6259 						(IWM_SILICON_C_STEP << 2);
6260 			iwm_nic_unlock(sc);
6261 		} else {
6262 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
6263 			goto fail;
6264 		}
6265 	}
6266 
6267 	/* special-case 7265D, it has the same PCI IDs. */
6268 	if (sc->cfg == &iwm7265_cfg &&
6269 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6270 		sc->cfg = &iwm7265d_cfg;
6271 	}
6272 
6273 	/* Allocate DMA memory for firmware transfers. */
6274 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
6275 		device_printf(dev, "could not allocate memory for firmware\n");
6276 		goto fail;
6277 	}
6278 
6279 	/* Allocate "Keep Warm" page. */
6280 	if ((error = iwm_alloc_kw(sc)) != 0) {
6281 		device_printf(dev, "could not allocate keep warm page\n");
6282 		goto fail;
6283 	}
6284 
6285 	/* We use ICT interrupts */
6286 	if ((error = iwm_alloc_ict(sc)) != 0) {
6287 		device_printf(dev, "could not allocate ICT table\n");
6288 		goto fail;
6289 	}
6290 
6291 	/* Allocate TX scheduler "rings". */
6292 	if ((error = iwm_alloc_sched(sc)) != 0) {
6293 		device_printf(dev, "could not allocate TX scheduler rings\n");
6294 		goto fail;
6295 	}
6296 
6297 	/* Allocate TX rings */
6298 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6299 		if ((error = iwm_alloc_tx_ring(sc,
6300 		    &sc->txq[txq_i], txq_i)) != 0) {
6301 			device_printf(dev,
6302 			    "could not allocate TX ring %d\n",
6303 			    txq_i);
6304 			goto fail;
6305 		}
6306 	}
6307 
6308 	/* Allocate RX ring. */
6309 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6310 		device_printf(dev, "could not allocate RX ring\n");
6311 		goto fail;
6312 	}
6313 
6314 	/* Clear pending interrupts. */
6315 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6316 
6317 	ic->ic_softc = sc;
6318 	ic->ic_name = device_get_nameunit(sc->sc_dev);
6319 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
6320 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
6321 
6322 	/* Set device capabilities. */
6323 	ic->ic_caps =
6324 	    IEEE80211_C_STA |
6325 	    IEEE80211_C_WPA |		/* WPA/RSN */
6326 	    IEEE80211_C_WME |
6327 	    IEEE80211_C_PMGT |
6328 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
6329 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
6330 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
6331 	    ;
6332 	/* Advertise full-offload scanning */
6333 	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6334 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6335 		sc->sc_phyctxt[i].id = i;
6336 		sc->sc_phyctxt[i].color = 0;
6337 		sc->sc_phyctxt[i].ref = 0;
6338 		sc->sc_phyctxt[i].channel = NULL;
6339 	}
6340 
6341 	/* Default noise floor */
6342 	sc->sc_noise = -96;
6343 
6344 	/* Max RSSI */
6345 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6346 
6347 #ifdef IWM_DEBUG
6348 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6349 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6350 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6351 #endif
6352 
6353 	error = iwm_read_firmware(sc);
6354 	if (error) {
6355 		goto fail;
6356 	} else if (sc->sc_fw.fw_fp == NULL) {
6357 		/*
6358 		 * XXX Add a solution for properly deferring firmware load
6359 		 *     during bootup.
6360 		 */
6361 		goto fail;
6362 	} else {
6363 		sc->sc_preinit_hook.ich_func = iwm_preinit;
6364 		sc->sc_preinit_hook.ich_arg = sc;
6365 		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6366 			device_printf(dev,
6367 			    "config_intrhook_establish failed\n");
6368 			goto fail;
6369 		}
6370 	}
6371 
6372 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6373 	    "<-%s\n", __func__);
6374 
6375 	return 0;
6376 
6377 	/* Free allocated memory if something failed during attachment. */
6378 fail:
6379 	iwm_detach_local(sc, 0);
6380 
6381 	return ENXIO;
6382 }
6383 
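/* Reject multicast (low bit of first octet set) and all-zero addresses. */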
6384 static int
6385 iwm_is_valid_ether_addr(uint8_t *addr)
6386 {
6387 	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6388 
6389 	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6390 		return (FALSE);
6391 
6392 	return (TRUE);
6393 }
6394 
6395 static int
6396 iwm_wme_update(struct ieee80211com *ic)
6397 {
6398 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6399 	struct iwm_softc *sc = ic->ic_softc;
6400 #if !defined(__DragonFly__)
6401 	struct chanAccParams chp;
6402 #endif
6403 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6404 	struct iwm_vap *ivp = IWM_VAP(vap);
6405 	struct iwm_node *in;
6406 	struct wmeParams tmp[WME_NUM_AC];
6407 	int aci, error;
6408 
6409 	if (vap == NULL)
6410 		return (0);
6411 
6412 #if !defined(__DragonFly__)
6413 	ieee80211_wme_ic_getparams(ic, &chp);
6414 
6415 	IEEE80211_LOCK(ic);
6416 	for (aci = 0; aci < WME_NUM_AC; aci++)
6417 		tmp[aci] = chp.cap_wmeParams[aci];
6418 	IEEE80211_UNLOCK(ic);
6419 #else
6420 	IEEE80211_LOCK(ic);
6421 	for (aci = 0; aci < WME_NUM_AC; aci++)
6422 		tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6423 	IEEE80211_UNLOCK(ic);
6424 #endif
6425 
6426 	IWM_LOCK(sc);
6427 	for (aci = 0; aci < WME_NUM_AC; aci++) {
6428 		const struct wmeParams *ac = &tmp[aci];
6429 		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6430 		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6431 		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6432 		ivp->queue_params[aci].edca_txop =
6433 		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6434 	}
6435 	ivp->have_wme = TRUE;
6436 	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6437 		in = IWM_NODE(vap->iv_bss);
6438 		if (in->in_assoc) {
6439 			if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
6440 				device_printf(sc->sc_dev,
6441 				    "%s: failed to update MAC\n", __func__);
6442 			}
6443 		}
6444 	}
6445 	IWM_UNLOCK(sc);
6446 
6447 	return (0);
6448 #undef IWM_EXP2
6449 }
6450 
6451 static void
6452 iwm_preinit(void *arg)
6453 {
6454 	struct iwm_softc *sc = arg;
6455 	device_t dev = sc->sc_dev;
6456 	struct ieee80211com *ic = &sc->sc_ic;
6457 	int error;
6458 
6459 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6460 	    "->%s\n", __func__);
6461 
6462 	IWM_LOCK(sc);
6463 	if ((error = iwm_start_hw(sc)) != 0) {
6464 		device_printf(dev, "could not initialize hardware\n");
6465 		IWM_UNLOCK(sc);
6466 		goto fail;
6467 	}
6468 
6469 	error = iwm_run_init_ucode(sc, 1);
6470 	iwm_stop_device(sc);
6471 	if (error) {
6472 		IWM_UNLOCK(sc);
6473 		goto fail;
6474 	}
6475 	device_printf(dev,
6476 	    "hw rev 0x%x, fw ver %s, address %s\n",
6477 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6478 	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6479 
6480 	/* not all hardware can do 5GHz band */
6481 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6482 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6483 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6484 	IWM_UNLOCK(sc);
6485 
6486 	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6487 	    ic->ic_channels);
6488 
6489 	/*
6490 	 * At this point we've committed - if we fail to do setup,
6491 	 * we now also have to tear down the net80211 state.
6492 	 */
6493 	ieee80211_ifattach(ic);
6494 	ic->ic_vap_create = iwm_vap_create;
6495 	ic->ic_vap_delete = iwm_vap_delete;
6496 	ic->ic_raw_xmit = iwm_raw_xmit;
6497 	ic->ic_node_alloc = iwm_node_alloc;
6498 	ic->ic_scan_start = iwm_scan_start;
6499 	ic->ic_scan_end = iwm_scan_end;
6500 	ic->ic_update_mcast = iwm_update_mcast;
6501 	ic->ic_getradiocaps = iwm_init_channel_map;
6502 	ic->ic_set_channel = iwm_set_channel;
6503 	ic->ic_scan_curchan = iwm_scan_curchan;
6504 	ic->ic_scan_mindwell = iwm_scan_mindwell;
6505 	ic->ic_wme.wme_update = iwm_wme_update;
6506 	ic->ic_parent = iwm_parent;
6507 	ic->ic_transmit = iwm_transmit;
6508 	iwm_radiotap_attach(sc);
6509 	if (bootverbose)
6510 		ieee80211_announce(ic);
6511 
6512 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6513 	    "<-%s\n", __func__);
6514 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6515 
6516 	return;
6517 fail:
6518 	config_intrhook_disestablish(&sc->sc_preinit_hook);
6519 	iwm_detach_local(sc, 0);
6520 }
6521 
6522 /*
6523  * Attach the interface to 802.11 radiotap.
6524  */
6525 static void
6526 iwm_radiotap_attach(struct iwm_softc *sc)
6527 {
6528 	struct ieee80211com *ic = &sc->sc_ic;
6529 
6530 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6531 	    "->%s begin\n", __func__);
6532 	ieee80211_radiotap_attach(ic,
6533 	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6534 	    IWM_TX_RADIOTAP_PRESENT,
6535 	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6536 	    IWM_RX_RADIOTAP_PRESENT);
6537 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6538 	    "<-%s end\n", __func__);
6539 }
6540 
6541 static struct ieee80211vap *
6542 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6543     enum ieee80211_opmode opmode, int flags,
6544     const uint8_t bssid[IEEE80211_ADDR_LEN],
6545     const uint8_t mac[IEEE80211_ADDR_LEN])
6546 {
6547 	struct iwm_vap *ivp;
6548 	struct ieee80211vap *vap;
6549 
6550 	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6551 		return NULL;
6552 	ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6553 	vap = &ivp->iv_vap;
6554 	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6555 	vap->iv_bmissthreshold = 10;            /* override default */
6556 	/* Override with driver methods. */
6557 	ivp->iv_newstate = vap->iv_newstate;
6558 	vap->iv_newstate = iwm_newstate;
6559 
6560 	ivp->id = IWM_DEFAULT_MACID;
6561 	ivp->color = IWM_DEFAULT_COLOR;
6562 
6563 	ivp->have_wme = FALSE;
6564 	ivp->ps_disabled = FALSE;
6565 
6566 	ieee80211_ratectl_init(vap);
6567 	/* Complete setup. */
6568 	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6569 	    mac);
6570 	ic->ic_opmode = opmode;
6571 
6572 	return vap;
6573 }
6574 
6575 static void
6576 iwm_vap_delete(struct ieee80211vap *vap)
6577 {
6578 	struct iwm_vap *ivp = IWM_VAP(vap);
6579 
6580 	ieee80211_ratectl_deinit(vap);
6581 	ieee80211_vap_detach(vap);
6582 	kfree(ivp, M_80211_VAP);
6583 }
6584 
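/* Free all frames queued for transmit, dropping their node references. */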
6585 static void
6586 iwm_xmit_queue_drain(struct iwm_softc *sc)
6587 {
6588 	struct mbuf *m;
6589 	struct ieee80211_node *ni;
6590 
6591 	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6592 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6593 		ieee80211_free_node(ni);
6594 		m_freem(m);
6595 	}
6596 }
6597 
6598 static void
6599 iwm_scan_start(struct ieee80211com *ic)
6600 {
6601 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6602 	struct iwm_softc *sc = ic->ic_softc;
6603 	int error;
6604 
6605 	IWM_LOCK(sc);
6606 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6607 		/* This should not be possible */
6608 		device_printf(sc->sc_dev,
6609 		    "%s: Previous scan not completed yet\n", __func__);
6610 	}
6611 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6612 		error = iwm_umac_scan(sc);
6613 	else
6614 		error = iwm_lmac_scan(sc);
6615 	if (error != 0) {
6616 		device_printf(sc->sc_dev, "could not initiate scan\n");
6617 		IWM_UNLOCK(sc);
6618 		ieee80211_cancel_scan(vap);
6619 	} else {
6620 		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6621 		iwm_led_blink_start(sc);
6622 		IWM_UNLOCK(sc);
6623 	}
6624 }
6625 
6626 static void
6627 iwm_scan_end(struct ieee80211com *ic)
6628 {
6629 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6630 	struct iwm_softc *sc = ic->ic_softc;
6631 
6632 	IWM_LOCK(sc);
6633 	iwm_led_blink_stop(sc);
6634 	if (vap->iv_state == IEEE80211_S_RUN)
6635 		iwm_led_enable(sc);
6636 	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6637 		/*
6638 		 * Removing IWM_FLAG_SCAN_RUNNING now is fine because
6639 		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6640 		 * taskqueue.
6641 		 */
6642 		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6643 		iwm_scan_stop_wait(sc);
6644 	}
6645 	IWM_UNLOCK(sc);
6646 
6647 	/*
6648 	 * Make sure we don't race if sc_es_task is still enqueued here,
6649 	 * so that it won't call ieee80211_scan_done when we have already
6650 	 * started the next scan.
6651 	 */
6652 	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6653 }
6654 
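/*
 * These net80211 callbacks are required but left empty here: channel
 * changes and scan dwell handling are performed by the firmware.
 */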
6655 static void
6656 iwm_update_mcast(struct ieee80211com *ic)
6657 {
6658 }
6659 
6660 static void
6661 iwm_set_channel(struct ieee80211com *ic)
6662 {
6663 }
6664 
6665 static void
6666 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6667 {
6668 }
6669 
6670 static void
6671 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6672 {
6673 }
6674 
6675 void
6676 iwm_init_task(void *arg1)
6677 {
6678 	struct iwm_softc *sc = arg1;
6679 
6680 	IWM_LOCK(sc);
6681 	while (sc->sc_flags & IWM_FLAG_BUSY)
6682 		lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6683 	sc->sc_flags |= IWM_FLAG_BUSY;
6684 	iwm_stop(sc);
6685 	if (sc->sc_ic.ic_nrunning > 0)
6686 		iwm_init(sc);
6687 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6688 	wakeup(&sc->sc_flags);
6689 	IWM_UNLOCK(sc);
6690 }
6691 
6692 static int
6693 iwm_resume(device_t dev)
6694 {
6695 	struct iwm_softc *sc = device_get_softc(dev);
6696 	int do_reinit = 0;
6697 
6698 	/*
6699 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6700 	 * PCI Tx retries from interfering with C3 CPU state.
6701 	 */
6702 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6703 
6704 	if (!sc->sc_attached)
6705 		return 0;
6706 
6707 	iwm_init_task(device_get_softc(dev));
6708 
6709 	IWM_LOCK(sc);
6710 	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6711 		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6712 		do_reinit = 1;
6713 	}
6714 	IWM_UNLOCK(sc);
6715 
6716 	if (do_reinit)
6717 		ieee80211_resume_all(&sc->sc_ic);
6718 
6719 	return 0;
6720 }
6721 
6722 static int
6723 iwm_suspend(device_t dev)
6724 {
6725 	int do_stop = 0;
6726 	struct iwm_softc *sc = device_get_softc(dev);
6727 
6728 	do_stop = (sc->sc_ic.ic_nrunning > 0);
6729 
6730 	if (!sc->sc_attached)
6731 		return (0);
6732 
6733 	ieee80211_suspend_all(&sc->sc_ic);
6734 
6735 	if (do_stop) {
6736 		IWM_LOCK(sc);
6737 		iwm_stop(sc);
6738 		sc->sc_flags |= IWM_FLAG_SCANNING;
6739 		IWM_UNLOCK(sc);
6740 	}
6741 
6742 	return (0);
6743 }
6744 
6745 static int
6746 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6747 {
6748 	struct iwm_fw_info *fw = &sc->sc_fw;
6749 	device_t dev = sc->sc_dev;
6750 	int i;
6751 
6752 	if (!sc->sc_attached)
6753 		return 0;
6754 	sc->sc_attached = 0;
6755 	if (do_net80211) {
6756 		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6757 	}
6758 	iwm_stop_device(sc);
6759 	if (do_net80211) {
6760 		IWM_LOCK(sc);
6761 		iwm_xmit_queue_drain(sc);
6762 		IWM_UNLOCK(sc);
6763 		ieee80211_ifdetach(&sc->sc_ic);
6764 	}
6765 	callout_drain(&sc->sc_led_blink_to);
6766 	callout_drain(&sc->sc_watchdog_to);
6767 
6768 	iwm_phy_db_free(sc->sc_phy_db);
6769 	sc->sc_phy_db = NULL;
6770 
6771 	iwm_free_nvm_data(sc->nvm_data);
6772 
6773 	/* Free descriptor rings */
6774 	iwm_free_rx_ring(sc, &sc->rxq);
6775 	for (i = 0; i < nitems(sc->txq); i++)
6776 		iwm_free_tx_ring(sc, &sc->txq[i]);
6777 
6778 	/* Free firmware */
6779 	if (fw->fw_fp != NULL)
6780 		iwm_fw_info_free(fw);
6781 
6782 	/* Free scheduler */
6783 	iwm_dma_contig_free(&sc->sched_dma);
6784 	iwm_dma_contig_free(&sc->ict_dma);
6785 	iwm_dma_contig_free(&sc->kw_dma);
6786 	iwm_dma_contig_free(&sc->fw_dma);
6787 
6788 	iwm_free_fw_paging(sc);
6789 
6790 	/* Finished with the hardware - detach things */
6791 	iwm_pci_detach(dev);
6792 
6793 	if (sc->sc_notif_wait != NULL) {
6794 		iwm_notification_wait_free(sc->sc_notif_wait);
6795 		sc->sc_notif_wait = NULL;
6796 	}
6797 
6798 	IWM_LOCK_DESTROY(sc);
6799 
6800 	return (0);
6801 }
6802 
6803 static int
6804 iwm_detach(device_t dev)
6805 {
6806 	struct iwm_softc *sc = device_get_softc(dev);
6807 
6808 	return (iwm_detach_local(sc, 1));
6809 }
6810 
6811 static device_method_t iwm_pci_methods[] = {
6812         /* Device interface */
6813         DEVMETHOD(device_probe,         iwm_probe),
6814         DEVMETHOD(device_attach,        iwm_attach),
6815         DEVMETHOD(device_detach,        iwm_detach),
6816         DEVMETHOD(device_suspend,       iwm_suspend),
6817         DEVMETHOD(device_resume,        iwm_resume),
6818 
6819         DEVMETHOD_END
6820 };
6821 
6822 static driver_t iwm_pci_driver = {
6823         "iwm",
6824         iwm_pci_methods,
6825         sizeof (struct iwm_softc)
6826 };
6827 
6828 static devclass_t iwm_devclass;
6829 
6830 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6831 #if !defined(__DragonFly__)
6832 MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver,
6833     iwm_devices, nitems(iwm_devices));
6834 #endif
6835 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6836 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6837 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6838