xref: /openbsd/sys/dev/pci/if_iwm.c (revision 8529ddd3)
1 /*	$OpenBSD: if_iwm.c,v 1.39 2015/03/23 00:35:19 jsg Exp $	*/
2 
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89 
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 
106 #include "bpfilter.h"
107 
108 #include <sys/param.h>
109 #include <sys/conf.h>
110 #include <sys/kernel.h>
111 #include <sys/malloc.h>
112 #include <sys/mbuf.h>
113 #include <sys/mutex.h>
114 #include <sys/proc.h>
115 #include <sys/socket.h>
116 #include <sys/sockio.h>
117 #include <sys/systm.h>
118 
119 #include <sys/task.h>
120 #include <machine/bus.h>
121 #include <machine/endian.h>
122 #include <machine/intr.h>
123 
124 #include <dev/pci/pcireg.h>
125 #include <dev/pci/pcivar.h>
126 #include <dev/pci/pcidevs.h>
127 
128 #if NBPFILTER > 0
129 #include <net/bpf.h>
130 #endif
131 #include <net/if.h>
132 #include <net/if_arp.h>
133 #include <net/if_dl.h>
134 #include <net/if_media.h>
135 #include <net/if_types.h>
136 
137 #include <netinet/in.h>
138 #include <netinet/in_systm.h>
139 #include <netinet/if_ether.h>
140 #include <netinet/ip.h>
141 
142 #include <net80211/ieee80211_var.h>
143 #include <net80211/ieee80211_amrr.h>
144 #include <net80211/ieee80211_radiotap.h>
145 
146 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
147 
148 #define IC2IFP(_ic_) (&(_ic_)->ic_if)
149 
150 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
151 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
152 
153 #ifdef IWM_DEBUG
154 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
155 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
156 int iwm_debug = 1;
157 #else
158 #define DPRINTF(x)	do { ; } while (0)
159 #define DPRINTFN(n, x)	do { ; } while (0)
160 #endif
161 
162 #include <dev/pci/if_iwmreg.h>
163 #include <dev/pci/if_iwmvar.h>
164 
/*
 * IEEE 802.11 channel numbers onto which the device's NVM channel list
 * is mapped.  The first IWM_NUM_2GHZ_CHANNELS entries are the 2.4 GHz
 * channels; the remaining entries are 5 GHz channels.
 */
165 const uint8_t iwm_nvm_channels[] = {
166 	/* 2.4 GHz */
167 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
168 	/* 5 GHz */
169 	36, 40, 44 , 48, 52, 56, 60, 64,
170 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
171 	149, 153, 157, 161, 165
172 };
/* Count of 2.4 GHz entries at the head of iwm_nvm_channels[]. */
173 #define IWM_NUM_2GHZ_CHANNELS	14
174 
/*
 * Rate table mapping a net80211 rate (in units of 500 kbit/s, so 2
 * means 1 Mbit/s) to the PLCP value the firmware expects.  The first
 * four entries are CCK (11b) rates, the rest OFDM (11a/g) rates; the
 * IWM_RIDX_* macros below encode that split.
 */
175 const struct iwm_rate {
176 	uint8_t rate;
177 	uint8_t plcp;
178 } iwm_rates[] = {
179 	{   2,	IWM_RATE_1M_PLCP  },
180 	{   4,	IWM_RATE_2M_PLCP  },
181 	{  11,	IWM_RATE_5M_PLCP  },
182 	{  22,	IWM_RATE_11M_PLCP },
183 	{  12,	IWM_RATE_6M_PLCP  },
184 	{  18,	IWM_RATE_9M_PLCP  },
185 	{  24,	IWM_RATE_12M_PLCP },
186 	{  36,	IWM_RATE_18M_PLCP },
187 	{  48,	IWM_RATE_24M_PLCP },
188 	{  72,	IWM_RATE_36M_PLCP },
189 	{  96,	IWM_RATE_48M_PLCP },
190 	{ 108,	IWM_RATE_54M_PLCP },
191 };
/* Indices into iwm_rates[]: CCK rates start at 0, OFDM rates at 4. */
192 #define IWM_RIDX_CCK	0
193 #define IWM_RIDX_OFDM	4
194 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
195 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
196 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
197 
/*
 * Argument bundle for deferring an 802.11 state change into task
 * context (see iwm_newstate() and iwm_newstate_cb()).
 */
198 struct iwm_newstate_state {
199 	struct task ns_wk;		/* task that runs iwm_newstate_cb() */
200 	struct ieee80211com *ns_ic;	/* interface whose state changes */
201 	enum ieee80211_state ns_nstate;	/* target state */
202 	int ns_arg;			/* argument for the state change */
203 	int ns_generation;		/* NOTE(review): presumably used to
					 * detect stale queued tasks —
					 * confirm in iwm_newstate_cb() */
204 };
205 
206 int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
207 int	iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
208 					uint8_t *, size_t);
209 int	iwm_set_default_calib(struct iwm_softc *, const void *);
210 void	iwm_fw_info_free(struct iwm_fw_info *);
211 int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
212 uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
213 void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
214 int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
215 int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
216 int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
217 int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
218 int	iwm_nic_lock(struct iwm_softc *);
219 void	iwm_nic_unlock(struct iwm_softc *);
220 void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
221 		    uint32_t);
222 void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
223 void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
224 int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
225 				bus_size_t, bus_size_t);
226 void	iwm_dma_contig_free(struct iwm_dma_info *);
227 int	iwm_alloc_fwmem(struct iwm_softc *);
228 void	iwm_free_fwmem(struct iwm_softc *);
229 int	iwm_alloc_sched(struct iwm_softc *);
230 void	iwm_free_sched(struct iwm_softc *);
231 int	iwm_alloc_kw(struct iwm_softc *);
232 void	iwm_free_kw(struct iwm_softc *);
233 int	iwm_alloc_ict(struct iwm_softc *);
234 void	iwm_free_ict(struct iwm_softc *);
235 int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
236 void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
237 void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
238 int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
239 void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
240 void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
241 void	iwm_enable_rfkill_int(struct iwm_softc *);
242 int	iwm_check_rfkill(struct iwm_softc *);
243 void	iwm_enable_interrupts(struct iwm_softc *);
244 void	iwm_restore_interrupts(struct iwm_softc *);
245 void	iwm_disable_interrupts(struct iwm_softc *);
246 void	iwm_ict_reset(struct iwm_softc *);
247 int	iwm_set_hw_ready(struct iwm_softc *);
248 int	iwm_prepare_card_hw(struct iwm_softc *);
249 void	iwm_apm_config(struct iwm_softc *);
250 int	iwm_apm_init(struct iwm_softc *);
251 void	iwm_apm_stop(struct iwm_softc *);
252 int	iwm_allow_mcast(struct iwm_softc *);
253 int	iwm_start_hw(struct iwm_softc *);
254 void	iwm_stop_device(struct iwm_softc *);
255 void	iwm_set_pwr(struct iwm_softc *);
256 void	iwm_mvm_nic_config(struct iwm_softc *);
257 int	iwm_nic_rx_init(struct iwm_softc *);
258 int	iwm_nic_tx_init(struct iwm_softc *);
259 int	iwm_nic_init(struct iwm_softc *);
260 void	iwm_enable_txq(struct iwm_softc *, int, int);
261 int	iwm_post_alive(struct iwm_softc *);
262 #ifdef notyet
263 struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *,
264 					enum iwm_phy_db_section_type, uint16_t);
265 int	iwm_phy_db_set_section(struct iwm_softc *,
266 				struct iwm_calib_res_notif_phy_db *);
267 #endif
268 int	iwm_is_valid_channel(uint16_t);
269 uint8_t	iwm_ch_id_to_ch_index(uint16_t);
270 uint16_t iwm_channel_id_to_papd(uint16_t);
271 uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
272 int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
273 					uint16_t *, uint16_t);
274 int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
275 #ifdef notyet
276 int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
277 		enum iwm_phy_db_section_type, uint8_t);
278 #endif
int	iwm_send_phy_db_data(struct iwm_softc *);
281 void	iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
282 				struct iwm_time_event_cmd_v1 *);
283 int	iwm_mvm_send_time_event_cmd(struct iwm_softc *,
284 					const struct iwm_time_event_cmd_v2 *);
285 int	iwm_mvm_time_event_send_add(struct iwm_softc *, struct iwm_node *,
286 					void *, struct iwm_time_event_cmd_v2 *);
287 void	iwm_mvm_protect_session(struct iwm_softc *, struct iwm_node *,
288 				uint32_t, uint32_t, uint32_t);
289 int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
290 				uint8_t *, uint16_t *);
291 int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
292 				uint16_t *);
293 void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const);
294 int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
295 				const uint16_t *, const uint16_t *, uint8_t,
296 				uint8_t);
297 #ifdef notyet
298 int	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
299 #endif
300 int	iwm_nvm_init(struct iwm_softc *);
301 int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
302 				uint32_t);
303 int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
304 int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
305 int	iwm_fw_alive(struct iwm_softc *, uint32_t);
306 int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
307 int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
308 int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
309 int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
310 int	iwm_rx_addbuf(struct iwm_softc *, int, int);
311 int	iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
312 int	iwm_mvm_get_signal_strength(struct iwm_softc *,
313 					struct iwm_rx_phy_info *);
314 void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
315 				struct iwm_rx_data *);
316 int	iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
317 void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
318 				struct iwm_rx_data *);
319 void	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
320 				struct iwm_node *);
321 void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
322 			struct iwm_rx_data *);
323 int	iwm_mvm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
324 int	iwm_mvm_binding_update(struct iwm_softc *, struct iwm_node *, int);
325 int	iwm_mvm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
326 void	iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_mvm_phy_ctxt *,
327 			struct iwm_phy_context_cmd *, uint32_t, uint32_t);
328 void	iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *,
329 		struct iwm_phy_context_cmd *, struct ieee80211_channel *,
330 		uint8_t, uint8_t);
331 int	iwm_mvm_phy_ctxt_apply(struct iwm_softc *, struct iwm_mvm_phy_ctxt *,
332 				uint8_t, uint8_t, uint32_t, uint32_t);
333 int	iwm_mvm_phy_ctxt_add(struct iwm_softc *, struct iwm_mvm_phy_ctxt *,
334 				struct ieee80211_channel *, uint8_t, uint8_t);
335 int	iwm_mvm_phy_ctxt_changed(struct iwm_softc *, struct iwm_mvm_phy_ctxt *,
336 				struct ieee80211_channel *, uint8_t, uint8_t);
337 int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
338 int	iwm_mvm_send_cmd_pdu(struct iwm_softc *, uint8_t, uint32_t, uint16_t,
339 				const void *);
340 int	iwm_mvm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
341 				uint32_t *);
342 int	iwm_mvm_send_cmd_pdu_status(struct iwm_softc *, uint8_t,
343 					uint16_t, const void *, uint32_t *);
344 void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
345 void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
346 void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
347 const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
348 			struct ieee80211_frame *, struct iwm_tx_cmd *);
349 int	iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
350 int	iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *,
351 					struct iwm_beacon_filter_cmd *);
352 void	iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *,
353 			struct iwm_node *, struct iwm_beacon_filter_cmd *);
354 int	iwm_mvm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
355 void	iwm_mvm_power_log(struct iwm_softc *, struct iwm_mac_power_cmd *);
356 void	iwm_mvm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
357 				struct iwm_mac_power_cmd *);
358 int	iwm_mvm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
359 int	iwm_mvm_power_update_device(struct iwm_softc *);
360 int	iwm_mvm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
361 int	iwm_mvm_disable_beacon_filter(struct iwm_softc *, struct iwm_node *);
362 void	iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
363 					struct iwm_mvm_add_sta_cmd_v5 *);
364 int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
365 					struct iwm_mvm_add_sta_cmd_v6 *, int *);
366 int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *, int);
367 int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
368 int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
369 int	iwm_mvm_add_int_sta_common(struct iwm_softc *, struct iwm_int_sta *,
370 				const uint8_t *, uint16_t, uint16_t);
371 int	iwm_mvm_add_aux_sta(struct iwm_softc *);
372 uint16_t iwm_mvm_scan_rx_chain(struct iwm_softc *);
373 uint32_t iwm_mvm_scan_max_out_time(struct iwm_softc *, uint32_t, int);
374 uint32_t iwm_mvm_scan_suspend_time(struct iwm_softc *, int);
375 uint32_t iwm_mvm_scan_rxon_flags(struct iwm_softc *, int);
376 uint32_t iwm_mvm_scan_rate_n_flags(struct iwm_softc *, int, int);
377 uint16_t iwm_mvm_get_active_dwell(struct iwm_softc *, int, int);
378 uint16_t iwm_mvm_get_passive_dwell(struct iwm_softc *, int);
379 int	iwm_mvm_scan_fill_channels(struct iwm_softc *, struct iwm_scan_cmd *,
380 				int, int, int);
381 uint16_t iwm_mvm_fill_probe_req(struct iwm_softc *, struct ieee80211_frame *,
382 	const uint8_t *, int, const uint8_t *, int, const uint8_t *, int, int);
383 int	iwm_mvm_scan_request(struct iwm_softc *, int, int, uint8_t *, int);
384 void	iwm_mvm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
385 void	iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
386 					struct iwm_mac_ctx_cmd *, uint32_t);
387 int	iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *, struct iwm_mac_ctx_cmd *);
388 void	iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
389 					struct iwm_mac_data_sta *, int);
390 int	iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *, struct iwm_node *,
391 					uint32_t);
392 int	iwm_mvm_mac_ctx_send(struct iwm_softc *, struct iwm_node *, uint32_t);
393 int	iwm_mvm_mac_ctxt_add(struct iwm_softc *, struct iwm_node *);
394 int	iwm_mvm_mac_ctxt_changed(struct iwm_softc *, struct iwm_node *);
395 int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
396 int	iwm_auth(struct iwm_softc *);
397 int	iwm_assoc(struct iwm_softc *);
398 int	iwm_release(struct iwm_softc *, struct iwm_node *);
399 struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
400 void	iwm_calib_timeout(void *);
401 void	iwm_setrates(struct iwm_node *);
402 int	iwm_media_change(struct ifnet *);
403 void	iwm_newstate_cb(void *);
404 int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
405 void	iwm_endscan_cb(void *);
406 int	iwm_init_hw(struct iwm_softc *);
407 int	iwm_init(struct ifnet *);
408 void	iwm_start(struct ifnet *);
409 void	iwm_stop(struct ifnet *, int);
410 void	iwm_watchdog(struct ifnet *);
411 int	iwm_ioctl(struct ifnet *, u_long, iwm_caddr_t);
412 const char *iwm_desc_lookup(uint32_t);
413 #ifdef IWM_DEBUG
414 void	iwm_nic_error(struct iwm_softc *);
415 #endif
416 void	iwm_notif_intr(struct iwm_softc *);
417 int	iwm_intr(void *);
418 int	iwm_match(struct device *, void *, void *);
419 int	iwm_preinit(struct iwm_softc *);
420 void	iwm_attach_hook(iwm_hookarg_t);
421 void	iwm_attach(struct device *, struct device *, void *);
422 void	iwm_init_task(void *);
423 int	iwm_activate(struct device *, int);
424 void	iwm_wakeup(struct iwm_softc *);
425 
426 #if NBPFILTER > 0
427 void	iwm_radiotap_attach(struct iwm_softc *);
428 #endif
429 
430 /*
431  * Firmware parser.
432  */
433 
434 int
435 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
436 {
437 	struct iwm_fw_cscheme_list *l = (void *)data;
438 
439 	if (dlen < sizeof(*l) ||
440 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
441 		return EINVAL;
442 
443 	/* we don't actually store anything for now, always use s/w crypto */
444 
445 	return 0;
446 }
447 
448 int
449 iwm_firmware_store_section(struct iwm_softc *sc,
450 	enum iwm_ucode_type type, uint8_t *data, size_t dlen)
451 {
452 	struct iwm_fw_sects *fws;
453 	struct iwm_fw_onesect *fwone;
454 
455 	if (type >= IWM_UCODE_TYPE_MAX)
456 		return EINVAL;
457 	if (dlen < sizeof(uint32_t))
458 		return EINVAL;
459 
460 	fws = &sc->sc_fw.fw_sects[type];
461 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
462 		return EINVAL;
463 
464 	fwone = &fws->fw_sect[fws->fw_count];
465 
466 	/* first 32bit are device load offset */
467 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
468 
469 	/* rest is data */
470 	fwone->fws_data = data + sizeof(uint32_t);
471 	fwone->fws_len = dlen - sizeof(uint32_t);
472 
473 	fws->fw_count++;
474 	fws->fw_totlen += fwone->fws_len;
475 
476 	return 0;
477 }
478 
479 /* iwlwifi: iwl-drv.c */
/*
 * On-disk layout of an IWM_UCODE_TLV_DEF_CALIB section: the ucode image
 * type (little-endian) followed by its default calibration triggers.
 * Parsed by iwm_set_default_calib().
 */
480 struct iwm_tlv_calib_data {
481 	uint32_t ucode_type;
482 	struct iwm_tlv_calib_ctrl calib;
483 } __packed;
484 
485 int
486 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
487 {
488 	const struct iwm_tlv_calib_data *def_calib = data;
489 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
490 
491 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
492 		DPRINTF(("%s: Wrong ucode_type %u for default "
493 		    "calibration.\n", DEVNAME(sc), ucode_type));
494 		return EINVAL;
495 	}
496 
497 	sc->sc_default_calib[ucode_type].flow_trigger =
498 	    def_calib->calib.flow_trigger;
499 	sc->sc_default_calib[ucode_type].event_trigger =
500 	    def_calib->calib.event_trigger;
501 
502 	return 0;
503 }
504 
/*
 * Release the raw firmware image and clear the parsed section table.
 * fw_status is deliberately left untouched so that other threads
 * sleeping on it in iwm_read_firmware() still observe a consistent
 * loading state.
 */
505 void
506 iwm_fw_info_free(struct iwm_fw_info *fw)
507 {
508 	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
509 	fw->fw_rawdata = NULL;
510 	fw->fw_rawsize = 0;
511 	/* don't touch fw->fw_status */
512 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
513 }
514 
/*
 * Load the firmware image via loadfirmware(9) and parse its TLV
 * contents into sc->sc_fw.  Concurrent callers are serialized through
 * fw->fw_status with tsleep()/wakeup() on &sc->sc_fw.
 *
 * Returns 0 on success; on any failure the raw image is freed,
 * fw_status is reset, and an errno value is returned.
 */
515 int
516 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
517 {
518 	struct iwm_fw_info *fw = &sc->sc_fw;
519 	struct iwm_tlv_ucode_header *uhdr;
520 	struct iwm_ucode_tlv tlv;
521 	enum iwm_ucode_tlv_type tlv_type;
522 	uint8_t *data;
523 	int error;
524 	size_t len;
525 
	/* Already parsed?  Nothing to do unless the INIT image is wanted. */
526 	if (fw->fw_status == IWM_FW_STATUS_DONE &&
527 	    ucode_type != IWM_UCODE_TYPE_INIT)
528 		return 0;
529 
	/* Wait for any in-flight parse to finish, then claim the slot. */
530 	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
531 		tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
532 	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
533 
534 	if (fw->fw_rawdata != NULL)
535 		iwm_fw_info_free(fw);
536 
537 	/*
538 	 * Load firmware into driver memory.
539 	 * fw_rawdata and fw_rawsize will be set.
540 	 */
541 	error = loadfirmware(sc->sc_fwname,
542 	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
543 	if (error != 0) {
544 		printf("%s: could not read firmware %s (error %d)\n",
545 		    DEVNAME(sc), sc->sc_fwname, error);
546 		goto out;
547 	}
548 
549 	/*
550 	 * Parse firmware contents
551 	 */
552 
553 	uhdr = (void *)fw->fw_rawdata;
	/*
	 * TLV images start with a zero word before the magic (presumably
	 * the pre-TLV version field — confirm against iwlwifi iwl-drv.c).
	 */
554 	if (*(uint32_t *)fw->fw_rawdata != 0
555 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
556 		printf("%s: invalid firmware %s\n",
557 		    DEVNAME(sc), sc->sc_fwname);
558 		error = EINVAL;
559 		goto out;
560 	}
561 
562 	sc->sc_fwver = le32toh(uhdr->ver);
563 	data = uhdr->data;
564 	len = fw->fw_rawsize - sizeof(*uhdr);
565 
	/* Walk the TLV list; each entry is a (type, length, data) triple. */
566 	while (len >= sizeof(tlv)) {
567 		size_t tlv_len;
568 		void *tlv_data;
569 
570 		memcpy(&tlv, data, sizeof(tlv));
571 		tlv_len = le32toh(tlv.length);
572 		tlv_type = le32toh(tlv.type);
573 
574 		len -= sizeof(tlv);
575 		data += sizeof(tlv);
576 		tlv_data = data;
577 
578 		if (len < tlv_len) {
579 			printf("%s: firmware too short: %zu bytes\n",
580 			    DEVNAME(sc), len);
581 			error = EINVAL;
582 			goto parse_out;
583 		}
584 
585 		switch ((int)tlv_type) {
586 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
587 			if (tlv_len < sizeof(uint32_t)) {
588 				error = EINVAL;
589 				goto parse_out;
590 			}
591 			sc->sc_capa_max_probe_len
592 			    = le32toh(*(uint32_t *)tlv_data);
593 			/* limit it to something sensible */
594 			if (sc->sc_capa_max_probe_len > (1<<16)) {
595 				DPRINTF(("%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
596 				    "ridiculous\n", DEVNAME(sc)));
597 				error = EINVAL;
598 				goto parse_out;
599 			}
600 			break;
601 		case IWM_UCODE_TLV_PAN:
602 			if (tlv_len) {
603 				error = EINVAL;
604 				goto parse_out;
605 			}
606 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
607 			break;
608 		case IWM_UCODE_TLV_FLAGS:
609 			if (tlv_len < sizeof(uint32_t)) {
610 				error = EINVAL;
611 				goto parse_out;
612 			}
613 			/*
614 			 * Apparently there can be many flags, but Linux driver
615 			 * parses only the first one, and so do we.
616 			 *
617 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
618 			 * Intentional or a bug?  Observations from
619 			 * current firmware file:
620 			 *  1) TLV_PAN is parsed first
621 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
622 			 * ==> this resets TLV_PAN to itself... hnnnk
623 			 */
624 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
625 			break;
626 		case IWM_UCODE_TLV_CSCHEME:
627 			if ((error = iwm_store_cscheme(sc,
628 			    tlv_data, tlv_len)) != 0)
629 				goto parse_out;
630 			break;
631 		case IWM_UCODE_TLV_NUM_OF_CPU:
632 			if (tlv_len != sizeof(uint32_t)) {
633 				error = EINVAL;
634 				goto parse_out;
635 			}
636 			if (le32toh(*(uint32_t*)tlv_data) != 1) {
637 				DPRINTF(("%s: driver supports "
638 				    "only TLV_NUM_OF_CPU == 1", DEVNAME(sc)));
639 				error = EINVAL;
640 				goto parse_out;
641 			}
642 			break;
		/* Firmware image sections for the three ucode flavours. */
643 		case IWM_UCODE_TLV_SEC_RT:
644 			if ((error = iwm_firmware_store_section(sc,
645 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0)
646 				goto parse_out;
647 			break;
648 		case IWM_UCODE_TLV_SEC_INIT:
649 			if ((error = iwm_firmware_store_section(sc,
650 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0)
651 				goto parse_out;
652 			break;
653 		case IWM_UCODE_TLV_SEC_WOWLAN:
654 			if ((error = iwm_firmware_store_section(sc,
655 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0)
656 				goto parse_out;
657 			break;
658 		case IWM_UCODE_TLV_DEF_CALIB:
659 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
660 				error = EINVAL;
661 				goto parse_out;
662 			}
663 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0)
664 				goto parse_out;
665 			break;
666 		case IWM_UCODE_TLV_PHY_SKU:
667 			if (tlv_len != sizeof(uint32_t)) {
668 				error = EINVAL;
669 				goto parse_out;
670 			}
671 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
672 			break;
673 
674 		case IWM_UCODE_TLV_API_CHANGES_SET:
675 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
676 			/* ignore, not used by current driver */
677 			break;
678 
679 		default:
680 			DPRINTF(("%s: unknown firmware section %d, abort\n",
681 			    DEVNAME(sc), tlv_type));
682 			error = EINVAL;
683 			goto parse_out;
684 		}
685 
		/* TLV payloads are padded to 32-bit alignment. */
686 		len -= roundup(tlv_len, 4);
687 		data += roundup(tlv_len, 4);
688 	}
689 
690 	KASSERT(error == 0);
691 
692  parse_out:
693 	if (error) {
694 		printf("%s: firmware parse error %d, "
695 		    "section type %d\n", DEVNAME(sc), error, tlv_type);
696 	}
697 
	/* This driver only supports firmware with the new power API. */
698 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
699 		printf("%s: device uses unsupported power ops\n", DEVNAME(sc));
700 		error = ENOTSUP;
701 	}
702 
703  out:
	/* On failure drop the raw image; either way wake other waiters. */
704 	if (error) {
705 		fw->fw_status = IWM_FW_STATUS_NONE;
706 		if (fw->fw_rawdata != NULL)
707 			iwm_fw_info_free(fw);
708 	} else
709 		fw->fw_status = IWM_FW_STATUS_DONE;
710 	wakeup(&sc->sc_fw);
711 
712 	return error;
713 }
714 
715 /*
716  * basic device access
717  */
718 
719 uint32_t
720 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
721 {
722 	IWM_WRITE(sc,
723 	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
724 	IWM_BARRIER_READ_WRITE(sc);
725 	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
726 }
727 
728 void
729 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
730 {
731 	IWM_WRITE(sc,
732 	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
733 	IWM_BARRIER_WRITE(sc);
734 	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
735 }
736 
737 /* iwlwifi: pcie/trans.c */
738 int
739 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
740 {
741 	int offs, ret = 0;
742 	uint32_t *vals = buf;
743 
744 	if (iwm_nic_lock(sc)) {
745 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
746 		for (offs = 0; offs < dwords; offs++)
747 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
748 		iwm_nic_unlock(sc);
749 	} else {
750 		ret = EBUSY;
751 	}
752 	return ret;
753 }
754 
755 /* iwlwifi: pcie/trans.c */
756 int
757 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
758 {
759 	int offs;
760 	const uint32_t *vals = buf;
761 
762 	if (iwm_nic_lock(sc)) {
763 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
764 		/* WADDR auto-increments */
765 		for (offs = 0; offs < dwords; offs++) {
766 			uint32_t val = vals ? vals[offs] : 0;
767 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
768 		}
769 		iwm_nic_unlock(sc);
770 	} else {
771 		DPRINTF(("%s: write_mem failed\n", DEVNAME(sc)));
772 		return EBUSY;
773 	}
774 	return 0;
775 }
776 
/* Convenience wrapper: write a single 32-bit word to device memory. */
777 int
778 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
779 {
780 	return iwm_write_mem(sc, addr, &val, 1);
781 }
782 
783 int
784 iwm_poll_bit(struct iwm_softc *sc, int reg,
785 	uint32_t bits, uint32_t mask, int timo)
786 {
787 	for (;;) {
788 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
789 			return 1;
790 		}
791 		if (timo < 10) {
792 			return 0;
793 		}
794 		timo -= 10;
795 		DELAY(10);
796 	}
797 }
798 
/*
 * Request MAC access and wait (up to 15 ms) for the MAC clock to come
 * up so that registers can be accessed safely.  Returns 1 on success,
 * 0 on failure — in which case the device is prodded with a forced NMI.
 * Paired with iwm_nic_unlock().
 */
799 int
800 iwm_nic_lock(struct iwm_softc *sc)
801 {
802 	int rv = 0;
803 
804 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
805 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
806 
807 	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
808 	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
809 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
810 	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
811 	    	rv = 1;
812 	} else {
813 		/* jolt */
814 		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
815 	}
816 
817 	return rv;
818 }
819 
/* Drop the MAC access request taken by iwm_nic_lock(). */
820 void
821 iwm_nic_unlock(struct iwm_softc *sc)
822 {
823 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
824 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
825 }
826 
827 void
828 iwm_set_bits_mask_prph(struct iwm_softc *sc,
829 	uint32_t reg, uint32_t bits, uint32_t mask)
830 {
831 	uint32_t val;
832 
833 	/* XXX: no error path? */
834 	if (iwm_nic_lock(sc)) {
835 		val = iwm_read_prph(sc, reg) & mask;
836 		val |= bits;
837 		iwm_write_prph(sc, reg, val);
838 		iwm_nic_unlock(sc);
839 	}
840 }
841 
/* Set 'bits' in a PRPH register, preserving all other bits. */
842 void
843 iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
844 {
845 	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
846 }
847 
/* Clear 'bits' in a PRPH register, preserving all other bits. */
848 void
849 iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
850 {
851 	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
852 }
853 
854 /*
855  * DMA resource routines
856  */
857 
858 int
859 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
860     bus_size_t size, bus_size_t alignment)
861 {
862 	int nsegs, error;
863 	caddr_t va;
864 
865 	dma->tag = tag;
866 	dma->size = size;
867 
868 	error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
869 	    &dma->map);
870 	if (error != 0)
871 		goto fail;
872 
873 	error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
874 	    BUS_DMA_NOWAIT);
875 	if (error != 0)
876 		goto fail;
877 
878 	error = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
879 	    BUS_DMA_NOWAIT);
880 	if (error != 0)
881 		goto fail;
882 	dma->vaddr = va;
883 
884 	error = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
885 	    BUS_DMA_NOWAIT);
886 	if (error != 0)
887 		goto fail;
888 
889 	memset(dma->vaddr, 0, size);
890 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
891 	dma->paddr = dma->map->dm_segs[0].ds_addr;
892 
893 	return 0;
894 
895 fail:	iwm_dma_contig_free(dma);
896 	return error;
897 }
898 
/*
 * Release all resources held by a buffer set up with
 * iwm_dma_contig_alloc().  Safe on a partially constructed buffer;
 * dma->map is reset to NULL so repeated calls are no-ops.
 * NOTE(review): if dma->vaddr is NULL but a segment was already
 * allocated, the segment is not freed here -- the alloc path must not
 * leave the struct in that state.
 */
void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}
915 
/* fwmem is used to load firmware onto the card */
int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
}
924 
/* Free the firmware staging DMA buffer. */
void
iwm_free_fwmem(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->fw_dma);
}
930 
931 /* tx scheduler rings.  not used? */
932 int
933 iwm_alloc_sched(struct iwm_softc *sc)
934 {
935 	int rv;
936 
937 	/* TX scheduler rings must be aligned on a 1KB boundary. */
938 	rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
939 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
940 	return rv;
941 }
942 
/* Free the TX scheduler byte-count table DMA memory. */
void
iwm_free_sched(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->sched_dma);
}
948 
/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
int
iwm_alloc_kw(struct iwm_softc *sc)
{
	/* One 4KB page, 4KB aligned. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
955 
/* Free the keep-warm page. */
void
iwm_free_kw(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->kw_dma);
}
961 
/* interrupt cause table */
int
iwm_alloc_ict(struct iwm_softc *sc)
{
	/* Alignment must match the shift used to program its address. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
969 
/* Free the interrupt cause table. */
void
iwm_free_ict(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->ict_dma);
}
975 
976 int
977 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
978 {
979 	bus_size_t size;
980 	int i, error;
981 
982 	ring->cur = 0;
983 
984 	/* Allocate RX descriptors (256-byte aligned). */
985 	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
986 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
987 	if (error != 0) {
988 		printf("%s: could not allocate RX ring DMA memory\n",
989 		    DEVNAME(sc));
990 		goto fail;
991 	}
992 	ring->desc = ring->desc_dma.vaddr;
993 
994 	/* Allocate RX status area (16-byte aligned). */
995 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
996 	    sizeof(*ring->stat), 16);
997 	if (error != 0) {
998 		printf("%s: could not allocate RX status DMA memory\n",
999 		    DEVNAME(sc));
1000 		goto fail;
1001 	}
1002 	ring->stat = ring->stat_dma.vaddr;
1003 
1004 	/*
1005 	 * Allocate and map RX buffers.
1006 	 */
1007 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1008 		struct iwm_rx_data *data = &ring->data[i];
1009 
1010 		memset(data, 0, sizeof(*data));
1011 		error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
1012 		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1013 		    &data->map);
1014 		if (error != 0) {
1015 			printf("%s: could not create RX buf DMA map\n",
1016 			    DEVNAME(sc));
1017 			goto fail;
1018 		}
1019 
1020 		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1021 			goto fail;
1022 		}
1023 	}
1024 	return 0;
1025 
1026 fail:	iwm_free_rx_ring(sc, ring);
1027 	return error;
1028 }
1029 
/*
 * Stop RX DMA and wait (up to ~10ms) for channel 0 to report idle,
 * then rewind the software ring pointer.  Buffers stay attached.
 */
void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
				break;
			DELAY(10);
		}
		iwm_nic_unlock(sc);
	}
	ring->cur = 0;
}
1047 
1048 void
1049 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1050 {
1051 	int i;
1052 
1053 	iwm_dma_contig_free(&ring->desc_dma);
1054 	iwm_dma_contig_free(&ring->stat_dma);
1055 
1056 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1057 		struct iwm_rx_data *data = &ring->data[i];
1058 
1059 		if (data->m != NULL) {
1060 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1061 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1062 			bus_dmamap_unload(sc->sc_dmat, data->map);
1063 			m_freem(data->m);
1064 		}
1065 		if (data->map != NULL)
1066 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1067 	}
1068 }
1069 
/*
 * Set up a TX ring: descriptor area for all rings; command buffers and
 * per-slot DMA maps only for the EDCA and command queues.  On failure,
 * everything allocated so far is released via iwm_free_tx_ring().
 */
int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		printf("%s: could not allocate TX ring DMA memory\n",
		    DEVNAME(sc));
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		printf("%s: could not allocate TX cmd DMA memory\n", DEVNAME(sc));
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Precompute the device command / scratch address of each slot. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    IWM_NUM_OF_TBS, MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create TX buf DMA map\n", DEVNAME(sc));
			goto fail;
		}
	}
	/* The loop must have walked exactly over the command area. */
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1128 
1129 void
1130 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1131 {
1132 	int i;
1133 
1134 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1135 		struct iwm_tx_data *data = &ring->data[i];
1136 
1137 		if (data->m != NULL) {
1138 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1139 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1140 			bus_dmamap_unload(sc->sc_dmat, data->map);
1141 			m_freem(data->m);
1142 			data->m = NULL;
1143 		}
1144 	}
1145 	/* Clear TX descriptors. */
1146 	memset(ring->desc, 0, ring->desc_dma.size);
1147 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1148 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1149 	sc->qfullmsk &= ~(1 << ring->qid);
1150 	ring->queued = 0;
1151 	ring->cur = 0;
1152 }
1153 
1154 void
1155 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1156 {
1157 	int i;
1158 
1159 	iwm_dma_contig_free(&ring->desc_dma);
1160 	iwm_dma_contig_free(&ring->cmd_dma);
1161 
1162 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1163 		struct iwm_tx_data *data = &ring->data[i];
1164 
1165 		if (data->m != NULL) {
1166 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1167 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1168 			bus_dmamap_unload(sc->sc_dmat, data->map);
1169 			m_freem(data->m);
1170 		}
1171 		if (data->map != NULL)
1172 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1173 	}
1174 }
1175 
1176 /*
1177  * High-level hardware frobbing routines
1178  */
1179 
/* Mask all interrupts except RF-kill and program the mask register. */
void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1186 
1187 int
1188 iwm_check_rfkill(struct iwm_softc *sc)
1189 {
1190 	uint32_t v;
1191 	int s;
1192 	int rv;
1193 
1194 	s = splnet();
1195 
1196 	/*
1197 	 * "documentation" is not really helpful here:
1198 	 *  27:	HW_RF_KILL_SW
1199 	 *	Indicates state of (platform's) hardware RF-Kill switch
1200 	 *
1201 	 * But apparently when it's off, it's on ...
1202 	 */
1203 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1204 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1205 	if (rv) {
1206 		sc->sc_flags |= IWM_FLAG_RFKILL;
1207 	} else {
1208 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
1209 	}
1210 
1211 	splx(s);
1212 	return rv;
1213 }
1214 
/* Enable the default set of interrupts and remember the mask. */
void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1221 
/* Re-program the interrupt mask last saved in sc_intmask. */
void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1227 
/* Mask all interrupts and acknowledge any that are pending. */
void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}
1242 
/*
 * Clear and re-arm the interrupt cause table (ICT), program its DMA
 * address into the device, and switch the driver to ICT mode.
 */
void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1265 
#define IWM_HW_READY_TIMEOUT 50
/*
 * Request NIC-ready and poll (up to IWM_HW_READY_TIMEOUT us) for the
 * hardware to acknowledge.  Returns nonzero once the bit reads back set.
 */
int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	return iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
}
#undef IWM_HW_READY_TIMEOUT
1279 
1280 int
1281 iwm_prepare_card_hw(struct iwm_softc *sc)
1282 {
1283 	int rv = 0;
1284 	int t = 0;
1285 
1286 	if (iwm_set_hw_ready(sc))
1287 		goto out;
1288 
1289 	/* If HW is not ready, prepare the conditions to check again */
1290 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1291 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1292 
1293 	do {
1294 		if (iwm_set_hw_ready(sc))
1295 			goto out;
1296 		DELAY(200);
1297 		t += 200;
1298 	} while (t < 150000);
1299 
1300 	rv = ETIMEDOUT;
1301 
1302  out:
1303 	return rv;
1304 }
1305 
/*
 * Mirror the PCIe ASPM L1 setting into the device's GIO register;
 * L0S is enabled only when L1 is active, matching iwlwifi behavior.
 */
void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t reg;

	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCI_PCIE_LCSR);
	if (reg & PCI_PCIE_LCSR_ASPM_L1) {
		/* Um the Linux driver prints "Disabling L0S for this one ... */
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		/* ... and "Enabling" here */
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}
}
1323 
1324 /*
1325  * Start up NIC's basic functionality after it has been reset
1326  * (e.g. after platform boot, or shutdown via iwm_pcie_apm_stop())
1327  * NOTE:  This does not load uCode nor start the embedded processor
1328  */
1329 int
1330 iwm_apm_init(struct iwm_softc *sc)
1331 {
1332 	int error = 0;
1333 
1334 	DPRINTF(("iwm apm start\n"));
1335 
1336 	/* Disable L0S exit timer (platform NMI Work/Around) */
1337 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1338 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1339 
1340 	/*
1341 	 * Disable L0s without affecting L1;
1342 	 *  don't wait for ICH L0s (ICH bug W/A)
1343 	 */
1344 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1345 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1346 
1347 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1348 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1349 
1350 	/*
1351 	 * Enable HAP INTA (interrupt from management bus) to
1352 	 * wake device's PCI Express link L1a -> L0s
1353 	 */
1354 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1355 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1356 
1357 	iwm_apm_config(sc);
1358 
1359 #if 0 /* not for 7k */
1360 	/* Configure analog phase-lock-loop before activating to D0A */
1361 	if (trans->cfg->base_params->pll_cfg_val)
1362 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1363 		    trans->cfg->base_params->pll_cfg_val);
1364 #endif
1365 
1366 	/*
1367 	 * Set "initialization complete" bit to move adapter from
1368 	 * D0U* --> D0A* (powered-up active) state.
1369 	 */
1370 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1371 
1372 	/*
1373 	 * Wait for clock stabilization; once stabilized, access to
1374 	 * device-internal resources is supported, e.g. iwm_write_prph()
1375 	 * and accesses to uCode SRAM.
1376 	 */
1377 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1378 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1379 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1380 		printf("%s: timeout waiting for clock stabilization\n",
1381 		    DEVNAME(sc));
1382 		goto out;
1383 	}
1384 
1385 	if (sc->host_interrupt_operation_mode) {
1386 		/*
1387 		 * This is a bit of an abuse - This is needed for 7260 / 3160
1388 		 * only check host_interrupt_operation_mode even if this is
1389 		 * not related to host_interrupt_operation_mode.
1390 		 *
1391 		 * Enable the oscillator to count wake up time for L1 exit. This
1392 		 * consumes slightly more power (100uA) - but allows to be sure
1393 		 * that we wake up from L1 on time.
1394 		 *
1395 		 * This looks weird: read twice the same register, discard the
1396 		 * value, set a bit, and yet again, read that same register
1397 		 * just to discard the value. But that's the way the hardware
1398 		 * seems to like it.
1399 		 */
1400 		iwm_read_prph(sc, IWM_OSC_CLK);
1401 		iwm_read_prph(sc, IWM_OSC_CLK);
1402 		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1403 		iwm_read_prph(sc, IWM_OSC_CLK);
1404 		iwm_read_prph(sc, IWM_OSC_CLK);
1405 	}
1406 
1407 	/*
1408 	 * Enable DMA clock and wait for it to stabilize.
1409 	 *
1410 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1411 	 * do not disable clocks.  This preserves any hardware bits already
1412 	 * set by default in "CLK_CTRL_REG" after reset.
1413 	 */
1414 	iwm_write_prph(sc, IWM_APMG_CLK_EN_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1415 	//kpause("iwmapm", 0, mstohz(20), NULL);
1416 	DELAY(20);
1417 
1418 	/* Disable L1-Active */
1419 	iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1420 	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1421 
1422 	/* Clear the interrupt in APMG if the NIC is in RFKILL */
1423 	iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1424 	    IWM_APMG_RTC_INT_STT_RFKILL);
1425 
1426  out:
1427 	if (error)
1428 		printf("%s: apm init error %d\n", DEVNAME(sc), error);
1429 	return error;
1430 }
1431 
/* iwlwifi/pcie/trans.c */
/*
 * Stop the device's busmaster DMA and wait (up to 100us) for the
 * master-disabled acknowledgement.
 */
void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		printf("%s: timeout waiting for master\n", DEVNAME(sc));
        DPRINTF(("iwm apm stop\n"));
}
1445 
/* iwlwifi pcie/trans.c */
/*
 * Prepare the card, perform a full software reset, and bring the APM
 * up.  Leaves only the RF-kill interrupt enabled.  Returns 0 or errno.
 */
int
iwm_start_hw(struct iwm_softc *sc)
{
	int error;

	if ((error = iwm_prepare_card_hw(sc)) != 0)
		return error;

	/* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_SW_RESET |
	    IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
	DELAY(10);

	if ((error = iwm_apm_init(sc)) != 0)
		return error;

	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return 0;
}
1469 
/* iwlwifi pcie/trans.c */

/*
 * Full device shutdown: mask interrupts, stop TX/RX DMA, reset all
 * rings, power down the busmaster clocks and put the APM to sleep.
 * The RF-kill interrupt is re-armed at the end so switch changes are
 * still noticed while the device is down.  The ordering below follows
 * the reference iwlwifi implementation and is intentional.
 */
void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Wait (up to 4ms) for the channel to go idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1540 
/* iwlwifi pcie/trans.c (always main power) */
/* Select VMAIN as the device's power source. */
void
iwm_set_pwr(struct iwm_softc *sc)
{
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
}
1548 
/* iwlwifi: mvm/ops.c */
/*
 * Program the HW_IF_CONFIG register from the hardware revision and
 * the radio configuration advertised by the firmware image.
 */
void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Extract the radio type/step/dash fields from the fw phy config. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash));

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
	    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
1588 
/*
 * Program the RX DMA engine: stop it, point it at the descriptor and
 * status areas, then enable it with a 4KB receive-buffer size.
 * Returns EBUSY if NIC access cannot be obtained.
 */
int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	/*
	 * Note: Linux driver also sets this:
	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	 *
	 * It causes weird behavior.  YMMV.
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1647 
/*
 * Program the TX side: deactivate the scheduler, point the device at
 * the keep-warm page, and load each ring's descriptor base address.
 * Returns EBUSY if NIC access cannot be obtained.
 */
int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		DPRINTF(("loading ring %d descriptors (%p) at %lx\n",
		    qid, txq->desc, txq->desc_dma.paddr >> 8));
	}
	iwm_nic_unlock(sc);

	return 0;
}
1676 
1677 int
1678 iwm_nic_init(struct iwm_softc *sc)
1679 {
1680 	int error;
1681 
1682 	iwm_apm_init(sc);
1683 	iwm_set_pwr(sc);
1684 
1685 	iwm_mvm_nic_config(sc);
1686 
1687 	if ((error = iwm_nic_rx_init(sc)) != 0)
1688 		return error;
1689 
1690 	/*
1691 	 * Ditto for TX, from iwn
1692 	 */
1693 	if ((error = iwm_nic_tx_init(sc)) != 0)
1694 		return error;
1695 
1696 	DPRINTF(("shadow registers enabled\n"));
1697 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1698 
1699 	return 0;
1700 }
1701 
/* Hardware TX FIFO numbers, per access category plus multicast. */
enum iwm_mvm_tx_fifo {
	IWM_MVM_TX_FIFO_BK = 0,	/* background */
	IWM_MVM_TX_FIFO_BE,	/* best effort */
	IWM_MVM_TX_FIFO_VI,	/* video */
	IWM_MVM_TX_FIFO_VO,	/* voice */
	IWM_MVM_TX_FIFO_MCAST = 5,
};
1709 
/* Map EDCA access category index to the hardware TX FIFO number. */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
1716 
/*
 * Activate TX queue 'qid' and bind it to hardware FIFO 'fifo':
 * deactivate, reset read/write pointers, clear its scheduler context,
 * program window size / frame limit, then mark it active.
 */
void
iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
		return; /* XXX return EBUSY */
	}

	/* unactivate before configuration */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

	/* The command queue is not chained. */
	if (qid != IWM_MVM_CMD_QUEUE) {
		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
	}

	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

	/* Reset hardware write pointer and scheduler read pointer. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
	/* Set scheduler window size and frame limit. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
	    sizeof(uint32_t),
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Activate the queue on its FIFO. */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWM_SCD_QUEUE_STTS_REG_MSK);

	iwm_nic_unlock(sc);

	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
}
1759 
/*
 * Finish bringing the device up after the firmware has reported
 * "alive": verify the scheduler SRAM base, reset the ICT, clear the
 * scheduler context, enable the command queue and all DMA channels.
 * Returns 0 on success or an errno value.
 */
int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int error, chnl;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Firmware and driver must agree on the scheduler SRAM base. */
	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
		DPRINTF(("%s: sched addr mismatch", DEVNAME(sc)));
		error = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	error = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (error)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* enable command channel */
	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);

	/* Activate all TX queues in the scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
 	iwm_nic_unlock(sc);
	return error;
}
1815 
1816 /*
1817  * PHY db
1818  * iwlwifi/iwl-phy-db.c
1819  */
1820 
1821 /*
1822  * BEGIN iwl-phy-db.c
1823  */
1824 
/* Section types stored in the PHY database (see iwl-phy-db.c). */
enum iwm_phy_db_section_type {
	IWM_PHY_DB_CFG = 1,
	IWM_PHY_DB_CALIB_NCH,		/* non-channel-specific calibration */
	IWM_PHY_DB_UNUSED,
	IWM_PHY_DB_CALIB_CHG_PAPD,	/* per-channel-group PAPD calibration */
	IWM_PHY_DB_CALIB_CHG_TXP,	/* per-channel-group TX power */
	IWM_PHY_DB_MAX
};
1833 
#define IWM_PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */

/*
 * phy db - configure operational ucode
 */
struct iwm_phy_db_cmd {
	uint16_t type;		/* enum iwm_phy_db_section_type, little endian */
	uint16_t length;	/* payload length in bytes, little endian */
	uint8_t data[];		/* section payload */
} __packed;
1844 
/* for parsing of tx power channel group data that comes from the firmware*/
struct iwm_phy_db_chg_txp {
	uint32_t space;			/* padding/reserved, unused here */
	uint16_t max_channel_idx;	/* highest channel index in group */
} __packed;
1850 
1851 /*
1852  * phy db - Receive phy db chunk after calibrations
1853  */
1854 struct iwm_calib_res_notif_phy_db {
1855 	uint16_t type;
1856 	uint16_t length;
1857 	uint8_t data[];
1858 } __packed;
1859 
1860 /*
1861  * get phy db section: returns a pointer to a phy db section specified by
1862  * type and channel group id.
1863  */
1864 struct iwm_phy_db_entry *
1865 iwm_phy_db_get_section(struct iwm_softc *sc,
1866 	enum iwm_phy_db_section_type type, uint16_t chg_id)
1867 {
1868 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
1869 
1870 	if (type >= IWM_PHY_DB_MAX)
1871 		return NULL;
1872 
1873 	switch (type) {
1874 	case IWM_PHY_DB_CFG:
1875 		return &phy_db->cfg;
1876 	case IWM_PHY_DB_CALIB_NCH:
1877 		return &phy_db->calib_nch;
1878 	case IWM_PHY_DB_CALIB_CHG_PAPD:
1879 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
1880 			return NULL;
1881 		return &phy_db->calib_ch_group_papd[chg_id];
1882 	case IWM_PHY_DB_CALIB_CHG_TXP:
1883 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
1884 			return NULL;
1885 		return &phy_db->calib_ch_group_txp[chg_id];
1886 	default:
1887 		return NULL;
1888 	}
1889 	return NULL;
1890 }
1891 
1892 int
1893 iwm_phy_db_set_section(struct iwm_softc *sc,
1894 	struct iwm_calib_res_notif_phy_db *phy_db_notif)
1895 {
1896 	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
1897 	uint16_t size  = le16toh(phy_db_notif->length);
1898 	struct iwm_phy_db_entry *entry;
1899 	uint16_t chg_id = 0;
1900 
1901 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
1902 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
1903 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
1904 
1905 	entry = iwm_phy_db_get_section(sc, type, chg_id);
1906 	if (!entry)
1907 		return EINVAL;
1908 
1909 	if (entry->data)
1910 		free(entry->data, M_DEVBUF, entry->size);
1911 	entry->data = malloc(size, M_DEVBUF, M_NOWAIT);
1912 	if (!entry->data) {
1913 		entry->size = 0;
1914 		return ENOMEM;
1915 	}
1916 	memcpy(entry->data, phy_db_notif->data, size);
1917 	entry->size = size;
1918 
1919 	DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d , Size: %d, data: %p\n",
1920 	    __func__, __LINE__, type, size, entry->data));
1921 
1922 	return 0;
1923 }
1924 
/*
 * Return 1 when ch_id is a channel number the PHY db knows about:
 * 2GHz channels up to 14 (the bounds check also admits 0, as in the
 * original), 5GHz channels 36-64 and 100-140 on multiples of four,
 * and 145-165 where the number is 4n+1.
 */
int
iwm_is_valid_channel(uint16_t ch_id)
{
	if (ch_id <= 14)
		return 1;
	if (ch_id >= 36 && ch_id <= 64 && (ch_id % 4) == 0)
		return 1;
	if (ch_id >= 100 && ch_id <= 140 && (ch_id % 4) == 0)
		return 1;
	if (ch_id >= 145 && ch_id <= 165 && (ch_id % 4) == 1)
		return 1;
	return 0;
}
1935 
/*
 * Map a channel number onto a dense channel index; each band is
 * mapped onto a contiguous index range.  Returns 0xff for channel
 * numbers that iwm_is_valid_channel() rejects.
 */
uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
	uint8_t idx;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id <= 14)
		idx = ch_id - 1;
	else if (ch_id <= 64)
		idx = (ch_id + 20) / 4;
	else if (ch_id <= 140)
		idx = (ch_id - 12) / 4;
	else
		idx = (ch_id - 13) / 4;
	return idx;
}
1950 
1951 
/*
 * Map a channel number to its PAPD calibration channel group
 * (0: 2GHz, 1: 36-64, 2: 100-140, 3: everything else valid).
 * Returns 0xff for invalid channel numbers.
 */
uint16_t
iwm_channel_id_to_papd(uint16_t ch_id)
{
	uint16_t group;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id >= 1 && ch_id <= 14)
		group = 0;
	else if (ch_id >= 36 && ch_id <= 64)
		group = 1;
	else if (ch_id >= 100 && ch_id <= 140)
		group = 2;
	else
		group = 3;
	return group;
}
1966 
/*
 * Map a channel number to its TX-power calibration channel group by
 * scanning the groups stored in the PHY db.  Returns 0xff when the
 * channel is invalid or the group data has not been received yet.
 */
uint16_t
iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
{
	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
	struct iwm_phy_db_chg_txp *txp_chg;
	int i;
	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);

	if (ch_index == 0xff)
		return 0xff;

	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
		if (!txp_chg)
			return 0xff;
		/*
		 * Looking for the first channel group that its max channel is
		 * higher then wanted channel.
		 */
		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
			return i;
	}
	return 0xff;
}
1991 
1992 int
1993 iwm_phy_db_get_section_data(struct iwm_softc *sc,
1994 	uint32_t type, uint8_t **data, uint16_t *size, uint16_t ch_id)
1995 {
1996 	struct iwm_phy_db_entry *entry;
1997 	uint16_t ch_group_id = 0;
1998 
1999 	/* find wanted channel group */
2000 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2001 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2002 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2003 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2004 
2005 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2006 	if (!entry)
2007 		return EINVAL;
2008 
2009 	*data = entry->data;
2010 	*size = entry->size;
2011 
2012 	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
2013 		       __func__, __LINE__, type, *size));
2014 
2015 	return 0;
2016 }
2017 
2018 int
2019 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type,
2020 	uint16_t length, void *data)
2021 {
2022 	struct iwm_phy_db_cmd phy_db_cmd;
2023 	struct iwm_host_cmd cmd = {
2024 		.id = IWM_PHY_DB_CMD,
2025 		.flags = IWM_CMD_SYNC,
2026 	};
2027 
2028 	DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n", type, length));
2029 
2030 	/* Set phy db cmd variables */
2031 	phy_db_cmd.type = le16toh(type);
2032 	phy_db_cmd.length = le16toh(length);
2033 
2034 	/* Set hcmd variables */
2035 	cmd.data[0] = &phy_db_cmd;
2036 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2037 	cmd.data[1] = data;
2038 	cmd.len[1] = length;
2039 	cmd.dataflags[1] = IWM_HCMD_DFL_NOCOPY;
2040 
2041 	return iwm_send_cmd(sc, &cmd);
2042 }
2043 
2044 int
2045 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2046 	enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2047 {
2048 	uint16_t i;
2049 	int err;
2050 	struct iwm_phy_db_entry *entry;
2051 
2052 	/* Send all the channel-specific groups to operational fw */
2053 	for (i = 0; i < max_ch_groups; i++) {
2054 		entry = iwm_phy_db_get_section(sc, type, i);
2055 		if (!entry)
2056 			return EINVAL;
2057 
2058 		if (!entry->size)
2059 			continue;
2060 
2061 		/* Send the requested PHY DB section */
2062 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2063 		if (err) {
2064 			DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2065 			    "err %d\n", DEVNAME(sc), type, i, err));
2066 			return err;
2067 		}
2068 
2069 		DPRINTFN(10, ("Sent PHY_DB HCMD, type = %d num = %d\n", type, i));
2070 	}
2071 
2072 	return 0;
2073 }
2074 
/*
 * Push the complete PHY DB — configuration section, non-channel
 * calibration, and the per-channel-group PAPD and TX power
 * calibration results — to the runtime firmware image.
 */
int
iwm_send_phy_db_data(struct iwm_softc *sc)
{
	uint8_t *data = NULL;
	uint16_t size = 0;
	int err;

	DPRINTF(("Sending phy db data and configuration to runtime image\n"));

	/* Send PHY DB CFG section */
	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
	if (err) {
		DPRINTF(("%s: Cannot get Phy DB cfg section, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
	if (err) {
		DPRINTF(("%s: Cannot send HCMD of Phy DB cfg section, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	/* Send the non-channel-specific calibration section */
	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
	    &data, &size, 0);
	if (err) {
		DPRINTF(("%s: Cannot get Phy DB non specific channel section, "
		    "%d\n", DEVNAME(sc), err));
		return err;
	}

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
	if (err) {
		DPRINTF(("%s: Cannot send HCMD of Phy DB non specific channel "
		    "sect, %d\n", DEVNAME(sc), err));
		return err;
	}

	/* Send all the PAPD channel specific data */
	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
	if (err) {
		DPRINTF(("%s: Cannot send channel specific PAPD groups, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	/* Send all the TXP channel specific data */
	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
	if (err) {
		DPRINTF(("%s: Cannot send channel specific TX power groups, "
		    "%d\n", DEVNAME(sc), err));
		return err;
	}

	DPRINTF(("Finished sending phy db non channel data\n"));
	return 0;
}
2135 
2136 /*
2137  * END iwl-phy-db.c
2138  */
2139 
2140 /*
2141  * BEGIN iwlwifi/mvm/time-event.c
2142  */
2143 
2144 /*
2145  * For the high priority TE use a time event type that has similar priority to
2146  * the FW's action scan priority.
2147  */
/* Time event types used for remain-on-channel requests. */
#define IWM_MVM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
#define IWM_MVM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC

/* used to convert from time event API v2 to v1 */
#define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
			     IWM_TE_V2_EVENT_SOCIOPATHIC)
/* Notification bits of a (little-endian) v2 time event policy word. */
static inline uint16_t
iwm_te_v2_get_notify(uint16_t policy)
{
	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
}
2159 
/* Dependency-policy bits of a v2 policy word, shifted down to bit 0. */
static inline uint16_t
iwm_te_v2_get_dep_policy(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
		IWM_TE_V2_PLACEMENT_POS;
}
2166 
/* Absence bit of a v2 policy word, as a 0/1 value. */
static inline uint16_t
iwm_te_v2_get_absence(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
}
2172 
/*
 * Convert a v2 time event command to the v1 layout for firmware that
 * only speaks the v1 API.  Common fields are copied through; the v2
 * packed 'policy' word is split into the separate v1 dep_policy,
 * is_present and notify words.
 */
void
iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
	struct iwm_time_event_cmd_v1 *cmd_v1)
{
	cmd_v1->id_and_color = cmd_v2->id_and_color;
	cmd_v1->action = cmd_v2->action;
	cmd_v1->id = cmd_v2->id;
	cmd_v1->apply_time = cmd_v2->apply_time;
	cmd_v1->max_delay = cmd_v2->max_delay;
	cmd_v1->depends_on = cmd_v2->depends_on;
	cmd_v1->interval = cmd_v2->interval;
	cmd_v1->duration = cmd_v2->duration;
	/* v1 and v2 use different "repeat forever" encodings. */
	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
	else
		cmd_v1->repeat = htole32(cmd_v2->repeat);
	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
	cmd_v1->interval_reciprocal = 0; /* unused */

	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
}
2196 
/*
 * Send a time event command, down-converting it to the v1 layout
 * first when the firmware does not advertise the v2 time event API.
 */
int
iwm_mvm_send_time_event_cmd(struct iwm_softc *sc,
	const struct iwm_time_event_cmd_v2 *cmd)
{
	struct iwm_time_event_cmd_v1 cmd_v1;

	/* v2-capable firmware takes the command as-is. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
		return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD,
		    IWM_CMD_SYNC, sizeof(*cmd), cmd);

	iwm_mvm_te_v2_to_v1(cmd, &cmd_v1);
	return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, IWM_CMD_SYNC,
	    sizeof(cmd_v1), &cmd_v1);
}
2211 
2212 int
2213 iwm_mvm_time_event_send_add(struct iwm_softc *sc, struct iwm_node *in,
2214 	void *te_data, struct iwm_time_event_cmd_v2 *te_cmd)
2215 {
2216 	int ret;
2217 
2218 	DPRINTF(("Add new TE, duration %d TU\n", le32toh(te_cmd->duration)));
2219 
2220 	ret = iwm_mvm_send_time_event_cmd(sc, te_cmd);
2221 	if (ret) {
2222 		DPRINTF(("%s: Couldn't send IWM_TIME_EVENT_CMD: %d\n",
2223 		    DEVNAME(sc), ret));
2224 	}
2225 
2226 	return ret;
2227 }
2228 
/*
 * Ask the firmware to protect a time window for us (an
 * IWM_TE_BSS_STA_AGGRESSIVE_ASSOC time event), e.g. while an
 * association is in progress.
 */
void
iwm_mvm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
	uint32_t duration, uint32_t min_duration, uint32_t max_delay)
{
	struct iwm_time_event_cmd_v2 time_cmd;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);

	/* Anchor the event to the device's current system time. */
	time_cmd.apply_time = htole32(iwm_read_prph(sc,
	    IWM_DEVICE_SYSTEM_TIME_REG));

	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	/* Ask for notifications at both start and end of the event. */
	time_cmd.policy
	    = htole32(IWM_TE_V2_NOTIF_HOST_EVENT_START |
	        IWM_TE_V2_NOTIF_HOST_EVENT_END);

	/* NOTE(review): 'min_duration' is accepted but not used here. */
	iwm_mvm_time_event_send_add(sc, in, /*te_data*/NULL, &time_cmd);
}
2257 
2258 /*
2259  * END iwlwifi/mvm/time-event.c
2260  */
2261 
2262 /*
2263  * NVM read access and content parsing.  We do not support
2264  * external NVM or writing NVM.
2265  * iwlwifi/mvm/nvm.c
2266  */
2267 
/* list of NVM sections we are allowed/need to read */
const int nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
};

/* Default NVM size to read per IWM_NVM_ACCESS_CMD chunk */
#define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
#define IWM_MAX_NVM_SECTION_SIZE 7000

/* op_code values for struct iwm_nvm_access_cmd */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0
2282 
2283 int
2284 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
2285 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
2286 {
2287 	offset = 0;
2288 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2289 		.offset = htole16(offset),
2290 		.length = htole16(length),
2291 		.type = htole16(section),
2292 		.op_code = IWM_NVM_READ_OPCODE,
2293 	};
2294 	struct iwm_nvm_access_resp *nvm_resp;
2295 	struct iwm_rx_packet *pkt;
2296 	struct iwm_host_cmd cmd = {
2297 		.id = IWM_NVM_ACCESS_CMD,
2298 		.flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
2299 		    IWM_CMD_SEND_IN_RFKILL,
2300 		.data = { &nvm_access_cmd, },
2301 	};
2302 	int ret, bytes_read, offset_read;
2303 	uint8_t *resp_data;
2304 
2305 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2306 
2307 	ret = iwm_send_cmd(sc, &cmd);
2308 	if (ret)
2309 		return ret;
2310 
2311 	pkt = cmd.resp_pkt;
2312 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2313 		DPRINTF(("%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
2314 		    DEVNAME(sc), pkt->hdr.flags));
2315 		ret = EIO;
2316 		goto exit;
2317 	}
2318 
2319 	/* Extract NVM response */
2320 	nvm_resp = (void *)pkt->data;
2321 
2322 	ret = le16toh(nvm_resp->status);
2323 	bytes_read = le16toh(nvm_resp->length);
2324 	offset_read = le16toh(nvm_resp->offset);
2325 	resp_data = nvm_resp->data;
2326 	if (ret) {
2327 		DPRINTF(("%s: NVM access command failed with status %d\n",
2328 		    DEVNAME(sc), ret));
2329 		ret = EINVAL;
2330 		goto exit;
2331 	}
2332 
2333 	if (offset_read != offset) {
2334 		DPRINTF(("%s: NVM ACCESS response with invalid offset %d\n",
2335 		    DEVNAME(sc), offset_read));
2336 		ret = EINVAL;
2337 		goto exit;
2338 	}
2339 
2340 	memcpy(data + offset, resp_data, bytes_read);
2341 	*len = bytes_read;
2342 
2343  exit:
2344 	iwm_free_resp(sc, &cmd);
2345 	return ret;
2346 }
2347 
2348 /*
2349  * Reads an NVM section completely.
2350  * NICs prior to 7000 family doesn't have a real NVM, but just read
2351  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
2352  * by uCode, we need to manually check in this case that we don't
2353  * overflow and try to read more than the EEPROM size.
2354  * For 7000 family NICs, we supply the maximal size we can read, and
2355  * the uCode fills the response with as much data as we can,
2356  * without overflowing, so no check is needed.
2357  */
2358 int
2359 iwm_nvm_read_section(struct iwm_softc *sc,
2360 	uint16_t section, uint8_t *data, uint16_t *len)
2361 {
2362 	uint16_t length, seglen;
2363 	int error;
2364 
2365 	/* Set nvm section read length */
2366 	length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2367 	*len = 0;
2368 
2369 	/* Read the NVM until exhausted (reading less than requested) */
2370 	while (seglen == length) {
2371 		error = iwm_nvm_read_chunk(sc,
2372 		    section, *len, length, data, &seglen);
2373 		if (error) {
2374 			printf("%s: Cannot read NVM from section "
2375 			    "%d offset %d, length %d\n",
2376 			    DEVNAME(sc), section, *len, length);
2377 			return error;
2378 		}
2379 		*len += seglen;
2380 	}
2381 
2382 	DPRINTFN(4, ("NVM section %d read completed\n", section));
2383 	return 0;
2384 }
2385 
2386 /*
2387  * BEGIN IWM_NVM_PARSE
2388  */
2389 
2390 /* iwlwifi/iwl-nvm-parse.c */
2391 
/* NVM offsets (in words) definitions */
enum wkp_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	/* the following are word offsets relative to the SW section base */
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
2409 
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
};

/* radio config bits (actual values from NVM definition) */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* NOTE(review): unit presumed dBm, as in iwlwifi — confirm. */
#define DEFAULT_MAX_TX_POWER 16
2427 
2428 /**
2429  * enum iwm_nvm_channel_flags - channel flags in NVM
2430  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
2431  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
2432  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
2433  * @IWM_NVM_CHANNEL_RADAR: radar detection required
2434  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
2435  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
2436  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
2437  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
2438  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
2439  */
2440 enum iwm_nvm_channel_flags {
2441 	IWM_NVM_CHANNEL_VALID = (1 << 0),
2442 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
2443 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
2444 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
2445 	IWM_NVM_CHANNEL_DFS = (1 << 7),
2446 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
2447 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
2448 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
2449 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
2450 };
2451 
/*
 * Build ic_channels[] from the NVM channel flags table.  2GHz entries
 * come first in iwm_nvm_channels[]; 5GHz entries are invalidated when
 * the SKU disables the 5GHz band.  Channels lacking the ACTIVE flag
 * are restricted to passive scanning.
 */
void
iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	struct ieee80211_channel *channel;
	uint16_t ch_flags;
	int is_5ghz;
	int flags, hw_value;

	for (ch_idx = 0; ch_idx < nitems(iwm_nvm_channels); ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);

		/* 5GHz channels are unusable if the SKU lacks 5GHz support. */
		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
		    !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWM_NVM_CHANNEL_VALID;

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
			    iwm_nvm_channels[ch_idx],
			    ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4"));
			continue;
		}

		/* ic_channels is indexed by IEEE channel number. */
		hw_value = iwm_nvm_channels[ch_idx];
		channel = &ic->ic_channels[hw_value];

		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
		if (!is_5ghz) {
			flags = IEEE80211_CHAN_2GHZ;
			channel->ic_flags
			    = IEEE80211_CHAN_CCK
			    | IEEE80211_CHAN_OFDM
			    | IEEE80211_CHAN_DYN
			    | IEEE80211_CHAN_2GHZ;
		} else {
			flags = IEEE80211_CHAN_5GHZ;
			channel->ic_flags =
			    IEEE80211_CHAN_A;
		}
		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);

		/* Active scanning not allowed: mark the channel passive. */
		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
	}
}
2501 
/*
 * Parse the raw HW/SW/calibration NVM sections into sc->sc_nvm: NVM
 * version, radio configuration, SKU capability bits, crystal
 * calibration words, MAC address, and the channel map.  Returns
 * EINVAL when the NVM reports empty antenna masks.
 */
int
iwm_parse_nvm_data(struct iwm_softc *sc,
	const uint16_t *nvm_hw, const uint16_t *nvm_sw,
	const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[ETHER_ADDR_LEN];
	uint16_t radio_cfg, sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	/* Unpack the radio configuration word into its bit fields. */
	radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);

	sku = le16_to_cpup(nvm_sw + IWM_SKU);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n is force-disabled here regardless of the SKU bit. */
	data->sku_cap_11n_enable = 0;

	if (!data->valid_tx_ant || !data->valid_rx_ant) {
		DPRINTF(("%s: invalid antennas (0x%x, 0x%x)\n",
			    DEVNAME(sc), data->valid_tx_ant,
			    data->valid_rx_ant));
		return EINVAL;
	}

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	/* NOTE(review): copied without le16toh — presumably consumed as
	 * raw little-endian words by the calibration code; confirm. */
	data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
	data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);

	/* The byte order is little endian 16 bit, meaning 214365 */
	memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
	data->hw_addr[0] = hw_addr[1];
	data->hw_addr[1] = hw_addr[0];
	data->hw_addr[2] = hw_addr[3];
	data->hw_addr[3] = hw_addr[2];
	data->hw_addr[4] = hw_addr[5];
	data->hw_addr[5] = hw_addr[4];

	iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS]);
	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
2556 
2557 /*
2558  * END NVM PARSE
2559  */
2560 
/* One NVM section as copied out of the device. */
struct iwm_nvm_section {
	uint16_t length;
	const uint8_t *data;
};

/* Valid TX/RX antenna masks from the firmware's PHY configuration. */
#define IWM_FW_VALID_TX_ANT(sc) \
    ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN) \
    >> IWM_FW_PHY_CFG_TX_CHAIN_POS)
#define IWM_FW_VALID_RX_ANT(sc) \
    ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN) \
    >> IWM_FW_PHY_CFG_RX_CHAIN_POS)
2572 
2573 int
2574 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2575 {
2576 	const uint16_t *hw, *sw, *calib;
2577 
2578 	/* Checking for required sections */
2579 	if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2580 	    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
2581 		DPRINTF(("%s: Can't parse empty NVM sections\n", DEVNAME(sc)));
2582 		return ENOENT;
2583 	}
2584 
2585 	hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
2586 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2587 	calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2588 	return iwm_parse_nvm_data(sc, hw, sw, calib,
2589 	    IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
2590 }
2591 
2592 int
2593 iwm_nvm_init(struct iwm_softc *sc)
2594 {
2595 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2596 	int i, section, error;
2597 	uint16_t len;
2598 	uint8_t *nvm_buffer, *temp;
2599 
2600 	/* Read From FW NVM */
2601 	DPRINTF(("Read NVM\n"));
2602 
2603 	/* TODO: find correct NVM max size for a section */
2604 	nvm_buffer = malloc(IWM_OTP_LOW_IMAGE_SIZE, M_DEVBUF, M_WAIT);
2605 	for (i = 0; i < nitems(nvm_to_read); i++) {
2606 		section = nvm_to_read[i];
2607 		KASSERT(section <= nitems(nvm_sections));
2608 
2609 		error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
2610 		if (error)
2611 			break;
2612 
2613 		temp = malloc(len, M_DEVBUF, M_WAIT);
2614 		memcpy(temp, nvm_buffer, len);
2615 		nvm_sections[section].data = temp;
2616 		nvm_sections[section].length = len;
2617 	}
2618 	free(nvm_buffer, M_DEVBUF, IWM_OTP_LOW_IMAGE_SIZE);
2619 	if (error)
2620 		return error;
2621 
2622 	return iwm_parse_nvm_sections(sc, nvm_sections);
2623 }
2624 
2625 /*
2626  * Firmware loading gunk.  This is kind of a weird hybrid between the
2627  * iwn driver and the Linux iwlwifi driver.
2628  */
2629 
2630 int
2631 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2632 	const uint8_t *section, uint32_t byte_cnt)
2633 {
2634 	struct iwm_dma_info *dma = &sc->fw_dma;
2635 	int error;
2636 
2637 	/* Copy firmware section into pre-allocated DMA-safe memory. */
2638 	memcpy(dma->vaddr, section, byte_cnt);
2639 	bus_dmamap_sync(sc->sc_dmat,
2640 	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
2641 
2642 	if (!iwm_nic_lock(sc))
2643 		return EBUSY;
2644 
2645 	sc->sc_fw_chunk_done = 0;
2646 
2647 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2648 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2649 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2650 	    dst_addr);
2651 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2652 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2653 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2654 	    (iwm_get_dma_hi_addr(dma->paddr)
2655 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2656 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2657 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2658 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2659 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2660 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2661 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2662 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2663 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2664 
2665 	iwm_nic_unlock(sc);
2666 
2667 	/* wait 1s for this segment to load */
2668 	while (!sc->sc_fw_chunk_done)
2669 		if ((error = tsleep(&sc->sc_fw, 0, "iwmfw", hz)) != 0)
2670 			break;
2671 
2672 	return error;
2673 }
2674 
2675 int
2676 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2677 {
2678 	struct iwm_fw_sects *fws;
2679 	int error, i, w;
2680 	void *data;
2681 	uint32_t dlen;
2682 	uint32_t offset;
2683 
2684 	sc->sc_uc.uc_intr = 0;
2685 
2686 	fws = &sc->sc_fw.fw_sects[ucode_type];
2687 	for (i = 0; i < fws->fw_count; i++) {
2688 		data = fws->fw_sect[i].fws_data;
2689 		dlen = fws->fw_sect[i].fws_len;
2690 		offset = fws->fw_sect[i].fws_devoff;
2691 		DPRINTF(("LOAD FIRMWARE type %d offset %u len %d\n",
2692 		    ucode_type, offset, dlen));
2693 		error = iwm_firmware_load_chunk(sc, offset, data, dlen);
2694 		if (error) {
2695 			DPRINTF(("iwm_firmware_load_chunk() chunk %u of %u returned error %02d\n", i, fws->fw_count, error));
2696 			return error;
2697 		}
2698 	}
2699 
2700 	/* wait for the firmware to load */
2701 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2702 
2703 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2704 		error = tsleep(&sc->sc_uc, 0, "iwmuc", hz/10);
2705 	}
2706 
2707 	return error;
2708 }
2709 
/* iwlwifi: pcie/trans.c */
/*
 * Bring up the NIC, clear the rfkill handshake bits, enable host
 * interrupts, and upload the requested firmware image.
 */
int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error;

	/* Ack/clear any pending interrupts before (re)initializing. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	if ((error = iwm_nic_init(sc)) != 0) {
		printf("%s: unable to init nic\n", DEVNAME(sc));
		return error;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwm_load_firmware(sc, ucode_type);
}
2740 
int
iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
{
	/*
	 * 'sched_base' is accepted for parity with the reference driver
	 * but is not used here; all post-alive setup happens in
	 * iwm_post_alive().
	 */
	return iwm_post_alive(sc);
}
2746 
/* Tell the firmware which TX antennas are valid. */
int
iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
{
	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = htole32(valid_tx_ant),
	};

	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
}
2757 
/* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration plus the default calibration triggers
 * for the currently selected ucode image.
 */
int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->sc_uc_current;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2776 
2777 int
2778 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2779 	enum iwm_ucode_type ucode_type)
2780 {
2781 	enum iwm_ucode_type old_type = sc->sc_uc_current;
2782 	int error;
2783 
2784 	if ((error = iwm_read_firmware(sc, ucode_type)) != 0)
2785 		return error;
2786 
2787 	sc->sc_uc_current = ucode_type;
2788 	error = iwm_start_fw(sc, ucode_type);
2789 	if (error) {
2790 		sc->sc_uc_current = old_type;
2791 		return error;
2792 	}
2793 
2794 	return iwm_fw_alive(sc, sc->sched_base);
2795 }
2796 
2797 /*
2798  * mvm misc bits
2799  */
2800 
2801 /*
2802  * follows iwlwifi/fw.c
2803  */
/*
 * Boot and run the INIT firmware image.
 *
 * With 'justnvm' set, only the NVM is read (deriving the MAC address
 * and sizing the scan command buffer) before returning.  Otherwise
 * the init image's internal calibrations are triggered and we sleep
 * until the init-complete notification arrives.
 */
int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int error;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		printf("%s: radio is disabled by hardware switch\n",
		    DEVNAME(sc));
		return EPERM;
	}

	sc->sc_init_complete = 0;
	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
	    IWM_UCODE_TYPE_INIT)) != 0)
		return error;

	if (justnvm) {
		if ((error = iwm_nvm_init(sc)) != 0) {
			printf("%s: failed to read nvm\n", DEVNAME(sc));
			return error;
		}
		memcpy(&sc->sc_ic.ic_myaddr,
		    &sc->sc_nvm.hw_addr, ETHER_ADDR_LEN);

		/* Size the scan command for the largest possible request. */
		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
		    + sc->sc_capa_max_probe_len
		    + IWM_MAX_NUM_SCAN_CHANNELS
		    * sizeof(struct iwm_scan_channel);
		sc->sc_scan_cmd = malloc(sc->sc_scan_cmd_len, M_DEVBUF, M_WAIT);

		return 0;
	}

	/* Send TX valid antennas before triggering calibrations */
	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		return error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
		DPRINTF(("%s: failed to run internal calibration: %d\n",
		    DEVNAME(sc), error));
		return error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete)
		if ((error = tsleep(&sc->sc_init_complete,
		    0, "iwminit", 2*hz)) != 0)
			break;

	return error;
}
2863 
2864 /*
2865  * receive side
2866  */
2867 
/*
 * (Re)stock one RX ring slot: allocate an mbuf cluster big enough for
 * 'size', DMA-load it, and write its bus address into the RX
 * descriptor at 'idx'.  Called at init time and from the RX path.
 */
int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int error;
	int fatal = 0;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	/* Regular cluster, or a large RX buffer for oversized frames. */
	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MCLGETI(m, M_DONTWAIT, NULL, IWM_RBUF_SIZE);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/*
	 * If this slot already had a buffer we are replacing it; once
	 * the old map is unloaded, a load failure would leave the
	 * descriptor with no backing buffer, which we cannot recover
	 * from — hence the panic below.
	 */
	if (data->m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
		/* XXX */
		if (fatal)
			panic("iwm: could not load RX mbuf");
		m_freem(m);
		return error;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	return 0;
}
2916 
/* iwlwifi: mvm/rx.c */
/* Constant subtracted when converting reported RSSI to dBm. */
#define IWM_RSSI_OFFSET 50
/*
 * Derive the strongest receive chain's signal level (dBm) from the
 * AGC and RSSI words in the RX PHY info (pre-"energy" API).
 */
int
iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
{
	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
	uint32_t agc_a, agc_b;
	uint32_t val;

	/* AGC values for chains A and B share one little-endian word. */
	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;

	/* In-band RSSI for chains A and B. */
	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;

	/*
	 * dBm = rssi dB - agc dB - constant.
	 * Higher AGC (higher radio gain) means lower signal.
	 */
	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);

	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));

	return max_rssi_dbm;
}
2947 
2948 /* iwlwifi: mvm/rx.c */
2949 /*
2950  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2951  * values are reported by the fw as positive values - need to negate
2952  * to obtain their dBM.  Account for missing antennas by replacing 0
2953  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
2954  */
2955 int
2956 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2957 {
2958 	int energy_a, energy_b, energy_c, max_energy;
2959 	uint32_t val;
2960 
2961 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2962 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2963 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
2964 	energy_a = energy_a ? -energy_a : -256;
2965 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2966 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
2967 	energy_b = energy_b ? -energy_b : -256;
2968 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2969 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
2970 	energy_c = energy_c ? -energy_c : -256;
2971 	max_energy = MAX(energy_a, energy_b);
2972 	max_energy = MAX(max_energy, energy_c);
2973 
2974 	DPRINTFN(12, ("energy In A %d B %d C %d , and max %d\n",
2975 	    energy_a, energy_b, energy_c, max_energy));
2976 
2977 	return max_energy;
2978 }
2979 
/*
 * Handle an RX PHY notification: cache the PHY metadata (RSSI/energy,
 * channel, timestamps) so that the MPDU notification which follows can
 * be tagged with it in iwm_mvm_rx_rx_mpdu().
 */
void
iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	DPRINTFN(20, ("received PHY stats\n"));
	/* Make the DMA'd payload visible to the CPU before copying it. */
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);

	/* Stash for the next MPDU notification. */
	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
2992 
2993 /*
2994  * Retrieve the average noise (in dBm) among receivers.
2995  */
2996 int
2997 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2998 {
2999 	int i, total, nbant, noise;
3000 
3001 	total = nbant = noise = 0;
3002 	for (i = 0; i < 3; i++) {
3003 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
3004 		if (noise) {
3005 			total += noise;
3006 			nbant++;
3007 		}
3008 	}
3009 
3010 	/* There should be at least one antenna but check anyway. */
3011 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3012 }
3013 
3014 /*
3015  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3016  *
3017  * Handles the actual data of the Rx packet from the fw
3018  */
void
iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_channel *c = NULL;
	struct ieee80211_rxinfo rxi;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	int device_timestamp;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* PHY metadata was cached by the preceding RX PHY notification. */
	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	/* Firmware appends a status word after the frame payload. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));

	/* Hand the RX buffer itself to the stack; point past the header. */
	m = data->m;
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		DPRINTF(("dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt));
		return;
	}

	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
		return; /* drop */
	}

	device_timestamp = le32toh(phy_info->system_timestamp);

	/* Newer firmware reports per-antenna energy instead of RSSI/AGC. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_mvm_calc_rssi(sc, phy_info);
	}
	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */

	/* replenish ring for the buffer we're going to feed to the sharks */
	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
		return;

	/*
	 * NOTE(review): while scanning 5GHz the frame's channel comes from
	 * the firmware PHY info rather than the current channel setting.
	 */
	if (sc->sc_scanband == IEEE80211_CHAN_5GHZ) {
		if (le32toh(phy_info->channel) < nitems(ic->ic_channels))
			c = &ic->ic_channels[le32toh(phy_info->channel)];
	}

	memset(&rxi, 0, sizeof(rxi));
	rxi.rxi_rssi = rssi;
	rxi.rxi_tstamp = device_timestamp;
	ni = ieee80211_find_rxnode(ic, wh);
	if (c)
		ni->ni_chan = c;

#if NBPFILTER > 0
	/* Feed a radiotap-tagged copy to any attached BPF listeners. */
	if (sc->sc_drvbpf != NULL) {
		struct mbuf mb;
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
		tap->wr_chan_flags =
		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		/* Map firmware PLCP rate codes to radiotap 500kbps units. */
		switch (phy_info->rate) {
		/* CCK rates. */
		case  10: tap->wr_rate =   2; break;
		case  20: tap->wr_rate =   4; break;
		case  55: tap->wr_rate =  11; break;
		case 110: tap->wr_rate =  22; break;
		/* OFDM rates. */
		case 0xd: tap->wr_rate =  12; break;
		case 0xf: tap->wr_rate =  18; break;
		case 0x5: tap->wr_rate =  24; break;
		case 0x7: tap->wr_rate =  36; break;
		case 0x9: tap->wr_rate =  48; break;
		case 0xb: tap->wr_rate =  72; break;
		case 0x1: tap->wr_rate =  96; break;
		case 0x3: tap->wr_rate = 108; break;
		/* Unknown rate: should not happen. */
		default:  tap->wr_rate =   0;
		}

		mb.m_data = (caddr_t)tap;
		mb.m_len = sc->sc_rxtap_len;
		mb.m_next = m;
		mb.m_nextpkt = NULL;
		mb.m_type = 0;
		mb.m_flags = 0;
		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
	}
#endif
	ieee80211_input(IC2IFP(ic), m, ni, &rxi);
	ieee80211_release_node(ic, ni);
}
3133 
3134 void
3135 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3136 	struct iwm_node *in)
3137 {
3138 	struct ieee80211com *ic = &sc->sc_ic;
3139 	struct ifnet *ifp = IC2IFP(ic);
3140 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3141 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3142 	int failack = tx_resp->failure_frame;
3143 
3144 	KASSERT(tx_resp->frame_count == 1);
3145 
3146 	/* Update rate control statistics. */
3147 	in->in_amn.amn_txcnt++;
3148 	if (failack > 0) {
3149 		in->in_amn.amn_retrycnt++;
3150 	}
3151 
3152 	if (status != IWM_TX_STATUS_SUCCESS &&
3153 	    status != IWM_TX_STATUS_DIRECT_DONE)
3154 		ifp->if_oerrors++;
3155 	else
3156 		ifp->if_opackets++;
3157 }
3158 
/*
 * Handle a TX completion notification: account the frame, release the
 * DMA mapping and mbuf of the completed slot, and restart output if the
 * ring drained below the low watermark.
 */
void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	/* Each slot may only be completed once. */
	if (txd->done) {
		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
		    DEVNAME(sc)));
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* A completion arrived, so disarm the TX watchdog. */
	sc->sc_tx_timer = 0;

	iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	txd->m = NULL;
	txd->in = NULL;
	/* Drop the node reference taken when the frame was queued. */
	ieee80211_release_node(ic, &in->in_ni);

	/* Clear the queue-full state once we drop below the low mark. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
			ifp->if_flags &= ~IFF_OACTIVE;
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			(*ifp->if_start)(ifp);
		}
	}
}
3213 
3214 /*
3215  * BEGIN iwlwifi/mvm/binding.c
3216  */
3217 
3218 int
3219 iwm_mvm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
3220 {
3221 	struct iwm_binding_cmd cmd;
3222 	struct iwm_mvm_phy_ctxt *phyctxt = in->in_phyctxt;
3223 	int i, ret;
3224 	uint32_t status;
3225 
3226 	memset(&cmd, 0, sizeof(cmd));
3227 
3228 	cmd.id_and_color
3229 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3230 	cmd.action = htole32(action);
3231 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3232 
3233 	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3234 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
3235 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
3236 
3237 	status = 0;
3238 	ret = iwm_mvm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
3239 	    sizeof(cmd), &cmd, &status);
3240 	if (ret) {
3241 		DPRINTF(("%s: Failed to send binding (action:%d): %d\n",
3242 		    DEVNAME(sc), action, ret));
3243 		return ret;
3244 	}
3245 
3246 	if (status) {
3247 		DPRINTF(("%s: Binding command failed: %u\n", DEVNAME(sc),
3248 		    status));
3249 		ret = EIO;
3250 	}
3251 
3252 	return ret;
3253 }
3254 
3255 int
3256 iwm_mvm_binding_update(struct iwm_softc *sc, struct iwm_node *in, int add)
3257 {
3258 	return iwm_mvm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
3259 }
3260 
/*
 * Add the MAC<->PHY binding for a new virtual interface.
 * Passes IWM_FW_CTXT_ACTION_ADD (non-zero) as the 'add' flag.
 */
int
iwm_mvm_binding_add_vif(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_binding_update(sc, in, IWM_FW_CTXT_ACTION_ADD);
}
3266 
3267 /*
3268  * END iwlwifi/mvm/binding.c
3269  */
3270 
3271 /*
3272  * BEGIN iwlwifi/mvm/phy-ctxt.c
3273  */
3274 
3275 /*
3276  * Construct the generic fields of the PHY context command
3277  */
3278 void
3279 iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
3280 	struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
3281 {
3282 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
3283 
3284 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
3285 	    ctxt->color));
3286 	cmd->action = htole32(action);
3287 	cmd->apply_time = htole32(apply_time);
3288 }
3289 
3290 /*
3291  * Add the phy configuration to the PHY context command
3292  */
3293 void
3294 iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *sc,
3295 	struct iwm_phy_context_cmd *cmd, struct ieee80211_channel *chan,
3296 	uint8_t chains_static, uint8_t chains_dynamic)
3297 {
3298 	struct ieee80211com *ic = &sc->sc_ic;
3299 	uint8_t active_cnt, idle_cnt;
3300 
3301 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
3302 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
3303 
3304 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
3305 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
3306 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
3307 
3308 	/* Set rx the chains */
3309 	idle_cnt = chains_static;
3310 	active_cnt = chains_dynamic;
3311 
3312 	cmd->rxchain_info = htole32(IWM_FW_VALID_RX_ANT(sc) <<
3313 					IWM_PHY_RX_CHAIN_VALID_POS);
3314 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
3315 	cmd->rxchain_info |= htole32(active_cnt <<
3316 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
3317 
3318 	cmd->txchain_info = htole32(IWM_FW_VALID_TX_ANT(sc));
3319 }
3320 
3321 /*
3322  * Send a command
3323  * only if something in the configuration changed: in case that this is the
3324  * first time that the phy configuration is applied or in case that the phy
3325  * configuration changed from the previous apply.
3326  */
3327 int
3328 iwm_mvm_phy_ctxt_apply(struct iwm_softc *sc,
3329 	struct iwm_mvm_phy_ctxt *ctxt,
3330 	uint8_t chains_static, uint8_t chains_dynamic,
3331 	uint32_t action, uint32_t apply_time)
3332 {
3333 	struct iwm_phy_context_cmd cmd;
3334 	int ret;
3335 
3336 	/* Set the command header fields */
3337 	iwm_mvm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
3338 
3339 	/* Set the command data */
3340 	iwm_mvm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
3341 	    chains_static, chains_dynamic);
3342 
3343 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, IWM_CMD_SYNC,
3344 	    sizeof(struct iwm_phy_context_cmd), &cmd);
3345 	if (ret) {
3346 		DPRINTF(("PHY ctxt cmd error. ret=%d\n", ret));
3347 	}
3348 	return ret;
3349 }
3350 
3351 /*
3352  * Send a command to add a PHY context based on the current HW configuration.
3353  */
int
iwm_mvm_phy_ctxt_add(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
	struct ieee80211_channel *chan,
	uint8_t chains_static, uint8_t chains_dynamic)
{
	/* Remember the channel so later updates can reuse it. */
	ctxt->channel = chan;
	return iwm_mvm_phy_ctxt_apply(sc, ctxt,
	    chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_ADD, 0);
}
3363 
3364 /*
3365  * Send a command to modify the PHY context based on the current HW
3366  * configuration. Note that the function does not check that the configuration
3367  * changed.
3368  */
int
iwm_mvm_phy_ctxt_changed(struct iwm_softc *sc,
	struct iwm_mvm_phy_ctxt *ctxt, struct ieee80211_channel *chan,
	uint8_t chains_static, uint8_t chains_dynamic)
{
	/* Record the (possibly new) channel before applying the change. */
	ctxt->channel = chan;
	return iwm_mvm_phy_ctxt_apply(sc, ctxt,
	    chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, 0);
}
3378 
3379 /*
3380  * END iwlwifi/mvm/phy-ctxt.c
3381  */
3382 
3383 /*
3384  * transmit side
3385  */
3386 
3387 /*
3388  * Send a command to the firmware.  We try to implement the Linux
3389  * driver interface for the routine.
3390  * mostly from if_iwn (iwn_cmd()).
3391  *
3392  * For now, we always copy the first part and map the second one (if it exists).
3393  */
int
iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tfd *desc;
	struct iwm_tx_data *data;
	struct iwm_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint32_t addr_lo;
	int error = 0, i, paylen, off, s;
	int code;
	int async, wantresp;

	code = hcmd->id;
	async = hcmd->flags & IWM_CMD_ASYNC;
	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;

	/* Total payload length over all fragments. */
	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* if the command wants an answer, busy sc_cmd_resp */
	if (wantresp) {
		KASSERT(!async);
		/* Only one response buffer exists; wait until it is free. */
		while (sc->sc_wantresp != -1)
			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
		sc->sc_wantresp = ring->qid << 16 | ring->cur;
		DPRINTFN(12, ("wantresp is %x\n", sc->sc_wantresp));
	}

	/*
	 * Is the hardware still available?  (after e.g. above wait).
	 */
	s = splnet();
	if (sc->sc_flags & IWM_FLAG_STOPPED) {
		error = ENXIO;
		goto out;
	}

	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];

	if (paylen > sizeof(cmd->data)) {
		/* Command is too large */
		if (sizeof(cmd->hdr) + paylen > IWM_RBUF_SIZE) {
			error = EINVAL;
			goto out;
		}
		/* Large commands go into a cluster mbuf mapped for DMA. */
		m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			error = ENOMEM;
			goto out;
		}
		MCLGETI(m, M_DONTWAIT, NULL, IWM_RBUF_SIZE);
		if (!(m->m_flags & M_EXT)) {
			m_freem(m);
			error = ENOMEM;
			goto out;
		}
		cmd = mtod(m, struct iwm_device_cmd *);
		/*
		 * NOTE(review): only hcmd->len[0] bytes are mapped here even
		 * though all fragments are copied below -- confirm that
		 * large commands always use a single fragment.
		 */
		error = bus_dmamap_load(sc->sc_dmat, data->map, cmd,
		    hcmd->len[0], NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (error != 0) {
			m_freem(m);
			goto out;
		}
		data->m = m;
		paddr = data->map->dm_segs[0].ds_addr;
	} else {
		/* Small commands fit in the pre-allocated ring slot. */
		cmd = &ring->cmd[ring->cur];
		paddr = data->cmd_paddr;
	}

	cmd->hdr.code = code;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	/* Gather all fragments into one contiguous command body. */
	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(cmd->data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen);

	/* lo field is not aligned */
	addr_lo = htole32((uint32_t)paddr);
	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
	    | ((sizeof(cmd->hdr) + paylen) << 4));
	desc->num_tbs = 1;

	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%lu %s\n",
	    code, hcmd->len[0] + hcmd->len[1] + sizeof(cmd->hdr),
	    async ? " (async)" : ""));

	/* Flush command body and descriptor to memory before the doorbell. */
	if (hcmd->len[0] > sizeof(cmd->data)) {
		bus_dmamap_sync(sc->sc_dmat, data->map, 0, hcmd->len[0],
		    BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
		    hcmd->len[0] + 4, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

	/* Request MAC access and wait for the device to wake up. */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
	     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
		DPRINTF(("%s: acquiring device failed\n", DEVNAME(sc)));
		error = EBUSY;
		goto out;
	}

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
#endif
	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
	    code, ring->qid, ring->cur));

	/* Kick command ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (!async) {
		/* Record generation to detect a device reset while asleep. */
		int generation = sc->sc_generation;
		/* iwm_cmd_done() wakes us up on this descriptor. */
		error = tsleep(desc, PCATCH, "iwmcmd", hz);
		if (error == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				error = ENXIO;
			} else {
				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
			}
		}
	}
 out:
	/* On failure, release the response buffer reserved above. */
	if (wantresp && error != 0) {
		iwm_free_resp(sc, hcmd);
	}
	splx(s);

	return error;
}
3546 
3547 /* iwlwifi: mvm/utils.c */
3548 int
3549 iwm_mvm_send_cmd_pdu(struct iwm_softc *sc, uint8_t id,
3550 	uint32_t flags, uint16_t len, const void *data)
3551 {
3552 	struct iwm_host_cmd cmd = {
3553 		.id = id,
3554 		.len = { len, },
3555 		.data = { data, },
3556 		.flags = flags,
3557 	};
3558 
3559 	return iwm_send_cmd(sc, &cmd);
3560 }
3561 
3562 /* iwlwifi: mvm/utils.c */
3563 int
3564 iwm_mvm_send_cmd_status(struct iwm_softc *sc,
3565 	struct iwm_host_cmd *cmd, uint32_t *status)
3566 {
3567 	struct iwm_rx_packet *pkt;
3568 	struct iwm_cmd_response *resp;
3569 	int error, resp_len;
3570 
3571 	//lockdep_assert_held(&mvm->mutex);
3572 
3573 	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
3574 	cmd->flags |= IWM_CMD_SYNC | IWM_CMD_WANT_SKB;
3575 
3576 	if ((error = iwm_send_cmd(sc, cmd)) != 0)
3577 		return error;
3578 	pkt = cmd->resp_pkt;
3579 
3580 	/* Can happen if RFKILL is asserted */
3581 	if (!pkt) {
3582 		error = 0;
3583 		goto out_free_resp;
3584 	}
3585 
3586 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
3587 		error = EIO;
3588 		goto out_free_resp;
3589 	}
3590 
3591 	resp_len = iwm_rx_packet_payload_len(pkt);
3592 	if (resp_len != sizeof(*resp)) {
3593 		error = EIO;
3594 		goto out_free_resp;
3595 	}
3596 
3597 	resp = (void *)pkt->data;
3598 	*status = le32toh(resp->status);
3599  out_free_resp:
3600 	iwm_free_resp(sc, cmd);
3601 	return error;
3602 }
3603 
3604 /* iwlwifi/mvm/utils.c */
3605 int
3606 iwm_mvm_send_cmd_pdu_status(struct iwm_softc *sc, uint8_t id,
3607 	uint16_t len, const void *data, uint32_t *status)
3608 {
3609 	struct iwm_host_cmd cmd = {
3610 		.id = id,
3611 		.len = { len, },
3612 		.data = { data, },
3613 	};
3614 
3615 	return iwm_mvm_send_cmd_status(sc, &cmd, status);
3616 }
3617 
/*
 * Release the single synchronous-command response buffer and wake any
 * thread in iwm_send_cmd() waiting to claim it.
 */
void
iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	/* Must only be called while a sync WANT_SKB command owns the slot. */
	KASSERT(sc->sc_wantresp != -1);
	KASSERT((hcmd->flags & (IWM_CMD_WANT_SKB|IWM_CMD_SYNC))
	    == (IWM_CMD_WANT_SKB|IWM_CMD_SYNC));
	sc->sc_wantresp = -1;
	wakeup(&sc->sc_wantresp);
}
3627 
3628 /*
3629  * Process a "command done" firmware notification.  This is where we wakeup
3630  * processes waiting for a synchronous command completion.
3631  * from if_iwn
3632  */
void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake the iwm_send_cmd() thread sleeping on this descriptor. */
	wakeup(&ring->desc[pkt->hdr.idx]);
}
3655 
#if 0
/*
 * necessary only for block ack mode
 */
/*
 * Update the TX scheduler byte-count table entry for a queue slot.
 *
 * Fix: the bus_dmamap_sync() offset computations referenced an
 * undefined identifier 'w'; use the address of the table entry that
 * was actually written (this code would not have compiled if enabled).
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] -
	    (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[
			IWM_TFD_QUEUE_SIZE_MAX + idx] -
		    (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
3691 
3692 /*
3693  * Fill in various bit for management frames, and leave them
3694  * unfilled for data frames (firmware takes care of that).
3695  * Return the selected TX rate.
3696  */
const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags;
	int nrates = ni->ni_rates.rs_nrates;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	if (type != IEEE80211_FC0_TYPE_DATA) {
		/* for non-data, use the lowest supported rate */
		ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ?
		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
	} else if (ic->ic_fixed_rate != -1) {
		/* A fixed TX rate was configured by the user. */
		ridx = sc->sc_fixed_ridx;
	} else {
		/* for data frames, use RS table */
		tx->initial_rate_index = (nrates - 1) - ni->ni_txrate;
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		DPRINTFN(12, ("start with txrate %d\n", tx->initial_rate_index));
		ridx = in->in_ridx[ni->ni_txrate];
		/* Firmware handles rate selection; just report the rate. */
		return &iwm_rates[ridx];
	}

	/* Explicit rate selection for management/fixed-rate frames. */
	rinfo = &iwm_rates[ridx];
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
3734 
3735 #define TB0_SIZE 16
/*
 * Queue one frame for transmission on TX ring 'ac'.  Builds the TX
 * command (header copy + flags + rate), maps the payload for DMA, fills
 * the TX descriptor, and rings the doorbell.  Returns 0 on success or
 * an errno; on error the mbuf has been freed.
 */
int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint8_t tid, type;
	int i, totlen, error, pad;
	int hdrlen2;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_get_hdrlen(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* Sanity cross-check of the header length computation. */
	hdrlen2 = (ieee80211_has_qos(wh)) ?
	    sizeof (struct ieee80211_qosframe) :
	    sizeof (struct ieee80211_frame);

	if (hdrlen != hdrlen2)
		DPRINTF(("%s: hdrlen error (%d != %d)\n",
		    DEVNAME(sc), hdrlen, hdrlen2));

	tid = 0;

	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Select TX rate and fill rate-related command fields. */
	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

#if NBPFILTER > 0
	/* Feed a radiotap-tagged copy to any attached BPF listeners. */
	if (sc->sc_drvbpf != NULL) {
		struct mbuf mb;
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		mb.m_data = (caddr_t)tap;
		mb.m_len = sc->sc_txtap_len;
		mb.m_next = m;
		mb.m_nextpkt = NULL;
		mb.m_type = 0;
		mb.m_flags = 0;
		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
	}
#endif

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
                k = ieee80211_get_txkey(ic, wh, ni);
		if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
			return ENOBUFS;
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK from the receiver. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS protection for long unicast non-data frames. */
	if (type != IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Multicast and non-data frames go via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Association frames get a slightly longer PM timeout. */
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);

	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (error != 0) {
		if (error != EFBIG) {
			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		MGETHDR(m1, M_DONTWAIT, MT_DATA);
		if (m1 == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m1, M_DONTWAIT);
			if (!(m1->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m1);
				return ENOBUFS;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
		m_freem(m);
		m = m1;

		/* Retry with the single contiguous copy. */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (error != 0) {
			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
	KASSERT(data->in != NULL);

	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
	    ring->qid, ring->cur, totlen, data->map->dm_nsegs));

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + data->map->dm_nsegs;

	/* TB0: first 16 bytes of the command; TB1: rest of cmd + header. */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command and descriptor before the doorbell. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3972 
#if 0
/*
 * Ask the firmware to flush the TX path for the queues selected by
 * tfd_msk, either synchronously or asynchronously.  Currently compiled
 * out; kept for reference until TX path flushing is needed.
 */
/* not necessary? */
int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
                printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), ret);
	return ret;
}
#endif
3992 
3993 
3994 /*
3995  * BEGIN mvm/power.c
3996  */
3997 
3998 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
3999 
4000 int
4001 iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *sc,
4002 	struct iwm_beacon_filter_cmd *cmd)
4003 {
4004 	int ret;
4005 
4006 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4007 	    IWM_CMD_SYNC, sizeof(struct iwm_beacon_filter_cmd), cmd);
4008 
4009 	if (!ret) {
4010 		DPRINTF(("ba_enable_beacon_abort is: %d\n",
4011 		    le32toh(cmd->ba_enable_beacon_abort)));
4012 		DPRINTF(("ba_escape_timer is: %d\n",
4013 		    le32toh(cmd->ba_escape_timer)));
4014 		DPRINTF(("bf_debug_flag is: %d\n",
4015 		    le32toh(cmd->bf_debug_flag)));
4016 		DPRINTF(("bf_enable_beacon_filter is: %d\n",
4017 		    le32toh(cmd->bf_enable_beacon_filter)));
4018 		DPRINTF(("bf_energy_delta is: %d\n",
4019 		    le32toh(cmd->bf_energy_delta)));
4020 		DPRINTF(("bf_escape_timer is: %d\n",
4021 		    le32toh(cmd->bf_escape_timer)));
4022 		DPRINTF(("bf_roaming_energy_delta is: %d\n",
4023 		    le32toh(cmd->bf_roaming_energy_delta)));
4024 		DPRINTF(("bf_roaming_state is: %d\n",
4025 		    le32toh(cmd->bf_roaming_state)));
4026 		DPRINTF(("bf_temp_threshold is: %d\n",
4027 		    le32toh(cmd->bf_temp_threshold)));
4028 		DPRINTF(("bf_temp_fast_filter is: %d\n",
4029 		    le32toh(cmd->bf_temp_fast_filter)));
4030 		DPRINTF(("bf_temp_slow_filter is: %d\n",
4031 		    le32toh(cmd->bf_temp_slow_filter)));
4032 	}
4033 	return ret;
4034 }
4035 
/*
 * Fill in the connection-quality-monitoring related fields of a beacon
 * filter command.  Only the beacon-abort enable state is propagated here;
 * the remaining CQM fields are left as the caller initialized them.
 */
void
iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *sc,
	struct iwm_node *in, struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
4042 
/*
 * Enable or disable beacon abort in the firmware.  Beacon abort rides on
 * top of beacon filtering, so this is a no-op while filtering is disabled.
 * Returns 0 on success or an error from the command submission.
 */
int
iwm_mvm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in,
	int enable)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(enable),
	};

	/* Nothing to update while beacon filtering itself is off. */
	if (!sc->sc_bf.bf_enabled)
		return 0;

	sc->sc_bf.ba_enabled = enable;
	iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
	return iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
}
4060 
/*
 * Log the contents of a MAC power command before it is sent.
 * Only the "power management disabled" (CAM) case is expected here;
 * the KASSERT below documents that the enabled path is unsupported.
 */
void
iwm_mvm_power_log(struct iwm_softc *sc, struct iwm_mac_power_cmd *cmd)
{
	DPRINTF(("Sending power table command on mac id 0x%X for "
	    "power level %d, flags = 0x%X\n",
	    cmd->id_and_color, IWM_POWER_SCHEME_CAM, le16toh(cmd->flags)));
	DPRINTF(("Keep alive = %u sec\n", le16toh(cmd->keep_alive_seconds)));

	if (!(cmd->flags & htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
		DPRINTF(("Disable power management\n"));
		return;
	}
	/* Reaching this point means power management was enabled, which
	 * this driver does not support yet — treat it as a driver bug. */
	KASSERT(0);

#if 0
	DPRINTF(mvm, "Rx timeout = %u usec\n",
			le32_to_cpu(cmd->rx_data_timeout));
	DPRINTF(mvm, "Tx timeout = %u usec\n",
			le32_to_cpu(cmd->tx_data_timeout));
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_SKIP_OVER_DTIM_MSK))
		DPRINTF(mvm, "DTIM periods to skip = %u\n",
				cmd->skip_dtim_periods);
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_LPRX_ENA_MSK))
		DPRINTF(mvm, "LP RX RSSI threshold = %u\n",
				cmd->lprx_rssi_threshold);
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
		DPRINTF(mvm, "uAPSD enabled\n");
		DPRINTF(mvm, "Rx timeout (uAPSD) = %u usec\n",
				le32_to_cpu(cmd->rx_data_timeout_uapsd));
		DPRINTF(mvm, "Tx timeout (uAPSD) = %u usec\n",
				le32_to_cpu(cmd->tx_data_timeout_uapsd));
		DPRINTF(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
		DPRINTF(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
		DPRINTF(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
	}
#endif
}
4098 
4099 void
4100 iwm_mvm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4101 	struct iwm_mac_power_cmd *cmd)
4102 {
4103 	struct ieee80211com *ic = &sc->sc_ic;
4104 	struct ieee80211_node *ni = &in->in_ni;
4105 	int dtimper, dtimper_msec;
4106 	int keep_alive;
4107 
4108 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4109 	    in->in_color));
4110 	dtimper = ic->ic_dtim_period ?: 1;
4111 
4112 	/*
4113 	 * Regardless of power management state the driver must set
4114 	 * keep alive period. FW will use it for sending keep alive NDPs
4115 	 * immediately after association. Check that keep alive period
4116 	 * is at least 3 * DTIM
4117 	 */
4118 	dtimper_msec = dtimper * ni->ni_intval;
4119 	keep_alive
4120 	    = MAX(3 * dtimper_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4121 	keep_alive = roundup(keep_alive, 1000) / 1000;
4122 	cmd->keep_alive_seconds = htole16(keep_alive);
4123 }
4124 
4125 int
4126 iwm_mvm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4127 {
4128 	int ret;
4129 	int ba_enable;
4130 	struct iwm_mac_power_cmd cmd;
4131 
4132 	memset(&cmd, 0, sizeof(cmd));
4133 
4134 	iwm_mvm_power_build_cmd(sc, in, &cmd);
4135 	iwm_mvm_power_log(sc, &cmd);
4136 
4137 	if ((ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE,
4138 	    IWM_CMD_SYNC, sizeof(cmd), &cmd)) != 0)
4139 		return ret;
4140 
4141 	ba_enable = !!(cmd.flags &
4142 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4143 	return iwm_mvm_update_beacon_abort(sc, in, ba_enable);
4144 }
4145 
4146 int
4147 iwm_mvm_power_update_device(struct iwm_softc *sc)
4148 {
4149 	struct iwm_device_power_cmd cmd = {
4150 		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
4151 	};
4152 
4153 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
4154 		return 0;
4155 
4156 	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
4157 	DPRINTF(("Sending device power command with flags = 0x%X\n", cmd.flags));
4158 
4159 	return iwm_mvm_send_cmd_pdu(sc,
4160 	    IWM_POWER_TABLE_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
4161 }
4162 
4163 int
4164 iwm_mvm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4165 {
4166 	struct iwm_beacon_filter_cmd cmd = {
4167 		IWM_BF_CMD_CONFIG_DEFAULTS,
4168 		.bf_enable_beacon_filter = htole32(1),
4169 	};
4170 	int ret;
4171 
4172 	iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
4173 	ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
4174 
4175 	if (ret == 0)
4176 		sc->sc_bf.bf_enabled = 1;
4177 
4178 	return ret;
4179 }
4180 
4181 int
4182 iwm_mvm_disable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4183 {
4184 	struct iwm_beacon_filter_cmd cmd;
4185 	int ret;
4186 
4187 	memset(&cmd, 0, sizeof(cmd));
4188 	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
4189 		return 0;
4190 
4191 	ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
4192 	if (ret == 0)
4193 		sc->sc_bf.bf_enabled = 0;
4194 
4195 	return ret;
4196 }
4197 
#if 0
/*
 * Re-send the beacon filter configuration if filtering is currently
 * enabled.  Currently compiled out; no caller exists yet.
 */
int
iwm_mvm_update_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	if (!sc->sc_bf.bf_enabled)
		return 0;

	return iwm_mvm_enable_beacon_filter(sc, in);
}
#endif
4208 
4209 /*
4210  * END mvm/power.c
4211  */
4212 
4213 /*
4214  * BEGIN mvm/sta.c
4215  */
4216 
/*
 * Convert a v6 ADD_STA command into the older v5 layout for firmware
 * that lacks the STA_KEY_CMD capability.  Only the fields shared by
 * both layouts are copied; everything else in cmd_v5 is zeroed.
 */
void
iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
	struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
{
	memset(cmd_v5, 0, sizeof(*cmd_v5));

	cmd_v5->add_modify = cmd_v6->add_modify;
	cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
	cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
	memcpy(cmd_v5->addr, cmd_v6->addr, ETHER_ADDR_LEN);
	cmd_v5->sta_id = cmd_v6->sta_id;
	cmd_v5->modify_mask = cmd_v6->modify_mask;
	cmd_v5->station_flags = cmd_v6->station_flags;
	cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
	cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
	cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
	cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
	cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
	cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
	cmd_v5->assoc_id = cmd_v6->assoc_id;
	cmd_v5->beamform_flags = cmd_v6->beamform_flags;
	cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
}
4240 
4241 int
4242 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
4243 	struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
4244 {
4245 	struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
4246 
4247 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
4248 		return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
4249 		    sizeof(*cmd), cmd, status);
4250 	}
4251 
4252 	iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
4253 
4254 	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
4255 	    &cmd_v5, status);
4256 }
4257 
4258 /* send station add/update command to firmware */
4259 int
4260 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
4261 {
4262 	struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
4263 	int ret;
4264 	uint32_t status;
4265 
4266 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
4267 
4268 	add_sta_cmd.sta_id = IWM_STATION_ID;
4269 	add_sta_cmd.mac_id_n_color
4270 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4271 	if (!update) {
4272 		add_sta_cmd.tfd_queue_msk = htole32(0xf);
4273 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
4274 	}
4275 	add_sta_cmd.add_modify = update ? 1 : 0;
4276 	add_sta_cmd.station_flags_msk
4277 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
4278 
4279 	status = IWM_ADD_STA_SUCCESS;
4280 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
4281 	if (ret)
4282 		return ret;
4283 
4284 	switch (status) {
4285 	case IWM_ADD_STA_SUCCESS:
4286 		break;
4287 	default:
4288 		ret = EIO;
4289 		DPRINTF(("IWM_ADD_STA failed\n"));
4290 		break;
4291 	}
4292 
4293 	return ret;
4294 }
4295 
/*
 * Add the BSS station to the firmware (initial add, not an update).
 */
int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
4307 
/*
 * Update an already-added BSS station in the firmware.
 */
int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 1);
}
4313 
4314 int
4315 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
4316 	const uint8_t *addr, uint16_t mac_id, uint16_t color)
4317 {
4318 	struct iwm_mvm_add_sta_cmd_v6 cmd;
4319 	int ret;
4320 	uint32_t status;
4321 
4322 	memset(&cmd, 0, sizeof(cmd));
4323 	cmd.sta_id = sta->sta_id;
4324 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
4325 
4326 	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
4327 
4328 	if (addr)
4329 		memcpy(cmd.addr, addr, ETHER_ADDR_LEN);
4330 
4331 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
4332 	if (ret)
4333 		return ret;
4334 
4335 	switch (status) {
4336 	case IWM_ADD_STA_SUCCESS:
4337 		DPRINTF(("Internal station added.\n"));
4338 		return 0;
4339 	default:
4340 		DPRINTF(("%s: Add internal station failed, status=0x%x\n",
4341 		    DEVNAME(sc), status));
4342 		ret = EIO;
4343 		break;
4344 	}
4345 	return ret;
4346 }
4347 
4348 int
4349 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
4350 {
4351 	int ret;
4352 
4353 	sc->sc_aux_sta.sta_id = 3;
4354 	sc->sc_aux_sta.tfd_queue_msk = 0;
4355 
4356 	ret = iwm_mvm_add_int_sta_common(sc,
4357 	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
4358 
4359 	if (ret)
4360 		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
4361 	return ret;
4362 }
4363 
4364 /*
4365  * END mvm/sta.c
4366  */
4367 
4368 /*
4369  * BEGIN mvm/scan.c
4370  */
4371 
4372 #define IWM_PLCP_QUIET_THRESH 1
4373 #define IWM_ACTIVE_QUIET_TIME 10
4374 #define LONG_OUT_TIME_PERIOD 600
4375 #define SHORT_OUT_TIME_PERIOD 200
4376 #define SUSPEND_TIME_PERIOD 100
4377 
4378 uint16_t
4379 iwm_mvm_scan_rx_chain(struct iwm_softc *sc)
4380 {
4381 	uint16_t rx_chain;
4382 	uint8_t rx_ant;
4383 
4384 	rx_ant = IWM_FW_VALID_RX_ANT(sc);
4385 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
4386 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
4387 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
4388 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
4389 	return htole16(rx_chain);
4390 }
4391 
4392 #define ieee80211_tu_to_usec(a) (1024*(a))
4393 
4394 uint32_t
4395 iwm_mvm_scan_max_out_time(struct iwm_softc *sc, uint32_t flags, int is_assoc)
4396 {
4397 	if (!is_assoc)
4398 		return 0;
4399 	if (flags & 0x1)
4400 		return htole32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
4401 	return htole32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
4402 }
4403 
4404 uint32_t
4405 iwm_mvm_scan_suspend_time(struct iwm_softc *sc, int is_assoc)
4406 {
4407 	if (!is_assoc)
4408 		return 0;
4409 	return htole32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
4410 }
4411 
4412 uint32_t
4413 iwm_mvm_scan_rxon_flags(struct iwm_softc *sc, int flags)
4414 {
4415 	if (flags & IEEE80211_CHAN_2GHZ)
4416 		return htole32(IWM_PHY_BAND_24);
4417 	else
4418 		return htole32(IWM_PHY_BAND_5);
4419 }
4420 
4421 uint32_t
4422 iwm_mvm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
4423 {
4424 	uint32_t tx_ant;
4425 	int i, ind;
4426 
4427 	for (i = 0, ind = sc->sc_scan_last_antenna;
4428 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
4429 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
4430 		if (IWM_FW_VALID_TX_ANT(sc) & (1 << ind)) {
4431 			sc->sc_scan_last_antenna = ind;
4432 			break;
4433 		}
4434 	}
4435 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
4436 
4437 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
4438 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
4439 				   tx_ant);
4440 	else
4441 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
4442 }
4443 
4444 /*
4445  * If req->n_ssids > 0, it means we should do an active scan.
4446  * In case of active scan w/o directed scan, we receive a zero-length SSID
4447  * just to notify that this scan is active and not passive.
4448  * In order to notify the FW of the number of SSIDs we wish to scan (including
4449  * the zero-length one), we need to set the corresponding bits in chan->type,
4450  * one for each SSID, and set the active bit (first). If the first SSID is
4451  * already included in the probe template, so we need to set only
4452  * req->n_ssids - 1 bits in addition to the first bit.
4453  */
4454 uint16_t
4455 iwm_mvm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
4456 {
4457 	if (flags & IEEE80211_CHAN_2GHZ)
4458 		return 30  + 3 * (n_ssids + 1);
4459 	return 20  + 2 * (n_ssids + 1);
4460 }
4461 
4462 uint16_t
4463 iwm_mvm_get_passive_dwell(struct iwm_softc *sc, int flags)
4464 {
4465 	return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
4466 }
4467 
/*
 * Append one channel entry per channel matching the requested band flags
 * to the scan command.  The channel array lives directly after the probe
 * request frame in the command buffer, so cmd->tx_cmd.len must already
 * be final when this is called.  Returns the number of channels added.
 */
int
iwm_mvm_scan_fill_channels(struct iwm_softc *sc, struct iwm_scan_cmd *cmd,
	int flags, int n_ssids, int basic_ssid)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t passive_dwell = iwm_mvm_get_passive_dwell(sc, flags);
	uint16_t active_dwell = iwm_mvm_get_active_dwell(sc, flags, n_ssids);
	/* Channel entries start right after the probe request payload. */
	struct iwm_scan_channel *chan = (struct iwm_scan_channel *)
		(cmd->data + le16toh(cmd->tx_cmd.len));
	/* One bit per SSID to probe actively (see block comment above). */
	int type = (1 << n_ssids) - 1;
	struct ieee80211_channel *c;
	int nchan;

	/* Without a basic SSID in the template, one extra bit is needed. */
	if (!basic_ssid)
		type |= (1 << n_ssids);

	/* Index 0 is unused; walk channels 1..IEEE80211_CHAN_MAX. */
	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX];
	    c++) {
		if ((c->ic_flags & flags) != flags)
			continue;

		chan->channel = htole16(ieee80211_mhz2ieee(c->ic_freq, flags));
		chan->type = htole32(type);
		/* Never probe actively on passive-only channels. */
		if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
			chan->type &= htole32(~IWM_SCAN_CHANNEL_TYPE_ACTIVE);
		chan->active_dwell = htole16(active_dwell);
		chan->passive_dwell = htole16(passive_dwell);
		chan->iteration_count = htole16(1);
		chan++;
		nchan++;
	}
	if (nchan == 0)
		DPRINTF(("%s: NO CHANNEL!\n", DEVNAME(sc)));
	return nchan;
}
4504 
4505 /*
4506  * Fill in probe request with the following parameters:
4507  * TA is our vif HW address, which mac80211 ensures we have.
4508  * Packet is broadcasted, so this is both SA and DA.
4509  * The probe request IE is made out of two: first comes the most prioritized
4510  * SSID if a directed scan is requested. Second comes whatever extra
4511  * information was given to us as the scan request IE.
4512  */
uint16_t
iwm_mvm_fill_probe_req(struct iwm_softc *sc, struct ieee80211_frame *frame,
	const uint8_t *ta, int n_ssids, const uint8_t *ssid, int ssid_len,
	const uint8_t *ie, int ie_len, int left)
{
	/* Running length of the frame built so far. */
	int len = 0;
	uint8_t *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data; return 0 on overflow. */
	left -= sizeof(*frame);
	if (left < 0)
		return 0;

	/* Broadcast probe request: DA and BSSID are broadcast, SA is ta. */
	frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(frame->i_addr1, etherbroadcastaddr);
	memcpy(frame->i_addr2, ta, ETHER_ADDR_LEN);
	IEEE80211_ADDR_COPY(frame->i_addr3, etherbroadcastaddr);

	len += sizeof(*frame);
	/* A 3-address 802.11 header is exactly 24 bytes. */
	CTASSERT(sizeof(*frame) == 24);

	/* for passive scans, no need to fill anything */
	if (n_ssids == 0)
		return (uint16_t)len;

	/* points to the payload of the request */
	pos = (uint8_t *)frame + sizeof(*frame);

	/* fill in our SSID IE (2-byte IE header plus SSID body) */
	left -= ssid_len + 2;
	if (left < 0)
		return 0;
	*pos++ = IEEE80211_ELEMID_SSID;
	*pos++ = ssid_len;
	if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
		memcpy(pos, ssid, ssid_len);
		pos += ssid_len;
	}

	len += ssid_len + 2;

	/* NOTE(review): if the extra IEs do not fit, they are silently
	 * omitted and the shorter frame length is returned. */
	if (left < ie_len)
		return len;

	if (ie && ie_len) {
		memcpy(pos, ie, ie_len);
		len += ie_len;
	}

	return (uint16_t)len;
}
4567 
/*
 * Build and send a full scan request command to the firmware.  The
 * command buffer (sc_scan_cmd) is laid out as: scan command header,
 * probe request frame, then one entry per channel to scan.  The probe
 * request must be filled before the channel list since the channel
 * array starts at cmd->data + tx_cmd.len.  Returns 0 on success or
 * EIO if the firmware rejected the request.
 */
int
iwm_mvm_scan_request(struct iwm_softc *sc, int flags,
	int n_ssids, uint8_t *ssid, int ssid_len)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_REQUEST_CMD,
		.len = { 0, },
		.data = { sc->sc_scan_cmd, },
		.flags = IWM_CMD_SYNC,
		.dataflags = { IWM_HCMD_DFL_NOCOPY, },
	};
	struct iwm_scan_cmd *cmd = sc->sc_scan_cmd;
	int is_assoc = 0;
	int ret;
	uint32_t status;
	int basic_ssid = !(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_NO_BASIC_SSID);

	//lockdep_assert_held(&mvm->mutex);

	/* Remember which band(s) this scan covers. */
	sc->sc_scanband = flags & (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);

	DPRINTF(("Handling ieee80211 scan request\n"));
	memset(cmd, 0, sc->sc_scan_cmd_len);

	cmd->quiet_time = htole16(IWM_ACTIVE_QUIET_TIME);
	cmd->quiet_plcp_th = htole16(IWM_PLCP_QUIET_THRESH);
	cmd->rxchain_sel_flags = iwm_mvm_scan_rx_chain(sc);
	cmd->max_out_time = iwm_mvm_scan_max_out_time(sc, 0, is_assoc);
	cmd->suspend_time = iwm_mvm_scan_suspend_time(sc, is_assoc);
	cmd->rxon_flags = iwm_mvm_scan_rxon_flags(sc, flags);
	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP |
	    IWM_MAC_FILTER_IN_BEACON);

	cmd->type = htole32(IWM_SCAN_TYPE_FORCED);
	cmd->repeats = htole32(1);

	/*
	 * If the user asked for passive scan, don't change to active scan if
	 * you see any activity on the channel - remain passive.
	 */
	if (n_ssids > 0) {
		cmd->passive2active = htole16(1);
		cmd->scan_flags |= IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
#if 0
		if (basic_ssid) {
			ssid = req->ssids[0].ssid;
			ssid_len = req->ssids[0].ssid_len;
		}
#endif
	} else {
		cmd->passive2active = 0;
		cmd->scan_flags &= ~IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
	}

	/* Probe requests are sent via the auxiliary station. */
	cmd->tx_cmd.tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	cmd->tx_cmd.sta_id = sc->sc_aux_sta.sta_id;
	cmd->tx_cmd.life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
	cmd->tx_cmd.rate_n_flags = iwm_mvm_scan_rate_n_flags(sc, flags, 1/*XXX*/);

	/* Fill the probe request first: the channel list follows it. */
	cmd->tx_cmd.len = htole16(iwm_mvm_fill_probe_req(sc,
			    (struct ieee80211_frame *)cmd->data,
			    ic->ic_myaddr, n_ssids, ssid, ssid_len,
			    NULL, 0, sc->sc_capa_max_probe_len));

	cmd->channel_count
	    = iwm_mvm_scan_fill_channels(sc, cmd, flags, n_ssids, basic_ssid);

	cmd->len = htole16(sizeof(struct iwm_scan_cmd) +
		le16toh(cmd->tx_cmd.len) +
		(cmd->channel_count * sizeof(struct iwm_scan_channel)));
	hcmd.len[0] = le16toh(cmd->len);

	status = IWM_SCAN_RESPONSE_OK;
	ret = iwm_mvm_send_cmd_status(sc, &hcmd, &status);
	if (!ret && status == IWM_SCAN_RESPONSE_OK) {
		DPRINTF(("Scan request was sent successfully\n"));
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		sc->sc_scanband = 0;
		ret = EIO;
	}
	return ret;
}
4657 
4658 /*
4659  * END mvm/scan.c
4660  */
4661 
4662 /*
4663  * BEGIN mvm/mac-ctxt.c
4664  */
4665 
/*
 * Compute the CCK and OFDM basic-rate bitmaps used for ACK/control
 * responses.  The bitmaps start from the node's band (all CCK rates
 * on 2 GHz plus all non-HT OFDM rates) and are then extended with the
 * lower mandatory rates required by 802.11-2007 9.6 (see the block
 * comment below).  Results are returned via cck_rates/ofdm_rates.
 */
void
iwm_mvm_ack_rates(struct iwm_softc *sc, struct iwm_node *in,
	int *cck_rates, int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	int lowest_present_ofdm = 100;
	int lowest_present_cck = 100;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK rates exist only on 2 GHz. */
	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = 0; i <= IWM_LAST_CCK_RATE; i++) {
			cck |= (1 << i);
			if (lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		int adj = i - IWM_FIRST_OFDM_RATE;
		ofdm |= (1 << adj);
		if (lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
4746 
/*
 * Fill the fields of a MAC context command that are common to all MAC
 * types: ids, addresses, ACK rates, slot/preamble flags, and per-AC
 * EDCA parameters.  Type-specific fields are filled by the caller.
 */
void
iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
	struct iwm_mac_ctx_cmd *cmd, uint32_t action)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = htole32(in->in_tsfid);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	/* Only advertise a BSSID once we are associated. */
	if (in->in_assoc) {
		IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
	} else {
		memset(cmd->bssid_addr, 0, sizeof(cmd->bssid_addr));
	}
	iwm_mvm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/* Identical default EDCA parameters for every AC entry.
	 * NOTE(review): the loop covers IWM_AC_NUM+1 entries — presumably
	 * the four EDCA ACs plus one extra firmware slot; confirm against
	 * the firmware API definition. */
	for (i = 0; i < IWM_AC_NUM+1; i++) {
		int txf = i;

		cmd->ac[txf].cw_min = htole16(0x0f);
		cmd->ac[txf].cw_max = htole16(0x3f);
		cmd->ac[txf].aifsn = 1;
		cmd->ac[txf].fifos_mask = (1 << txf);
		cmd->ac[txf].edca_txop = 0;
	}

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
}
4795 
4796 int
4797 iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *sc, struct iwm_mac_ctx_cmd *cmd)
4798 {
4799 	int ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC,
4800 				       sizeof(*cmd), cmd);
4801 	if (ret)
4802 		DPRINTF(("%s: Failed to send MAC context (action:%d): %d\n",
4803 		    DEVNAME(sc), le32toh(cmd->action), ret));
4804 	return ret;
4805 }
4806 
4807 /*
4808  * Fill the specific data for mac context of type station or p2p client
4809  */
/*
 * Fill the station-mode specific part of a MAC context command:
 * association state, beacon/DTIM timing and listen interval.  The MAC
 * is only marked associated when we are associated, a DTIM period is
 * known, and force_assoc_off is clear.
 */
void
iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
	struct iwm_mac_data_sta *ctxt_sta, int force_assoc_off)
{
	struct ieee80211_node *ni = &in->in_ni;
	unsigned dtim_period, dtim_count;
	struct ieee80211com *ic = &sc->sc_ic;

	/* will this work? */
	dtim_period = ic->ic_dtim_period;
	dtim_count = ic->ic_dtim_count;
	DPRINTF(("dtim %d %d\n", dtim_period, dtim_count));

	/* We need the dtim_period to set the MAC as associated */
	if (in->in_assoc && dtim_period && !force_assoc_off) {
		uint64_t tsf;
		uint32_t dtim_offs;

		/*
		 * The DTIM count counts down, so when it is N that means N
		 * more beacon intervals happen until the DTIM TBTT. Therefore
		 * add this to the current time. If that ends up being in the
		 * future, the firmware will handle it.
		 *
		 * Also note that the system_timestamp (which we get here as
		 * "sync_device_ts") and TSF timestamp aren't at exactly the
		 * same offset in the frame -- the TSF is at the first symbol
		 * of the TSF, the system timestamp is at signal acquisition
		 * time. This means there's an offset between them of at most
		 * a few hundred microseconds (24 * 8 bits + PLCP time gives
		 * 384us in the longest case), this is currently not relevant
		 * as the firmware wakes up around 2ms before the TBTT.
		 */
		dtim_offs = dtim_count * ni->ni_intval;
		/* convert TU to usecs */
		dtim_offs *= 1024;

		/* XXX: byte order? */
		memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));

		ctxt_sta->dtim_tsf = htole64(tsf + dtim_offs);
		ctxt_sta->dtim_time = htole64(ni->ni_rstamp + dtim_offs);

		DPRINTF(("DTIM TBTT is 0x%llx/0x%x, offset %d\n",
		    (long long)le64toh(ctxt_sta->dtim_tsf),
		    le32toh(ctxt_sta->dtim_time), dtim_offs));

		ctxt_sta->is_assoc = htole32(1);
	} else {
		ctxt_sta->is_assoc = htole32(0);
	}

	/* Beacon and DTIM intervals, plus their firmware reciprocals. */
	ctxt_sta->bi = htole32(ni->ni_intval);
	ctxt_sta->bi_reciprocal = htole32(iwm_mvm_reciprocal(ni->ni_intval));
	ctxt_sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
	ctxt_sta->dtim_reciprocal =
	    htole32(iwm_mvm_reciprocal(ni->ni_intval * dtim_period));

	/* 10 = CONN_MAX_LISTEN_INTERVAL */
	ctxt_sta->listen_interval = htole32(10);
	ctxt_sta->assoc_id = htole32(ni->ni_associd);
}
4872 
/*
 * Build and send a station-mode MAC context command for the given node
 * with the requested action (add/modify).
 */
int
iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *sc, struct iwm_node *in,
	uint32_t action)
{
	struct iwm_mac_ctx_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));

	/* Fill the common data for all mac context types */
	iwm_mvm_mac_ctxt_cmd_common(sc, in, &cmd, action);

	/* Allow beacons to pass through as long as we are not associated,or we
	 * do not have dtim period information */
	if (!in->in_assoc || !sc->sc_ic.ic_dtim_period)
		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
	else
		cmd.filter_flags &= ~htole32(IWM_MAC_FILTER_IN_BEACON);

	/* Fill the data specific for station mode */
	iwm_mvm_mac_ctxt_cmd_fill_sta(sc, in,
	    &cmd.sta, action == IWM_FW_CTXT_ACTION_ADD);

	return iwm_mvm_mac_ctxt_send_cmd(sc, &cmd);
}
4897 
/*
 * Dispatch a MAC context command; only station mode is supported.
 */
int
iwm_mvm_mac_ctx_send(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
{
	return iwm_mvm_mac_ctxt_cmd_station(sc, in, action);
}
4903 
4904 int
4905 iwm_mvm_mac_ctxt_add(struct iwm_softc *sc, struct iwm_node *in)
4906 {
4907 	int ret;
4908 
4909 	ret = iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_ADD);
4910 	if (ret)
4911 		return ret;
4912 
4913 	return 0;
4914 }
4915 
/*
 * Modify an already-uploaded MAC context for the node.
 */
int
iwm_mvm_mac_ctxt_changed(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_MODIFY);
}
4921 
#if 0
/*
 * Remove the node's MAC context from the firmware.  Currently compiled
 * out; no caller exists yet.
 */
int
iwm_mvm_mac_ctxt_remove(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_mac_ctx_cmd cmd;
	int ret;

	if (!in->in_uploaded) {
		/* Was `print(...)` with no trailing newline: no such kernel
		 * function exists; this would not compile if enabled. */
		printf("%s: attempt to remove !uploaded node %p\n",
		    DEVNAME(sc), in);
		return EIO;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);

	ret = iwm_mvm_send_cmd_pdu(sc,
	    IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
	if (ret) {
		printf("%s: Failed to remove MAC context: %d\n", DEVNAME(sc), ret);
		return ret;
	}
	in->in_uploaded = 0;

	return 0;
}
#endif
4951 
4952 /*
4953  * END mvm/mac-ctxt.c
4954  */
4955 
4956 /*
4957  * BEGIN mvm/quota.c
4958  */
4959 
4960 int
4961 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
4962 {
4963 	struct iwm_time_quota_cmd cmd;
4964 	int i, idx, ret, num_active_macs, quota, quota_rem;
4965 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4966 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
4967 	uint16_t id;
4968 
4969 	memset(&cmd, 0, sizeof(cmd));
4970 
4971 	/* currently, PHY ID == binding ID */
4972 	if (in) {
4973 		id = in->in_phyctxt->id;
4974 		KASSERT(id < IWM_MAX_BINDINGS);
4975 		colors[id] = in->in_phyctxt->color;
4976 
4977 		if (1)
4978 			n_ifs[id] = 1;
4979 	}
4980 
4981 	/*
4982 	 * The FW's scheduling session consists of
4983 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
4984 	 * equally between all the bindings that require quota
4985 	 */
4986 	num_active_macs = 0;
4987 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4988 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4989 		num_active_macs += n_ifs[i];
4990 	}
4991 
4992 	quota = 0;
4993 	quota_rem = 0;
4994 	if (num_active_macs) {
4995 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
4996 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
4997 	}
4998 
4999 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
5000 		if (colors[i] < 0)
5001 			continue;
5002 
5003 		cmd.quotas[idx].id_and_color =
5004 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
5005 
5006 		if (n_ifs[i] <= 0) {
5007 			cmd.quotas[idx].quota = htole32(0);
5008 			cmd.quotas[idx].max_duration = htole32(0);
5009 		} else {
5010 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5011 			cmd.quotas[idx].max_duration = htole32(0);
5012 		}
5013 		idx++;
5014 	}
5015 
5016 	/* Give the remainder of the session to the first binding */
5017 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5018 
5019 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
5020 	    sizeof(cmd), &cmd);
5021 	if (ret)
5022 		DPRINTF(("%s: Failed to send quota: %d\n", DEVNAME(sc), ret));
5023 	return ret;
5024 }
5025 
5026 /*
5027  * END mvm/quota.c
5028  */
5029 
5030 /*
5031  * aieee80211 routines
5032  */
5033 
5034 /*
5035  * Change to AUTH state in 80211 state machine.  Roughly matches what
5036  * Linux does in bss_info_changed().
5037  */
5038 int
5039 iwm_auth(struct iwm_softc *sc)
5040 {
5041 	struct ieee80211com *ic = &sc->sc_ic;
5042 	struct iwm_node *in = (void *)ic->ic_bss;
5043 	uint32_t duration;
5044 	uint32_t min_duration;
5045 	int error;
5046 
5047 	in->in_assoc = 0;
5048 
5049 	error = iwm_allow_mcast(sc);
5050 	if (error)
5051 		return error;
5052 
5053 	if ((error = iwm_mvm_mac_ctxt_add(sc, in)) != 0) {
5054 		DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
5055 		return error;
5056 	}
5057 
5058 	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
5059 	    in->in_ni.ni_chan, 1, 1)) != 0) {
5060 		DPRINTF(("%s: failed add phy ctxt\n", DEVNAME(sc)));
5061 		return error;
5062 	}
5063 	in->in_phyctxt = &sc->sc_phyctxt[0];
5064 
5065 	if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
5066 		DPRINTF(("%s: binding cmd\n", DEVNAME(sc)));
5067 		return error;
5068 	}
5069 
5070 	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
5071 		DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
5072 		return error;
5073 	}
5074 
5075 	/* a bit superfluous? */
5076 	while (sc->sc_auth_prot)
5077 		tsleep(&sc->sc_auth_prot, 0, "iwmauth", 0);
5078 	sc->sc_auth_prot = 1;
5079 
5080 	duration = min(IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
5081 	    200 + in->in_ni.ni_intval);
5082 	min_duration = min(IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
5083 	    100 + in->in_ni.ni_intval);
5084 	iwm_mvm_protect_session(sc, in, duration, min_duration, 500);
5085 
5086 	while (sc->sc_auth_prot != 2) {
5087 		/*
5088 		 * well, meh, but if the kernel is sleeping for half a
5089 		 * second, we have bigger problems
5090 		 */
5091 		if (sc->sc_auth_prot == 0) {
5092 			DPRINTF(("%s: missed auth window!\n", DEVNAME(sc)));
5093 			return ETIMEDOUT;
5094 		} else if (sc->sc_auth_prot == -1) {
5095 			DPRINTF(("%s: no time event, denied!\n", DEVNAME(sc)));
5096 			sc->sc_auth_prot = 0;
5097 			return EAUTH;
5098 		}
5099 		tsleep(&sc->sc_auth_prot, 0, "iwmau2", 0);
5100 	}
5101 
5102 	return 0;
5103 }
5104 
5105 int
5106 iwm_assoc(struct iwm_softc *sc)
5107 {
5108 	struct ieee80211com *ic = &sc->sc_ic;
5109 	struct iwm_node *in = (void *)ic->ic_bss;
5110 	int error;
5111 
5112 	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
5113 		DPRINTF(("%s: failed to update STA\n", DEVNAME(sc)));
5114 		return error;
5115 	}
5116 
5117 	in->in_assoc = 1;
5118 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
5119 		DPRINTF(("%s: failed to update MAC\n", DEVNAME(sc)));
5120 		return error;
5121 	}
5122 
5123 	return 0;
5124 }
5125 
/*
 * Tear down the connection state when leaving RUN.  Rather than undoing
 * the firmware state step by step (which hangs the device, see below),
 * perform a full device reset and re-init.  Always returns 0.
 */
int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 */
	//iwm_mvm_flush_tx_path(sc, 0xf, 1);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	/*
	 * Dead code: the step-by-step teardown described above.
	 * NOTE(review): iwm_mvm_rm_sta() is called twice in this path.
	 */
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		printf("%s: mac ctxt change fail 1 %d\n", DEVNAME(sc), error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		printf("%s: sta remove fail %d\n", DEVNAME(sc), error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		printf("%s: mac ctxt change fail 2 %d\n", DEVNAME(sc), error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
5183 
5184 struct ieee80211_node *
5185 iwm_node_alloc(struct ieee80211com *ic)
5186 {
5187 	return malloc(sizeof (struct iwm_node), M_DEVBUF, M_NOWAIT | M_ZERO);
5188 }
5189 
5190 void
5191 iwm_calib_timeout(void *arg)
5192 {
5193 	struct iwm_softc *sc = arg;
5194 	struct ieee80211com *ic = &sc->sc_ic;
5195 	int s;
5196 
5197 	s = splnet();
5198 	if (ic->ic_fixed_rate == -1
5199 	    && ic->ic_opmode == IEEE80211_M_STA
5200 	    && ic->ic_bss) {
5201 		struct iwm_node *in = (void *)ic->ic_bss;
5202 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
5203 	}
5204 	splx(s);
5205 
5206 	timeout_add_msec(&sc->sc_calib_to, 500);
5207 }
5208 
/*
 * Build the firmware link-quality (rate selection) command for a node
 * and initialize AMRR state.  Maps the node's 802.11 rate set to HW rate
 * indices, then fills lq->rs_table highest-rate-first, rotating the TX
 * antenna between entries.  The command itself is sent by the caller
 * (see the IEEE80211_S_RUN case in iwm_newstate_cb()).
 */
void
iwm_setrates(struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	/* Bail out rather than overflow the fixed-size firmware table. */
	if (nrates > nitems(lq->rs_table)) {
		DPRINTF(("%s: node supports %d rates, driver handles "
		    "only %zu\n", DEVNAME(sc), nrates, nitems(lq->rs_table)));
		return;
	}

	/* first figure out which rates we should support */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	for (i = 0; i < nrates; i++) {
		int rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX)
			DPRINTF(("%s: WARNING: device rate for %d not found!\n",
			    DEVNAME(sc), rate));
		else
			in->in_ridx[i] = ridx;
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Refill the antenna mask once exhausted, then rotate. */
		if (txant == 0)
			txant = IWM_FW_VALID_TX_ANT(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		ridx = in->in_ridx[(nrates-1)-i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		DPRINTFN(2, ("station rate %d %x\n", i, tab));
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		/* tab still holds the last (lowest) rate from the loop above */
		KASSERT(tab != 0);
		lq->rs_table[i] = htole32(tab);
	}

	/* init amrr */
	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
	/* Start at lowest available bit-rate, AMRR will raise. */
	ni->ni_txrate = 0;
}
5288 
/*
 * ifmedia change handler: record a fixed-rate selection (if any) as a
 * HW rate index and restart the interface when it is already running.
 */
int
iwm_media_change(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rate, ridx;
	int error;

	error = ieee80211_media_change(ifp);
	if (error != ENETRESET)
		return error;

	if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		/*
		 * NOTE(review): if no entry matches, ridx ends up as
		 * IWM_RIDX_MAX + 1 and is stored as-is; confirm whether
		 * an unsupported fixed rate can actually be selected here.
		 */
		sc->sc_fixed_ridx = ridx;
	}

	/* Restart the interface to apply the new media settings. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
	    (IFF_UP | IFF_RUNNING)) {
		iwm_stop(ifp, 0);
		error = iwm_init(ifp);
	}
	return error;
}
5318 
/*
 * Deferred 802.11 state-change handler, run from a task queue (see
 * iwm_newstate()).  Owns and frees the iwm_newstate_state argument.
 * Performs the firmware work for each state transition and finally
 * chains to the stack's original newstate handler.
 */
void
iwm_newstate_cb(void *wk)
{
	struct iwm_newstate_state *iwmns = (void *)wk;
	struct ieee80211com *ic = iwmns->ns_ic;
	enum ieee80211_state nstate = iwmns->ns_nstate;
	int generation = iwmns->ns_generation;
	struct iwm_node *in;
	int arg = iwmns->ns_arg;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_softc *sc = ifp->if_softc;
	int error;

	free(iwmns, M_DEVBUF, sizeof(*iwmns));

	DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
	/*
	 * A generation mismatch means the device was reset after this task
	 * was queued; only an INIT transition is still worth delivering.
	 */
	if (sc->sc_generation != generation) {
		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
		if (nstate == IEEE80211_S_INIT) {
			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
			sc->sc_newstate(ic, nstate, arg);
		}
		return;
	}

	DPRINTF(("switching state %d->%d\n", ic->ic_state, nstate));

	/* disable beacon filtering if we're hopping out of RUN */
	if (ic->ic_state == IEEE80211_S_RUN && nstate != ic->ic_state) {
		iwm_mvm_disable_beacon_filter(sc, (void *)ic->ic_bss);

		if (((in = (void *)ic->ic_bss) != NULL))
			in->in_assoc = 0;
		/* Full device reset; see iwm_release() for why. */
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
			sc->sc_newstate(ic, IEEE80211_S_INIT, arg);
			DPRINTF(("Going INIT->SCAN\n"));
			nstate = IEEE80211_S_SCAN;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		sc->sc_scanband = 0;
		break;

	case IEEE80211_S_SCAN:
		/* Non-zero sc_scanband means a scan is already in flight. */
		if (sc->sc_scanband)
			break;

		if ((error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ,
		    ic->ic_des_esslen != 0,
		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
			printf("%s: could not initiate scan\n", DEVNAME(sc));
			return;
		}
		/* Scan completion continues in iwm_endscan_cb(). */
		ic->ic_state = nstate;
		return;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(sc)) != 0) {
			DPRINTF(("%s: could not move to auth state: %d\n",
			    DEVNAME(sc), error));
			return;
		}

		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(sc)) != 0) {
			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
			    error));
			return;
		}
		break;

	case IEEE80211_S_RUN: {
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		in = (struct iwm_node *)ic->ic_bss;
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		/* Prepare in->in_lq, then push it to the firmware. */
		iwm_setrates(in);

		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			DPRINTF(("%s: IWM_LQ_CMD failed\n", DEVNAME(sc)));
		}

		/* Kick off periodic rate adaptation. */
		timeout_add_msec(&sc->sc_calib_to, 500);

		break; }

	default:
		break;
	}

	sc->sc_newstate(ic, nstate, arg);
}
5438 
5439 int
5440 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
5441 {
5442 	struct iwm_newstate_state *iwmns;
5443 	struct ifnet *ifp = IC2IFP(ic);
5444 	struct iwm_softc *sc = ifp->if_softc;
5445 
5446 	timeout_del(&sc->sc_calib_to);
5447 
5448 	iwmns = malloc(sizeof(*iwmns), M_DEVBUF, M_NOWAIT);
5449 	if (!iwmns) {
5450 		DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
5451 		return ENOMEM;
5452 	}
5453 
5454 	iwmns->ns_ic = ic;
5455 	iwmns->ns_nstate = nstate;
5456 	iwmns->ns_arg = arg;
5457 	iwmns->ns_generation = sc->sc_generation;
5458 
5459 	task_set(&iwmns->ns_wk, iwm_newstate_cb, iwmns);
5460 	task_add(sc->sc_nswq, &iwmns->ns_wk);
5461 
5462 	return 0;
5463 }
5464 
5465 void
5466 iwm_endscan_cb(void *arg)
5467 {
5468 	struct iwm_softc *sc = arg;
5469 	struct ieee80211com *ic = &sc->sc_ic;
5470 	int done;
5471 
5472 	DPRINTF(("scan ended\n"));
5473 
5474 	if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
5475 	    sc->sc_nvm.sku_cap_band_52GHz_enable) {
5476 		int error;
5477 		done = 0;
5478 		if ((error = iwm_mvm_scan_request(sc,
5479 		    IEEE80211_CHAN_5GHZ, ic->ic_des_esslen != 0,
5480 		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
5481 			printf("%s: could not initiate scan\n", DEVNAME(sc));
5482 			done = 1;
5483 		}
5484 	} else {
5485 		done = 1;
5486 	}
5487 
5488 	if (done) {
5489 		if (!sc->sc_scanband) {
5490 			ic->ic_scan_lock = IEEE80211_SCAN_UNLOCKED;
5491 		} else {
5492 			ieee80211_end_scan(&ic->ic_if);
5493 		}
5494 		sc->sc_scanband = 0;
5495 	}
5496 }
5497 
/*
 * Full hardware bring-up: run the INIT firmware image, restart, load the
 * regular firmware, then configure antennas, PHY db, stations, PHY
 * contexts, power and TX queues.  The call order matters; on any failure
 * after firmware load the device is stopped again.  Returns 0 on success.
 */
int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, qid;

	if ((error = iwm_preinit(sc)) != 0)
		return error;

	if ((error = iwm_start_hw(sc)) != 0)
		return error;

	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		printf("%s: could not load firmware\n", DEVNAME(sc));
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		goto error;

	/* Send phy db control command and then phy db calibration */
	if ((error = iwm_send_phy_db_data(sc)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
		goto error;

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
		goto error;

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* Mark TX rings as active. */
	for (qid = 0; qid < 4; qid++) {
		iwm_enable_txq(sc, qid, qid);
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
5571 
5572 /* Allow multicast from our BSSID. */
5573 int
5574 iwm_allow_mcast(struct iwm_softc *sc)
5575 {
5576 	struct ieee80211com *ic = &sc->sc_ic;
5577 	struct ieee80211_node *ni = ic->ic_bss;
5578 	struct iwm_mcast_filter_cmd *cmd;
5579 	size_t size;
5580 	int error;
5581 
5582 	size = roundup(sizeof(*cmd), 4);
5583 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
5584 	if (cmd == NULL)
5585 		return ENOMEM;
5586 	cmd->filter_own = 1;
5587 	cmd->port_id = 0;
5588 	cmd->count = 0;
5589 	cmd->pass_all = 1;
5590 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
5591 
5592 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
5593 	    IWM_CMD_SYNC, size, cmd);
5594 	free(cmd, M_DEVBUF, size);
5595 	return error;
5596 }
5597 
5598 /*
5599  * ifnet interfaces
5600  */
5601 
5602 int
5603 iwm_init(struct ifnet *ifp)
5604 {
5605 	struct iwm_softc *sc = ifp->if_softc;
5606 	int error;
5607 
5608 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
5609 		return 0;
5610 	}
5611 	sc->sc_generation++;
5612 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
5613 
5614 	if ((error = iwm_init_hw(sc)) != 0) {
5615 		iwm_stop(ifp, 1);
5616 		return error;
5617 	}
5618 
5619 	/*
5620  	 * Ok, firmware loaded and we are jogging
5621 	 */
5622 
5623 	ifp->if_flags &= ~IFF_OACTIVE;
5624 	ifp->if_flags |= IFF_RUNNING;
5625 
5626 	ieee80211_begin_scan(ifp);
5627 	sc->sc_flags |= IWM_FLAG_HW_INITED;
5628 
5629 	return 0;
5630 }
5631 
5632 /*
5633  * Dequeue packets from sendq and call send.
5634  * mostly from iwn
5635  */
5636 void
5637 iwm_start(struct ifnet *ifp)
5638 {
5639 	struct iwm_softc *sc = ifp->if_softc;
5640 	struct ieee80211com *ic = &sc->sc_ic;
5641 	struct ieee80211_node *ni;
5642 	struct ether_header *eh;
5643 	struct mbuf *m;
5644 	int ac;
5645 
5646 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
5647 		return;
5648 
5649 	for (;;) {
5650 		/* why isn't this done per-queue? */
5651 		if (sc->qfullmsk != 0) {
5652 			ifp->if_flags |= IFF_OACTIVE;
5653 			break;
5654 		}
5655 
5656 		/* need to send management frames even if we're not RUNning */
5657 		IF_DEQUEUE(&ic->ic_mgtq, m);
5658 		if (m) {
5659 			ni = m->m_pkthdr.ph_cookie;
5660 			ac = 0;
5661 			goto sendit;
5662 		}
5663 		if (ic->ic_state != IEEE80211_S_RUN) {
5664 			break;
5665 		}
5666 
5667 		IFQ_DEQUEUE(&ifp->if_snd, m);
5668 		if (!m)
5669 			break;
5670 		if (m->m_len < sizeof (*eh) &&
5671 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
5672 			ifp->if_oerrors++;
5673 			continue;
5674 		}
5675 #if NBPFILTER > 0
5676 		if (ifp->if_bpf != NULL)
5677 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
5678 #endif
5679 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
5680 			ifp->if_oerrors++;
5681 			continue;
5682 		}
5683 
5684  sendit:
5685 #if NBPFILTER > 0
5686 		if (ic->ic_rawbpf != NULL)
5687 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
5688 #endif
5689 		if (iwm_tx(sc, m, ni, ac) != 0) {
5690 			ieee80211_release_node(ic, ni);
5691 			ifp->if_oerrors++;
5692 			continue;
5693 		}
5694 
5695 		if (ifp->if_flags & IFF_UP) {
5696 			sc->sc_tx_timer = 15;
5697 			ifp->if_timer = 1;
5698 		}
5699 	}
5700 
5701 	return;
5702 }
5703 
5704 void
5705 iwm_stop(struct ifnet *ifp, int disable)
5706 {
5707 	struct iwm_softc *sc = ifp->if_softc;
5708 	struct ieee80211com *ic = &sc->sc_ic;
5709 
5710 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
5711 	sc->sc_flags |= IWM_FLAG_STOPPED;
5712 	sc->sc_generation++;
5713 	sc->sc_scanband = 0;
5714 	sc->sc_auth_prot = 0;
5715 	ic->ic_scan_lock = IEEE80211_SCAN_UNLOCKED;
5716 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5717 
5718 	if (ic->ic_state != IEEE80211_S_INIT)
5719 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
5720 
5721 	timeout_del(&sc->sc_calib_to);
5722 	ifp->if_timer = sc->sc_tx_timer = 0;
5723 	iwm_stop_device(sc);
5724 }
5725 
5726 void
5727 iwm_watchdog(struct ifnet *ifp)
5728 {
5729 	struct iwm_softc *sc = ifp->if_softc;
5730 
5731 	ifp->if_timer = 0;
5732 	if (sc->sc_tx_timer > 0) {
5733 		if (--sc->sc_tx_timer == 0) {
5734 			printf("%s: device timeout\n", DEVNAME(sc));
5735 #ifdef IWM_DEBUG
5736 			iwm_nic_error(sc);
5737 #endif
5738 			ifp->if_flags &= ~IFF_UP;
5739 			iwm_stop(ifp, 1);
5740 			ifp->if_oerrors++;
5741 			return;
5742 		}
5743 		ifp->if_timer = 1;
5744 	}
5745 
5746 	ieee80211_watchdog(ifp);
5747 }
5748 
/*
 * ifnet ioctl handler.  Serialized by the IWM_FLAG_BUSY flag plus a
 * tsleep/wakeup handshake so only one process runs here at a time
 * (several paths below can sleep).  Returns 0 or errno.
 */
int
iwm_ioctl(struct ifnet *ifp, u_long cmd, iwm_caddr_t data)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifaddr *ifa;
	struct ifreq *ifr;
	int s, error = 0;

	s = splnet();

	/*
	 * Prevent processes from entering this function while another
	 * process is tsleep'ing in it.
	 */
	while ((sc->sc_flags & IWM_FLAG_BUSY) && error == 0)
		error = tsleep(&sc->sc_flags, PCATCH, "iwmioc", 0);
	if (error != 0) {
		/* Interrupted by a signal (PCATCH); bail out. */
		splx(s);
		return error;
	}
	sc->sc_flags |= IWM_FLAG_BUSY;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		ifa = (struct ifaddr *)data;
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&ic->ic_ac, ifa);
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				if ((error = iwm_init(ifp)) != 0)
					ifp->if_flags &= ~IFF_UP;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				iwm_stop(ifp, 1);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Firmware passes all multicast; just maintain the list. */
		ifr = (struct ifreq *)data;
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &ic->ic_ac) :
		    ether_delmulti(ifr, &ic->ic_ac);
		if (error == ENETRESET)
			error = 0;
		break;

	default:
		error = ieee80211_ioctl(ifp, cmd, data);
	}

	/* ENETRESET from net80211 means settings changed: restart if up. */
	if (error == ENETRESET) {
		error = 0;
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			iwm_stop(ifp, 0);
			error = iwm_init(ifp);
		}
	}

	/* Release the busy flag and wake any waiters. */
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	splx(s);
	return error;
}
5819 
5820 /*
5821  * The interrupt side of things
5822  */
5823 
5824 /*
5825  * error dumping routines are from iwlwifi/mvm/utils.c
5826  */
5827 
5828 /*
5829  * Note: This structure is read from the device with IO accesses,
5830  * and the reading already does the endian conversion. As it is
5831  * read with uint32_t-sized accesses, any members with a different size
5832  * need to be ordered correctly though!
5833  */
/*
 * Firmware error log layout in device SRAM (see iwm_nic_error()).
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with uint32_t-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t pc;			/* program counter */
	uint32_t blink1;		/* branch link */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t gp3;		/* GP3 timer register */
	uint32_t ucode_ver;		/* uCode version */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed;

/* Byte offset of the first log record and size of one record. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5883 
/*
 * Human-readable names for firmware error_id values.
 * The final entry ("ADVANCED_SYSASSERT", num 0) is the fallback used by
 * iwm_desc_lookup() when no other entry matches — keep it last.
 */
struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
5905 
5906 const char *
5907 iwm_desc_lookup(uint32_t num)
5908 {
5909 	int i;
5910 
5911 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5912 		if (advanced_lookup[i].num == num)
5913 			return advanced_lookup[i].name;
5914 
5915 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5916 	return advanced_lookup[i].name;
5917 }
5918 
5919 #ifdef IWM_DEBUG
5920 /*
5921  * Support for dumping the error log seemed like a good idea ...
5922  * but it's mostly hex junk and the only sensible thing is the
5923  * hw/ucode revision (which we know anyway).  Since it's here,
5924  * I'll just leave it in, just in case e.g. the Intel guys want to
5925  * help us decipher some "ADVANCED_SYSASSERT" later.
5926  */
/*
 * Read the firmware's error event table out of device SRAM and dump it
 * to the console.  Debug-only (IWM_DEBUG); called from the watchdog on
 * a device timeout.
 */
void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	printf("%s: dumping device error log\n", DEVNAME(sc));
	base = sc->sc_uc.uc_error_event_table;
	/* The table pointer must fall inside device SRAM. */
	if (base < 0x800000 || base >= 0x80C000) {
		printf("%s: Not valid error log pointer 0x%08x\n",
		    DEVNAME(sc), base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
		printf("%s: reading errlog failed\n", DEVNAME(sc));
		return;
	}

	if (!table.valid) {
		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		printf("%s: Start IWL Error Log Dump:\n", DEVNAME(sc));
		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
		    sc->sc_flags, table.valid);
	}

	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
		iwm_desc_lookup(table.error_id));
	printf("%s: %08X | uPc\n", DEVNAME(sc), table.pc);
	printf("%s: %08X | branchlink1\n", DEVNAME(sc), table.blink1);
	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
	printf("%s: %08X | time gp3\n", DEVNAME(sc), table.gp3);
	printf("%s: %08X | uCode version\n", DEVNAME(sc), table.ucode_ver);
	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
	printf("%s: %08X | isr_pref\n", DEVNAME(sc), table.isr_pref);
	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
}
5992 #endif
5993 
/*
 * Sync the response payload that follows the RX packet header `_pkt_'
 * (sizeof(*_var_) bytes starting at offset sizeof(*_pkt_) into the
 * buffer) for CPU reads, then point `_var_' at that payload.
 * Relies on `sc' and `data' (the current iwm_rx_data) being in scope
 * at the call site.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
6000 
/*
 * Like SYNC_RESP_STRUCT, but for a variable-length payload: sync
 * `_len_' bytes following the packet header `_pkt_', then point
 * `_ptr_' at them.  Relies on `sc' and `data' being in scope.
 *
 * Bug fix: the macro previously synced sizeof(len) bytes -- the size
 * of a `len' variable captured from the call site (4 bytes) -- instead
 * of the `_len_' argument, leaving most of the payload unsynced on
 * non-coherent platforms.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
6007 
/*
 * Advance the RX ring consumer index, wrapping at IWM_RX_RING_COUNT.
 * Fixed: the semicolon was previously baked into the macro expansion,
 * which produces empty statements after each use and breaks the macro
 * inside an unbraced if/else; `sc' is now parenthesized as well.
 */
#define ADVANCE_RXQ(sc) ((sc)->rxq.cur = ((sc)->rxq.cur + 1) % IWM_RX_RING_COUNT)
6009 
6010 /*
6011  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
6012  * Basic structure from if_iwn
6013  */
void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	/* Make the firmware-written RX status page visible to the CPU. */
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);

	/* closed_rb_num: ring index up to which the firmware has filled. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int qid, idx;

		/* Sync just the packet header first; payload synced per-case. */
		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 7 of qid marks firmware-originated notifications. */
		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		DPRINTFN(12, ("rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
		    pkt->hdr.code, sc->rxq.cur, hw));

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (pkt->hdr.code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION:
			/* OpenBSD does not provide ieee80211_beacon_miss() */
			break;

		case IWM_MVM_ALIVE: {
			/*
			 * Firmware has booted: record its debug-table
			 * pointers and wake whoever is waiting in the
			 * ucode load path (sleeping on sc_uc).
			 */
			struct iwm_mvm_alive_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			sc->sc_uc.uc_error_event_table
			    = le32toh(resp->error_event_table_ptr);
			sc->sc_uc.uc_log_event_table
			    = le32toh(resp->log_event_table_ptr);
			sc->sched_base = le32toh(resp->scd_base_ptr);
			sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;

			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break; }

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			/* Store a PHY calibration section for later upload. */
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);

			iwm_phy_db_set_section(sc, phy_db_notif);

			break; }

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break; }

		case IWM_NVM_ACCESS_CMD:
			/*
			 * Copy the raw response into sc_cmd_resp for the
			 * synchronous-command path that is waiting on this
			 * (qid, idx) pair.
			 */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
				    sizeof(sc->sc_cmd_resp),
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
			/* Generic command responses: header + status word. */
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			/* Wake the init-ucode path sleeping in run_init_mvm_ucode. */
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_COMPLETE_NOTIFICATION: {
			/* Defer end-of-scan processing to the iwmes taskq. */
			struct iwm_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			task_add(sc->sc_eswq, &sc->sc_eswk);
			break; }

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			printf("%s: firmware error 0x%x, cmd 0x%x\n",
				DEVNAME(sc), le32toh(resp->error_type),
				resp->cmd_id);
			break; }

		case IWM_TIME_EVENT_NOTIFICATION: {
			/*
			 * Track the auth/assoc time-event protection window:
			 * 2 = started, 0 = ended, -1 = firmware reported
			 * failure.  The newstate machinery sleeps on
			 * sc_auth_prot.
			 */
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			if (notif->status) {
				if (le32toh(notif->action) &
				    IWM_TE_V2_NOTIF_HOST_EVENT_START)
					sc->sc_auth_prot = 2;
				else
					sc->sc_auth_prot = 0;
			} else {
				sc->sc_auth_prot = -1;
			}
			wakeup(&sc->sc_auth_prot);
			break; }

		case IWM_MCAST_FILTER_CMD:
			break;

		default:
			printf("%s: frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", DEVNAME(sc), qid, idx,
			    pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	/* Release the NIC; the MAC access was claimed by the ISR path. */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
6213 
/*
 * PCI interrupt handler.  Reads and acknowledges the interrupt causes
 * (either from the in-memory ICT table or the IWM_CSR_INT register),
 * then dispatches: fatal firmware/hardware errors stop the interface,
 * FH_TX wakes the firmware-load path, RF_KILL downs the interface,
 * and RX causes are funneled into iwm_notif_intr().
 * Returns 1 if the interrupt was ours, 0 otherwise.
 */
int
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

	/* Mask all interrupts while we figure out what happened. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/*
		 * NOTE(review): htole32() is used where le32toh() is
		 * logically meant (the ICT table is little-endian); both
		 * perform the same byte swap, so the result is identical.
		 */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Clear each consumed ICT slot for the next pass. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		/* Unpack the compressed ICT cause bits into CSR_INT layout. */
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the causes we are about to handle. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		DPRINTF(("driver status:\n"));
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued));
		}
		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
		DPRINTF(("  802.11 state %d\n", sc->sc_ic.ic_state));
#endif

		/* Firmware crashed: take the interface down. */
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;

	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;

		/* Wake iwm_load_firmware_chunk() waiting on sc_fw. */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
			DPRINTF(("%s: rfkill switch, disabling interface\n",
			    DEVNAME(sc)));
			ifp->if_flags &= ~IFF_UP;
			iwm_stop(ifp, 1);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		/* Drain the RX ring of responses and notifications. */
		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		DPRINTF(("%s: unhandled interrupts: %x\n", DEVNAME(sc), r1));
	rv = 1;

 out_ena:
	/* Re-enable the interrupt mask cleared at entry. */
	iwm_restore_interrupts(sc);
 out:
	return rv;
}
6356 
6357 /*
6358  * Autoconf glue-sniffing
6359  */
6360 
/* Opaque match-argument type for the autoconf match function. */
typedef void *iwm_match_t;

/* PCI vendor/product IDs of supported Intel 3160/7260/7265 adapters. */
static const struct pci_matchid iwm_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_2 },
};
6371 
6372 int
6373 iwm_match(struct device *parent, iwm_match_t match __unused, void *aux)
6374 {
6375 	return pci_matchbyid((struct pci_attach_args *)aux, iwm_devices,
6376 	    nitems(iwm_devices));
6377 }
6378 
/*
 * Deferred second attach stage, run once the root filesystem is
 * available: prepare the hardware, load the init firmware to read the
 * NVM (MAC address, band capabilities), then re-attach net80211 so the
 * real MAC address and channel map take effect.  On resume (attached
 * already set) only re-prepares the card.  Returns 0 on success.
 */
int
iwm_preinit(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int error;
	static int attached;

	if ((error = iwm_prepare_card_hw(sc)) != 0) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return error;
	}

	/* The one-time firmware probe below only runs on first attach. */
	if (attached)
		return 0;

	if ((error = iwm_start_hw(sc)) != 0) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return error;
	}

	/* Boot the init ucode (reads the NVM), then power down again. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error)
		return error;

	/* Print version info and MAC address on first successful fw load. */
	attached = 1;
	printf("%s: hw rev: 0x%x, fw ver %d.%d (API ver %d), address %s\n",
	    DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    IWM_UCODE_MAJOR(sc->sc_fwver),
	    IWM_UCODE_MINOR(sc->sc_fwver),
	    IWM_UCODE_API(sc->sc_fwver),
	    ether_sprintf(sc->sc_nvm.hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));

	/* Reattach net80211 so MAC address and channel map are picked up. */
	ieee80211_ifdetach(ifp);
	ieee80211_ifattach(ifp);

	ic->ic_node_alloc = iwm_node_alloc;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;
	/* ifattach above reset the media hooks; reinstall ours. */
	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);

	return 0;
}
6432 
6433 void
6434 iwm_attach_hook(iwm_hookarg_t arg)
6435 {
6436 	struct iwm_softc *sc = arg;
6437 
6438 	KASSERT(!cold);
6439 
6440 	iwm_preinit(sc);
6441 }
6442 
/*
 * Autoconf attach: map the PCI device, set up the interrupt handler,
 * allocate all DMA resources (firmware area, keep-warm page, ICT
 * table, TX scheduler, TX/RX rings), initialize net80211 state and
 * the ifnet, and defer firmware-dependent setup (iwm_preinit) until
 * the root filesystem is mounted.
 */
void
iwm_attach(struct device *parent, struct device *self, void *aux)
{
	struct iwm_softc *sc = (void *)self;
	struct pci_attach_args *pa = aux;
	pci_intr_handle_t ih;
	pcireg_t reg, memtype;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	const char *intrstr;
	int error;
	int txq_i, i;

	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	/* End-of-scan processing runs from a task, not interrupt context. */
	task_set(&sc->sc_eswk, iwm_endscan_cb, sc);

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
	/* pci_get_capability() returns 0 when the capability is absent. */
	if (error == 0) {
		printf("%s: PCIe capability structure not found!\n",
		    DEVNAME(sc));
		return;
	}

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	/* Enable bus-mastering and hardware bug workaround. */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	/* if !MSI */
	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
	}
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);

	/* Map BAR0 (device registers). */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	error = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
	if (error != 0) {
		printf("%s: can't map mem space\n", DEVNAME(sc));
		return;
	}

	/* Install interrupt handler. */
	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
		printf("%s: can't map interrupt\n", DEVNAME(sc));
		return;
	}

	intrstr = pci_intr_string(sc->sc_pct, ih);
	sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwm_intr, sc,
	    DEVNAME(sc));

	if (sc->sc_ih == NULL) {
		printf("\n");
		printf("%s: can't establish interrupt", DEVNAME(sc));
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(", %s\n", intrstr);

	/* -1 means no synchronous command response is expected. */
	sc->sc_wantresp = -1;

	/* Select firmware image and interrupt quirk by adapter model. */
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_INTEL_WL_3160_1:
	case PCI_PRODUCT_INTEL_WL_3160_2:
		sc->sc_fwname = "iwm-3160-9";
		sc->host_interrupt_operation_mode = 1;
		break;
	case PCI_PRODUCT_INTEL_WL_7260_1:
	case PCI_PRODUCT_INTEL_WL_7260_2:
		sc->sc_fwname = "iwm-7260-9";
		sc->host_interrupt_operation_mode = 1;
		break;
	case PCI_PRODUCT_INTEL_WL_7265_1:
	case PCI_PRODUCT_INTEL_WL_7265_2:
		sc->sc_fwname = "iwm-7265-9";
		sc->host_interrupt_operation_mode = 0;
		break;
	default:
		printf("%s: unknown adapter type\n", DEVNAME(sc));
		return;
	}
	sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;

	/*
	 * We now start fiddling with the hardware
	 */

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	if (iwm_prepare_card_hw(sc) != 0) {
		printf("%s: could not initialize hardware\n", DEVNAME(sc));
		return;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		printf("%s: could not allocate memory for firmware\n",
		    DEVNAME(sc));
		return;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		printf("%s: could not allocate keep warm page\n", DEVNAME(sc));
		goto fail1;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
		goto fail2;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		printf("%s: could not allocate TX scheduler rings\n",
		    DEVNAME(sc));
		goto fail3;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			printf("%s: could not allocate TX ring %d\n",
			    DEVNAME(sc), txq_i);
			goto fail4;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
		goto fail4;
	}

	/*
	 * NOTE(review): on the two taskq_create failure paths below the
	 * RX ring allocated above (and a created eswq) are not released
	 * by the fail4 cleanup -- verify whether iwm_free_rx_ring() /
	 * taskq_destroy() should be added here.
	 */
	sc->sc_eswq = taskq_create("iwmes", 1, IPL_NET, 0);
	if (sc->sc_eswq == NULL)
		goto fail4;
	sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET, 0);
	if (sc->sc_nswq == NULL)
		goto fail4;

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_RSN |		/* WPA/RSN */
	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */

	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
	}

	sc->sc_amrr.amrr_min_success_threshold =  1;
	sc->sc_amrr.amrr_max_success_threshold = 15;

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[1];

	/* Max RSSI */
	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = iwm_ioctl;
	ifp->if_start = iwm_start;
	ifp->if_watchdog = iwm_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	if_attach(ifp);
	ieee80211_ifattach(ifp);
	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);

#if NBPFILTER > 0
	iwm_radiotap_attach(sc);
#endif
	timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
	task_set(&sc->init_task, iwm_init_task, sc);

	/*
	 * We cannot read the MAC address without loading the
	 * firmware from disk. Postpone until mountroot is done.
	 */
	if (rootvp == NULL)
		mountroothook_establish(iwm_attach_hook, sc);
	else
		iwm_attach_hook(sc);

	return;

	/* Free allocated memory if something failed during attachment. */
fail4:	while (--txq_i >= 0)
		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
	iwm_free_sched(sc);
fail3:	if (sc->ict_dma.vaddr != NULL)
		iwm_free_ict(sc);
fail2:	iwm_free_kw(sc);
fail1:	iwm_free_fwmem(sc);
	return;
}
6669 
#if NBPFILTER > 0
/*
 * Attach the interface to 802.11 radiotap: register with bpf and
 * pre-fill the static portions of the TX and RX radiotap headers.
 */
void
iwm_radiotap_attach(struct iwm_softc *sc)
{
	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
	    sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);

	/* TX tap header: length and bitmap of present fields. */
	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);

	/* RX tap header: length and bitmap of present fields. */
	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
}
#endif
6689 
6690 void
6691 iwm_init_task(void *arg1)
6692 {
6693 	struct iwm_softc *sc = arg1;
6694 	struct ifnet *ifp = &sc->sc_ic.ic_if;
6695 	int s;
6696 
6697 	s = splnet();
6698 	while (sc->sc_flags & IWM_FLAG_BUSY)
6699 		tsleep(&sc->sc_flags, 0, "iwmpwr", 0);
6700 	sc->sc_flags |= IWM_FLAG_BUSY;
6701 
6702 	iwm_stop(ifp, 0);
6703 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
6704 		iwm_init(ifp);
6705 
6706 	sc->sc_flags &= ~IWM_FLAG_BUSY;
6707 	wakeup(&sc->sc_flags);
6708 	splx(s);
6709 }
6710 
6711 void
6712 iwm_wakeup(struct iwm_softc *sc)
6713 {
6714 	pcireg_t reg;
6715 
6716 	/* Clear device-specific "PCI retry timeout" register (41h). */
6717 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
6718 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
6719 
6720 	iwm_init_task(sc);
6721 
6722 }
6723 
6724 int
6725 iwm_activate(struct device *self, int act)
6726 {
6727 	struct iwm_softc *sc = (struct iwm_softc *)self;
6728 	struct ifnet *ifp = &sc->sc_ic.ic_if;
6729 
6730 	switch (act) {
6731 	case DVACT_SUSPEND:
6732 		if (ifp->if_flags & IFF_RUNNING)
6733 			iwm_stop(ifp, 0);
6734 		break;
6735 	case DVACT_WAKEUP:
6736 		iwm_wakeup(sc);
6737 		break;
6738 	}
6739 
6740 	return 0;
6741 }
6742 
/* Autoconf driver definition: device name "iwm", network-interface class. */
struct cfdriver iwm_cd = {
	NULL, "iwm", DV_IFNET
};

/* Autoconf attachment: softc size, match/attach entry points, activate hook. */
struct cfattach iwm_ca = {
	sizeof(struct iwm_softc), iwm_match, iwm_attach,
	NULL, iwm_activate
};
6751