/*	$OpenBSD: if_iwm.c,v 1.417 2024/09/01 03:08:59 jsg Exp $	*/

/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 *   Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 * Copyright (c) 2017 Stefan Sperling <stsp@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/endian.h>

#include <sys/refcnt.h>
#include <sys/task.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_ra.h>
#include <net80211/ieee80211_ra_vht.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_priv.h> /* for SEQ_LT */
#undef DPRINTF /* defined in ieee80211_priv.h */

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

#define IC2IFP(_ic_) (&(_ic_)->ic_if)

#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))

#ifdef IWM_DEBUG
#define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
int iwm_debug = 1;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#include <dev/pci/if_iwmreg.h>
#include <dev/pci/if_iwmvar.h>

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

#define IWM_NUM_2GHZ_CHANNELS	14

const struct iwm_rate {
	uint16_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_15_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
#define IWM_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
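
/*
 * Editor's note: the 'rate' field above is in 500 kbit/s units, as in
 * net80211 rate sets, so e.g. 2 = 1 Mbit/s CCK and 108 = 54 Mbit/s OFDM.
 * A minimal sketch of looking up a legacy PLCP code from such a rate
 * value follows (illustration only; the driver's real mapping is done
 * where rates are chosen, e.g. in iwm_tx_fill_cmd()):
 */
#if 0	/* illustrative sketch, not compiled */
static uint8_t
example_rate_to_plcp(int rate)	/* 'rate' in 500 kbit/s units */
{
	int i;

	for (i = 0; i <= IWM_RIDX_MAX; i++) {
		if (iwm_rates[i].rate == rate)
			return iwm_rates[i].plcp;	/* legacy PLCP code */
	}
	return IWM_RATE_INVM_PLCP;	/* no matching legacy rate */
}
#endif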

/* Convert an MCS index into an iwm_rates[] index. */
const int iwm_ht_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
	IWM_RATE_MCS_8_INDEX,
	IWM_RATE_MCS_9_INDEX,
	IWM_RATE_MCS_10_INDEX,
	IWM_RATE_MCS_11_INDEX,
	IWM_RATE_MCS_12_INDEX,
	IWM_RATE_MCS_13_INDEX,
	IWM_RATE_MCS_14_INDEX,
	IWM_RATE_MCS_15_INDEX,
};

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

int	iwm_is_mimo_ht_plcp(uint8_t);
int	iwm_is_mimo_ht_mcs(int);
int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
int	iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
	    uint8_t *, size_t);
int	iwm_set_default_calib(struct iwm_softc *, const void *);
void	iwm_fw_info_free(struct iwm_fw_info *);
void	iwm_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
int	iwm_read_firmware(struct iwm_softc *);
uint32_t iwm_read_prph_unlocked(struct iwm_softc *, uint32_t);
uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
void	iwm_write_prph_unlocked(struct iwm_softc *, uint32_t, uint32_t);
void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
int	iwm_nic_lock(struct iwm_softc *);
void	iwm_nic_assert_locked(struct iwm_softc *);
void	iwm_nic_unlock(struct iwm_softc *);
int	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
	    uint32_t);
int	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
int	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, bus_size_t,
	    bus_size_t);
void	iwm_dma_contig_free(struct iwm_dma_info *);
int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
void	iwm_disable_rx_dma(struct iwm_softc *);
void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
void	iwm_enable_rfkill_int(struct iwm_softc *);
int	iwm_check_rfkill(struct iwm_softc *);
void	iwm_enable_interrupts(struct iwm_softc *);
void	iwm_enable_fwload_interrupt(struct iwm_softc *);
void	iwm_restore_interrupts(struct iwm_softc *);
void	iwm_disable_interrupts(struct iwm_softc *);
void	iwm_ict_reset(struct iwm_softc *);
int	iwm_set_hw_ready(struct iwm_softc *);
int	iwm_prepare_card_hw(struct iwm_softc *);
void	iwm_apm_config(struct iwm_softc *);
int	iwm_apm_init(struct iwm_softc *);
void	iwm_apm_stop(struct iwm_softc *);
int	iwm_allow_mcast(struct iwm_softc *);
void	iwm_init_msix_hw(struct iwm_softc *);
void	iwm_conf_msix_hw(struct iwm_softc *, int);
int	iwm_clear_persistence_bit(struct iwm_softc *);
int	iwm_start_hw(struct iwm_softc *);
void	iwm_stop_device(struct iwm_softc *);
void	iwm_nic_config(struct iwm_softc *);
int	iwm_nic_rx_init(struct iwm_softc *);
int	iwm_nic_rx_legacy_init(struct iwm_softc *);
int	iwm_nic_rx_mq_init(struct iwm_softc *);
int	iwm_nic_tx_init(struct iwm_softc *);
int	iwm_nic_init(struct iwm_softc *);
int	iwm_enable_ac_txq(struct iwm_softc *, int, int);
int	iwm_enable_txq(struct iwm_softc *, int, int, int, int, uint8_t,
	    uint16_t);
int	iwm_disable_txq(struct iwm_softc *, int, int, uint8_t);
int	iwm_post_alive(struct iwm_softc *);
struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *, uint16_t,
	    uint16_t);
int	iwm_phy_db_set_section(struct iwm_softc *,
	    struct iwm_calib_res_notif_phy_db *);
int	iwm_is_valid_channel(uint16_t);
uint8_t	iwm_ch_id_to_ch_index(uint16_t);
uint16_t iwm_channel_id_to_papd(uint16_t);
uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
	    uint16_t *, uint16_t);
int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *, uint16_t,
	    uint8_t);
int	iwm_send_phy_db_data(struct iwm_softc *);
void	iwm_protect_session(struct iwm_softc *, struct iwm_node *, uint32_t,
	    uint32_t);
void	iwm_unprotect_session(struct iwm_softc *, struct iwm_node *);
int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
	    uint8_t *, uint16_t *);
int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
	    uint16_t *, size_t);
uint8_t	iwm_fw_valid_tx_ant(struct iwm_softc *);
uint8_t	iwm_fw_valid_rx_ant(struct iwm_softc *);
int	iwm_valid_siso_ant_rate_mask(struct iwm_softc *);
void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
	    const uint8_t *nvm_channels, int nchan);
int	iwm_mimo_enabled(struct iwm_softc *);
void	iwm_setup_ht_rates(struct iwm_softc *);
void	iwm_setup_vht_rates(struct iwm_softc *);
void	iwm_mac_ctxt_task(void *);
void	iwm_phy_ctxt_task(void *);
void	iwm_updateprot(struct ieee80211com *);
void	iwm_updateslot(struct ieee80211com *);
void	iwm_updateedca(struct ieee80211com *);
void	iwm_updatechan(struct ieee80211com *);
void	iwm_updatedtim(struct ieee80211com *);
void	iwm_init_reorder_buffer(struct iwm_reorder_buffer *, uint16_t,
	    uint16_t);
void	iwm_clear_reorder_buffer(struct iwm_softc *, struct iwm_rxba_data *);
int	iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwm_rx_ba_session_expired(void *);
void	iwm_reorder_timer_expired(void *);
int	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t,
	    uint16_t, uint16_t, int, int);
int	iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwm_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwm_ba_task(void *);

int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
	    const uint16_t *, const uint16_t *,
	    const uint16_t *, const uint16_t *,
	    const uint16_t *, int);
void	iwm_set_hw_address_8000(struct iwm_softc *, struct iwm_nvm_data *,
	    const uint16_t *, const uint16_t *);
int	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
int	iwm_nvm_init(struct iwm_softc *);
int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t, const uint8_t *,
	    uint32_t);
int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
	    uint32_t);
int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_load_cpu_sections_8000(struct iwm_softc *, struct iwm_fw_sects *,
	    int, int *);
int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
int	iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_send_dqa_cmd(struct iwm_softc *);
int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
int	iwm_config_ltr(struct iwm_softc *);
int	iwm_rx_addbuf(struct iwm_softc *, int, int);
int	iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *);
int	iwm_rxmq_get_signal_strength(struct iwm_softc *, struct iwm_rx_mpdu_desc *);
void	iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_rx_data *);
int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
int	iwm_rx_hwdecrypt(struct iwm_softc *, struct mbuf *, uint32_t,
	    struct ieee80211_rxinfo *);
int	iwm_ccmp_decap(struct iwm_softc *, struct mbuf *,
	    struct ieee80211_node *, struct ieee80211_rxinfo *);
void	iwm_rx_frame(struct iwm_softc *, struct mbuf *, int, uint32_t, int, int,
	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwm_ht_single_rate_control(struct iwm_softc *, struct ieee80211_node *,
	    int, uint8_t, int);
void	iwm_vht_single_rate_control(struct iwm_softc *, struct ieee80211_node *,
	    int, int, uint8_t, int);
void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_node *, int, int);
void	iwm_txd_done(struct iwm_softc *, struct iwm_tx_data *);
void	iwm_txq_advance(struct iwm_softc *, struct iwm_tx_ring *, int);
void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_rx_data *);
void	iwm_clear_oactive(struct iwm_softc *, struct iwm_tx_ring *);
void	iwm_ampdu_rate_control(struct iwm_softc *, struct ieee80211_node *,
	    struct iwm_tx_ring *, int, uint16_t, uint16_t);
void	iwm_rx_compressed_ba(struct iwm_softc *, struct iwm_rx_packet *);
void	iwm_rx_bmiss(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_rx_data *);
int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
uint8_t	iwm_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
int	iwm_phy_ctxt_cmd_uhb(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
	    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
void	iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *,
	    struct ieee80211_channel *, uint8_t, uint8_t, uint8_t, uint8_t);
int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t, uint16_t,
	    const void *);
int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
	    uint32_t *);
int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
	    const void *, uint32_t *);
void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
void	iwm_cmd_done(struct iwm_softc *, int, int, int);
void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
void	iwm_reset_sched(struct iwm_softc *, int, int, uint8_t);
uint8_t	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
	    struct ieee80211_frame *, struct iwm_tx_cmd *);
int	iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
int	iwm_flush_tx_path(struct iwm_softc *, int);
int	iwm_wait_tx_queues_empty(struct iwm_softc *);
void	iwm_led_enable(struct iwm_softc *);
void	iwm_led_disable(struct iwm_softc *);
int	iwm_led_is_enabled(struct iwm_softc *);
void	iwm_led_blink_timeout(void *);
void	iwm_led_blink_start(struct iwm_softc *);
void	iwm_led_blink_stop(struct iwm_softc *);
int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
	    struct iwm_beacon_filter_cmd *);
void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *, struct iwm_node *,
	    struct iwm_beacon_filter_cmd *);
int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
	    struct iwm_mac_power_cmd *);
int	iwm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
int	iwm_power_update_device(struct iwm_softc *);
int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
int	iwm_disable_beacon_filter(struct iwm_softc *);
int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
int	iwm_add_aux_sta(struct iwm_softc *);
int	iwm_drain_sta(struct iwm_softc *sc, struct iwm_node *, int);
int	iwm_flush_sta(struct iwm_softc *, struct iwm_node *);
int	iwm_rm_sta_cmd(struct iwm_softc *, struct iwm_node *);
uint16_t iwm_scan_rx_chain(struct iwm_softc *);
uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
	    struct iwm_scan_channel_cfg_lmac *, int, int);
int	iwm_fill_probe_req(struct iwm_softc *, struct iwm_scan_probe_req *);
int	iwm_lmac_scan(struct iwm_softc *, int);
int	iwm_config_umac_scan(struct iwm_softc *);
int	iwm_umac_scan(struct iwm_softc *, int);
void	iwm_mcc_update(struct iwm_softc *, struct iwm_mcc_chub_notif *);
uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
int	iwm_rval2ridx(int);
void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
	    struct iwm_mac_ctx_cmd *, uint32_t);
void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
	    struct iwm_mac_data_sta *, int);
int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *, uint32_t, int);
int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *, int);
void	iwm_add_task(struct iwm_softc *, struct taskq *, struct task *);
void	iwm_del_task(struct iwm_softc *, struct taskq *, struct task *);
int	iwm_scan(struct iwm_softc *);
int	iwm_bgscan(struct ieee80211com *);
void	iwm_bgscan_done(struct ieee80211com *,
	    struct ieee80211_node_switch_bss_arg *, size_t);
void	iwm_bgscan_done_task(void *);
int	iwm_umac_scan_abort(struct iwm_softc *);
int	iwm_lmac_scan_abort(struct iwm_softc *);
int	iwm_scan_abort(struct iwm_softc *);
int	iwm_phy_ctxt_update(struct iwm_softc *, struct iwm_phy_ctxt *,
	    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
	    uint8_t);
int	iwm_auth(struct iwm_softc *);
int	iwm_deauth(struct iwm_softc *);
int	iwm_run(struct iwm_softc *);
int	iwm_run_stop(struct iwm_softc *);
struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
int	iwm_set_key_v1(struct ieee80211com *, struct ieee80211_node *,
	    struct ieee80211_key *);
int	iwm_set_key(struct ieee80211com *, struct ieee80211_node *,
	    struct ieee80211_key *);
void	iwm_delete_key_v1(struct ieee80211com *,
	    struct ieee80211_node *, struct ieee80211_key *);
void	iwm_delete_key(struct ieee80211com *,
	    struct ieee80211_node *, struct ieee80211_key *);
void	iwm_calib_timeout(void *);
void	iwm_set_rate_table_vht(struct iwm_node *, struct iwm_lq_cmd *);
void	iwm_set_rate_table(struct iwm_node *, struct iwm_lq_cmd *);
void	iwm_setrates(struct iwm_node *, int);
int	iwm_media_change(struct ifnet *);
void	iwm_newstate_task(void *);
int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
void	iwm_endscan(struct iwm_softc *);
void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
	    struct ieee80211_node *);
int	iwm_sf_config(struct iwm_softc *, int);
int	iwm_send_bt_init_conf(struct iwm_softc *);
int	iwm_send_soc_conf(struct iwm_softc *);
int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
int	iwm_send_temp_report_ths_cmd(struct iwm_softc *);
void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
void	iwm_free_fw_paging(struct iwm_softc *);
int	iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *);
int	iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_sects *);
int	iwm_init_hw(struct iwm_softc *);
int	iwm_init(struct ifnet *);
void	iwm_start(struct ifnet *);
void	iwm_stop(struct ifnet *);
void	iwm_watchdog(struct ifnet *);
int	iwm_ioctl(struct ifnet *, u_long, caddr_t);
const char *iwm_desc_lookup(uint32_t);
void	iwm_nic_error(struct iwm_softc *);
void	iwm_dump_driver_status(struct iwm_softc *);
void	iwm_nic_umac_error(struct iwm_softc *);
void	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *, void *, size_t,
	    struct mbuf_list *);
void	iwm_flip_address(uint8_t *);
int	iwm_detect_duplicate(struct iwm_softc *, struct mbuf *,
	    struct iwm_rx_mpdu_desc *, struct ieee80211_rxinfo *);
int	iwm_is_sn_less(uint16_t, uint16_t, uint16_t);
void	iwm_release_frames(struct iwm_softc *, struct ieee80211_node *,
	    struct iwm_rxba_data *, struct iwm_reorder_buffer *, uint16_t,
	    struct mbuf_list *);
int	iwm_oldsn_workaround(struct iwm_softc *, struct ieee80211_node *,
	    int, struct iwm_reorder_buffer *, uint32_t, uint32_t);
int	iwm_rx_reorder(struct iwm_softc *, struct mbuf *, int,
	    struct iwm_rx_mpdu_desc *, int, int, uint32_t,
	    struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwm_rx_mpdu_mq(struct iwm_softc *, struct mbuf *, void *, size_t,
	    struct mbuf_list *);
int	iwm_rx_pkt_valid(struct iwm_rx_packet *);
void	iwm_rx_pkt(struct iwm_softc *, struct iwm_rx_data *,
	    struct mbuf_list *);
void	iwm_notif_intr(struct iwm_softc *);
int	iwm_intr(void *);
int	iwm_intr_msix(void *);
int	iwm_match(struct device *, void *, void *);
int	iwm_preinit(struct iwm_softc *);
void	iwm_attach_hook(struct device *);
void	iwm_attach(struct device *, struct device *, void *);
void	iwm_init_task(void *);
int	iwm_activate(struct device *, int);
void	iwm_resume(struct iwm_softc *);
int	iwm_wakeup(struct iwm_softc *);

#if NBPFILTER > 0
void	iwm_radiotap_attach(struct iwm_softc *);
#endif

uint8_t
iwm_lookup_cmd_ver(struct iwm_softc *sc, uint8_t grp, uint8_t cmd)
{
	const struct iwm_fw_cmd_version *entry;
	int i;

	for (i = 0; i < sc->n_cmd_versions; i++) {
		entry = &sc->cmd_versions[i];
		if (entry->group == grp && entry->cmd == cmd)
			return entry->cmd_ver;
	}

	return IWM_FW_CMD_VER_UNKNOWN;
}
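
/*
 * Editor's note: the cmd_versions table consulted above is filled in by
 * iwm_read_firmware() below when it parses the IWM_UCODE_TLV_CMD_VERSIONS
 * TLV; until then n_cmd_versions is 0 and every lookup yields
 * IWM_FW_CMD_VER_UNKNOWN.
 */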

int
iwm_is_mimo_ht_plcp(uint8_t ht_plcp)
{
	return (ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP &&
	    (ht_plcp & IWM_RATE_HT_MCS_NSS_MSK));
}

int
iwm_is_mimo_ht_mcs(int mcs)
{
	int ridx = iwm_ht_mcs2ridx[mcs];
	return iwm_is_mimo_ht_plcp(iwm_rates[ridx].ht_plcp);
}

int
iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwm_fw_cscheme_list *l = (void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* We don't actually store anything for now; always use s/w crypto. */

	return 0;
}

int
iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_onesect *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* The first 32 bits are the device load offset. */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* The rest is data. */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS	40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWM_MAX_SCAN_CHANNELS		52 /* as of the 8265-34 firmware image */

struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
	fw->fw_rawdata = NULL;
	fw->fw_rawsize = 0;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

void
iwm_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	/*
	 * Starting with major version 35, the Linux driver prints the minor
	 * version in hexadecimal.
	 */
	if (major >= 35)
		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
	else
		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
}
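
/*
 * Editor's example: with the formats above, major=22, minor=361, api=29
 * yields "22.361.29", while major=36, minor=0x1a2b3c4d, api=29 yields
 * "36.1a2b3c4d.29" (the minor word is rendered as zero-padded hex once
 * major >= 35).
 */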

int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	uint32_t tlv_type;
	uint8_t *data;
	uint32_t usniffer_img;
	uint32_t paging_mem_size;
	int err;
	size_t len;

	if (fw->fw_status == IWM_FW_STATUS_DONE)
		return 0;

	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
		tsleep_nsec(&sc->sc_fw, 0, "iwmfwp", INFSLP);
	fw->fw_status = IWM_FW_STATUS_INPROGRESS;

	if (fw->fw_rawdata != NULL)
		iwm_fw_info_free(fw);

	err = loadfirmware(sc->sc_fwname,
	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
	if (err) {
		printf("%s: could not read firmware %s (error %d)\n",
		    DEVNAME(sc), sc->sc_fwname, err);
		goto out;
	}

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
	sc->n_cmd_versions = 0;

	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));

	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);
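
	/*
	 * Editor's note: the firmware image is a stream of TLV records,
	 * each a small header (little-endian 32-bit type and length)
	 * followed by 'length' payload bytes; records are padded so the
	 * next header starts on a 4-byte boundary, which is why the loop
	 * advances by roundup(tlv_len, 4) at the bottom.
	 */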
	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but the Linux
			 * driver parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwm_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			struct iwm_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwm_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwm_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_CMD_VERSIONS:
			if (tlv_len % sizeof(struct iwm_fw_cmd_version)) {
				tlv_len /= sizeof(struct iwm_fw_cmd_version);
				tlv_len *= sizeof(struct iwm_fw_cmd_version);
			}
			if (sc->n_cmd_versions != 0) {
				err = EINVAL;
				goto parse_out;
			}
			if (tlv_len > sizeof(sc->cmd_versions)) {
				err = EINVAL;
				goto parse_out;
			}
			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
			sc->n_cmd_versions = tlv_len / sizeof(struct iwm_fw_cmd_version);
			break;

		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32toh(*(const uint32_t *)tlv_data);

			DPRINTF(("%s: Paging: paging enabled (size = %u bytes)\n",
			    DEVNAME(sc), paging_mem_size));
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				printf("%s: Driver only supports up to %u"
				    " bytes for paging image (%u requested)\n",
				    DEVNAME(sc), IWM_MAX_PAGING_IMAGE_SIZE,
				    paging_mem_size);
				err = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				printf("%s: Paging: image isn't multiple of %u\n",
				    DEVNAME(sc), IWM_FW_PAGING_SIZE);
				err = EINVAL;
				goto out;
			}

			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_TYPE_REGULAR_USNIFFER;
			fw->fw_sects[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			  le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_n_scan_channels > IWM_MAX_SCAN_CHANNELS) {
				err = ERANGE;
				goto parse_out;
			}
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}

			iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_DBG_DEST:
		case IWM_UCODE_TLV_FW_DBG_CONF:
		case IWM_UCODE_TLV_UMAC_DEBUG_ADDRS:
		case IWM_UCODE_TLV_LMAC_DEBUG_ADDRS:
		case IWM_UCODE_TLV_TYPE_DEBUG_INFO:
		case IWM_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
		case IWM_UCODE_TLV_TYPE_HCMD:
		case IWM_UCODE_TLV_TYPE_REGIONS:
		case IWM_UCODE_TLV_TYPE_TRIGGERS:
			break;

		case IWM_UCODE_TLV_HW_TYPE:
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		/* undocumented TLVs found in iwm-9000-43 image */
		case 0x1000003:
		case 0x1000004:
			break;

		default:
			err = EINVAL;
			goto parse_out;
		}

		/*
		 * Check for size_t overflow and ignore missing padding at
		 * end of firmware file.
		 */
		if (roundup(tlv_len, 4) > len)
			break;

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

 parse_out:
	if (err) {
		printf("%s: firmware parse error %d, "
		    "section type %d\n", DEVNAME(sc), err, tlv_type);
	}

 out:
	if (err) {
		fw->fw_status = IWM_FW_STATUS_NONE;
		if (fw->fw_rawdata != NULL)
			iwm_fw_info_free(fw);
	} else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	return err;
}

uint32_t
iwm_read_prph_unlocked(struct iwm_softc *sc, uint32_t addr)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}

uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	iwm_nic_assert_locked(sc);
	return iwm_read_prph_unlocked(sc, addr);
}

void
iwm_write_prph_unlocked(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}

void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	iwm_nic_assert_locked(sc);
	iwm_write_prph_unlocked(sc, addr, val);
}

void
iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val)
{
	iwm_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
	iwm_write_prph(sc, (uint32_t)addr + 4, val >> 32);
}

int
iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
{
	int offs, err = 0;
	uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
		iwm_nic_unlock(sc);
	} else {
		err = EBUSY;
	}
	return err;
}

int
iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
{
	int offs;
	const uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
		/* WADDR auto-increments */
		for (offs = 0; offs < dwords; offs++) {
			uint32_t val = vals ? vals[offs] : 0;
			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
		}
		iwm_nic_unlock(sc);
	} else {
		return EBUSY;
	}
	return 0;
}

int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}

int
iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	for (;;) {
		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
			return 1;
		}
		if (timo < 10) {
			return 0;
		}
		timo -= 10;
		DELAY(10);
	}
}
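
/*
 * Editor's note: iwm_poll_bit() returns 1 as soon as the masked register
 * bits match the requested value and 0 on timeout; 'timo' is given in
 * microseconds and consumed in 10 us polling steps, so the 150000 passed
 * by iwm_nic_lock() below waits up to 150 ms for the MAC clock.
 */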

int
iwm_nic_lock(struct iwm_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		iwm_nic_assert_locked(sc);
		sc->sc_nic_locks++;
		return 1; /* already locked */
	}

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		sc->sc_nic_locks++;
		return 1;
	}

	printf("%s: acquiring device failed\n", DEVNAME(sc));
	return 0;
}

void
iwm_nic_assert_locked(struct iwm_softc *sc)
{
	if (sc->sc_nic_locks <= 0)
		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
}

void
iwm_nic_unlock(struct iwm_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		if (--sc->sc_nic_locks == 0)
			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else
		printf("%s: NIC already unlocked\n", DEVNAME(sc));
}

int
iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t val;

	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
		return 0;
	}
	return EBUSY;
}

int
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}

int
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}

int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	caddr_t va;

	dma->tag = tag;
	dma->size = size;

	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return err;
}
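
/*
 * Editor's note: on success iwm_dma_contig_alloc() returns with the
 * region allocated, mapped, zeroed, and loaded, so dma->vaddr and
 * dma->paddr are ready to use; callers choose the alignment the hardware
 * requires, e.g. 256 bytes for ring descriptors and 16 bytes for the RX
 * status area in iwm_alloc_rx_ring() below.
 */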
1255 
1256 void
iwm_dma_contig_free(struct iwm_dma_info * dma)1257 iwm_dma_contig_free(struct iwm_dma_info *dma)
1258 {
1259 	if (dma->map != NULL) {
1260 		if (dma->vaddr != NULL) {
1261 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1262 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1263 			bus_dmamap_unload(dma->tag, dma->map);
1264 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1265 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1266 			dma->vaddr = NULL;
1267 		}
1268 		bus_dmamap_destroy(dma->tag, dma->map);
1269 		dma->map = NULL;
1270 	}
1271 }
1272 
1273 int
iwm_alloc_rx_ring(struct iwm_softc * sc,struct iwm_rx_ring * ring)1274 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1275 {
1276 	bus_size_t size;
1277 	size_t descsz;
1278 	int count, i, err;
1279 
1280 	ring->cur = 0;
1281 
1282 	if (sc->sc_mqrx_supported) {
1283 		count = IWM_RX_MQ_RING_COUNT;
1284 		descsz = sizeof(uint64_t);
1285 	} else {
1286 		count = IWM_RX_RING_COUNT;
1287 		descsz = sizeof(uint32_t);
1288 	}
1289 
1290 	/* Allocate RX descriptors (256-byte aligned). */
1291 	size = count * descsz;
1292 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
1293 	if (err) {
1294 		printf("%s: could not allocate RX ring DMA memory\n",
1295 		    DEVNAME(sc));
1296 		goto fail;
1297 	}
1298 	ring->desc = ring->free_desc_dma.vaddr;
1299 
1300 	/* Allocate RX status area (16-byte aligned). */
1301 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1302 	    sizeof(*ring->stat), 16);
1303 	if (err) {
1304 		printf("%s: could not allocate RX status DMA memory\n",
1305 		    DEVNAME(sc));
1306 		goto fail;
1307 	}
1308 	ring->stat = ring->stat_dma.vaddr;
1309 
1310 	if (sc->sc_mqrx_supported) {
1311 		size = count * sizeof(uint32_t);
1312 		err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1313 		    size, 256);
1314 		if (err) {
1315 			printf("%s: could not allocate RX ring DMA memory\n",
1316 			    DEVNAME(sc));
1317 			goto fail;
1318 		}
1319 	}
1320 
1321 	for (i = 0; i < count; i++) {
1322 		struct iwm_rx_data *data = &ring->data[i];
1323 
1324 		memset(data, 0, sizeof(*data));
1325 		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
1326 		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1327 		    &data->map);
1328 		if (err) {
1329 			printf("%s: could not create RX buf DMA map\n",
1330 			    DEVNAME(sc));
1331 			goto fail;
1332 		}
1333 
1334 		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
1335 		if (err)
1336 			goto fail;
1337 	}
1338 	return 0;
1339 
1340 fail:	iwm_free_rx_ring(sc, ring);
1341 	return err;
1342 }
1343 
1344 void
iwm_disable_rx_dma(struct iwm_softc * sc)1345 iwm_disable_rx_dma(struct iwm_softc *sc)
1346 {
1347 	int ntries;
1348 
1349 	if (iwm_nic_lock(sc)) {
1350 		if (sc->sc_mqrx_supported) {
1351 			iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
1352 			for (ntries = 0; ntries < 1000; ntries++) {
1353 				if (iwm_read_prph(sc, IWM_RFH_GEN_STATUS) &
1354 				    IWM_RXF_DMA_IDLE)
1355 					break;
1356 				DELAY(10);
1357 			}
1358 		} else {
1359 			IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1360 			for (ntries = 0; ntries < 1000; ntries++) {
1361 				if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG)&
1362 				    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
1363 					break;
1364 				DELAY(10);
1365 			}
1366 		}
1367 		iwm_nic_unlock(sc);
1368 	}
1369 }
1370 
1371 void
iwm_reset_rx_ring(struct iwm_softc * sc,struct iwm_rx_ring * ring)1372 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1373 {
1374 	ring->cur = 0;
1375 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1376 	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1377 	memset(ring->stat, 0, sizeof(*ring->stat));
1378 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1379 	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1380 
1381 }
1382 
1383 void
iwm_free_rx_ring(struct iwm_softc * sc,struct iwm_rx_ring * ring)1384 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1385 {
1386 	int count, i;
1387 
1388 	iwm_dma_contig_free(&ring->free_desc_dma);
1389 	iwm_dma_contig_free(&ring->stat_dma);
1390 	iwm_dma_contig_free(&ring->used_desc_dma);
1391 
1392 	if (sc->sc_mqrx_supported)
1393 		count = IWM_RX_MQ_RING_COUNT;
1394 	else
1395 		count = IWM_RX_RING_COUNT;
1396 
1397 	for (i = 0; i < count; i++) {
1398 		struct iwm_rx_data *data = &ring->data[i];
1399 
1400 		if (data->m != NULL) {
1401 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1402 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1403 			bus_dmamap_unload(sc->sc_dmat, data->map);
1404 			m_freem(data->m);
1405 			data->m = NULL;
1406 		}
1407 		if (data->map != NULL)
1408 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1409 	}
1410 }
1411 
1412 int
iwm_alloc_tx_ring(struct iwm_softc * sc,struct iwm_tx_ring * ring,int qid)1413 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1414 {
1415 	bus_addr_t paddr;
1416 	bus_size_t size;
1417 	int i, err;
1418 
1419 	ring->qid = qid;
1420 	ring->queued = 0;
1421 	ring->cur = 0;
1422 	ring->tail = 0;
1423 
1424 	/* Allocate TX descriptors (256-byte aligned). */
1425 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1426 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1427 	if (err) {
1428 		printf("%s: could not allocate TX ring DMA memory\n",
1429 		    DEVNAME(sc));
1430 		goto fail;
1431 	}
1432 	ring->desc = ring->desc_dma.vaddr;
1433 
1434 	/*
1435 	 * There is no need to allocate DMA buffers for unused rings.
1436 	 * 7k/8k/9k hardware supports up to 31 Tx rings which is more
1437 	 * than we currently need.
1438 	 *
1439 	 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
1440 	 * The command is queue 0 (sc->txq[0]), and 4 mgmt/data frame queues
1441 	 * are sc->tqx[IWM_DQA_MIN_MGMT_QUEUE + ac], i.e. sc->txq[5:8],
1442 	 * in order to provide one queue per EDCA category.
1443 	 * Tx aggregation requires additional queues, one queue per TID for
1444 	 * which aggregation is enabled. We map TID 0-7 to sc->txq[10:17].
1445 	 *
1446 	 * In non-DQA mode, we use rings 0 through 9 (0-3 are EDCA, 9 is cmd),
1447 	 * and Tx aggregation is not supported.
1448 	 *
1449 	 * Unfortunately, we cannot tell if DQA will be used until the
1450 	 * firmware gets loaded later, so just allocate sufficient rings
1451 	 * in order to satisfy both cases.
1452 	 */
1453 	if (qid > IWM_LAST_AGG_TX_QUEUE)
1454 		return 0;
1455 
1456 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1457 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1458 	if (err) {
1459 		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1460 		goto fail;
1461 	}
1462 	ring->cmd = ring->cmd_dma.vaddr;
1463 
1464 	paddr = ring->cmd_dma.paddr;
1465 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1466 		struct iwm_tx_data *data = &ring->data[i];
1467 		size_t mapsize;
1468 
1469 		data->cmd_paddr = paddr;
1470 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1471 		    + offsetof(struct iwm_tx_cmd, scratch);
1472 		paddr += sizeof(struct iwm_device_cmd);
1473 
1474 		/* FW commands may require more mapped space than packets. */
1475 		if (qid == IWM_CMD_QUEUE || qid == IWM_DQA_CMD_QUEUE)
1476 			mapsize = (sizeof(struct iwm_cmd_header) +
1477 			    IWM_MAX_CMD_PAYLOAD_SIZE);
1478 		else
1479 			mapsize = MCLBYTES;
1480 		err = bus_dmamap_create(sc->sc_dmat, mapsize,
1481 		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
1482 		    &data->map);
1483 		if (err) {
1484 			printf("%s: could not create TX buf DMA map\n",
1485 			    DEVNAME(sc));
1486 			goto fail;
1487 		}
1488 	}
1489 	KASSERT(paddr == ring->cmd_dma.paddr + size);
1490 	return 0;
1491 
1492 fail:	iwm_free_tx_ring(sc, ring);
1493 	return err;
1494 }
1495 
1496 void
iwm_reset_tx_ring(struct iwm_softc * sc,struct iwm_tx_ring * ring)1497 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1498 {
1499 	int i;
1500 
1501 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1502 		struct iwm_tx_data *data = &ring->data[i];
1503 
1504 		if (data->m != NULL) {
1505 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1506 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1507 			bus_dmamap_unload(sc->sc_dmat, data->map);
1508 			m_freem(data->m);
1509 			data->m = NULL;
1510 		}
1511 	}
1512 	/* Clear TX descriptors. */
1513 	memset(ring->desc, 0, ring->desc_dma.size);
1514 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1515 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1516 	sc->qfullmsk &= ~(1 << ring->qid);
1517 	sc->qenablemsk &= ~(1 << ring->qid);
1518 	/* 7000 family NICs are locked while commands are in progress. */
1519 	if (ring->qid == sc->cmdqid && ring->queued > 0) {
1520 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1521 			iwm_nic_unlock(sc);
1522 	}
1523 	ring->queued = 0;
1524 	ring->cur = 0;
1525 	ring->tail = 0;
1526 }
1527 
1528 void
1529 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1530 {
1531 	int i;
1532 
1533 	iwm_dma_contig_free(&ring->desc_dma);
1534 	iwm_dma_contig_free(&ring->cmd_dma);
1535 
1536 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1537 		struct iwm_tx_data *data = &ring->data[i];
1538 
1539 		if (data->m != NULL) {
1540 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1541 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1542 			bus_dmamap_unload(sc->sc_dmat, data->map);
1543 			m_freem(data->m);
1544 			data->m = NULL;
1545 		}
1546 		if (data->map != NULL)
1547 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1548 	}
1549 }
1550 
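/*
 * Unmask only the RF-kill interrupt cause, leaving all other causes
 * masked. On 9000 series and newer devices, also set RFKILL_WAKE_L1A_EN
 * so an RF-kill event can wake the device from L1.
 */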
1551 void
1552 iwm_enable_rfkill_int(struct iwm_softc *sc)
1553 {
1554 	if (!sc->sc_msix) {
1555 		sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
1556 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1557 	} else {
1558 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1559 		    sc->sc_fh_init_mask);
1560 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1561 		    ~IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL);
1562 		sc->sc_hw_mask = IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1563 	}
1564 
1565 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_9000)
1566 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1567 		    IWM_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1568 }
1569 
1570 int
1571 iwm_check_rfkill(struct iwm_softc *sc)
1572 {
1573 	uint32_t v;
1574 	int rv;
1575 
1576 	/*
1577 	 * "documentation" is not really helpful here:
1578 	 *  27:	HW_RF_KILL_SW
1579 	 *	Indicates state of (platform's) hardware RF-Kill switch
1580 	 *
1581 	 * But apparently when it's off, it's on ...
1582 	 */
1583 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1584 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1585 	if (rv) {
1586 		sc->sc_flags |= IWM_FLAG_RFKILL;
1587 	} else {
1588 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
1589 	}
1590 
1591 	return rv;
1592 }
1593 
1594 void
1595 iwm_enable_interrupts(struct iwm_softc *sc)
1596 {
1597 	if (!sc->sc_msix) {
1598 		sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1599 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1600 	} else {
1601 		/*
1602 		 * sc_fh_mask/sc_hw_mask keep all the unmasked causes.
1603 		 * Unlike MSI, an MSI-X cause is enabled while its mask bit is unset.
1604 		 */
1605 		sc->sc_hw_mask = sc->sc_hw_init_mask;
1606 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1607 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1608 		    ~sc->sc_fh_mask);
1609 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1610 		    ~sc->sc_hw_mask);
1611 	}
1612 }
1613 
1614 void
1615 iwm_enable_fwload_interrupt(struct iwm_softc *sc)
1616 {
1617 	if (!sc->sc_msix) {
1618 		sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
1619 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1620 	} else {
1621 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1622 		    sc->sc_hw_init_mask);
1623 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1624 		    ~IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
1625 		sc->sc_fh_mask = IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM;
1626 	}
1627 }
1628 
1629 void
1630 iwm_restore_interrupts(struct iwm_softc *sc)
1631 {
1632 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1633 }
1634 
1635 void
1636 iwm_disable_interrupts(struct iwm_softc *sc)
1637 {
1638 	if (!sc->sc_msix) {
1639 		IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1640 
1641 		/* acknowledge all interrupts */
1642 		IWM_WRITE(sc, IWM_CSR_INT, ~0);
1643 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1644 	} else {
1645 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1646 		    sc->sc_fh_init_mask);
1647 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1648 		    sc->sc_hw_init_mask);
1649 	}
1650 }
1651 
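/*
 * Reset the interrupt cause table (ICT): zero the table, reset our
 * read index, program the table's DMA address into the device, and
 * re-enable interrupts with ICT mode active.
 */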
1652 void
1653 iwm_ict_reset(struct iwm_softc *sc)
1654 {
1655 	iwm_disable_interrupts(sc);
1656 
1657 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1658 	sc->ict_cur = 0;
1659 
1660 	/* Set physical address of ICT (4KB aligned). */
1661 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1662 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1663 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1664 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1665 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1666 
1667 	/* Switch to ICT interrupt mode in driver. */
1668 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1669 
1670 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1671 	iwm_enable_interrupts(sc);
1672 }
1673 
1674 #define IWM_HW_READY_TIMEOUT 50
1675 int
1676 iwm_set_hw_ready(struct iwm_softc *sc)
1677 {
1678 	int ready;
1679 
1680 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1681 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1682 
1683 	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1684 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1685 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1686 	    IWM_HW_READY_TIMEOUT);
1687 	if (ready)
1688 		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1689 		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);
1690 
1691 	return ready;
1692 }
1693 #undef IWM_HW_READY_TIMEOUT
1694 
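/*
 * Bring the NIC into a state where the driver may use it: set the
 * "prepare" bit and poll for the "NIC ready" bit, retrying with
 * delays for several seconds before declaring a timeout.
 */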
1695 int
1696 iwm_prepare_card_hw(struct iwm_softc *sc)
1697 {
1698 	int t = 0;
1699 	int ntries;
1700 
1701 	if (iwm_set_hw_ready(sc))
1702 		return 0;
1703 
1704 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1705 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1706 	DELAY(1000);
1707 
1708 	for (ntries = 0; ntries < 10; ntries++) {
1709 		/* If HW is not ready, prepare the conditions to check again */
1710 		IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1711 		    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1712 
1713 		do {
1714 			if (iwm_set_hw_ready(sc))
1715 				return 0;
1716 			DELAY(200);
1717 			t += 200;
1718 		} while (t < 150000);
1719 		DELAY(25000);
1720 	}
1721 
1722 	return ETIMEDOUT;
1723 }
1724 
1725 void
1726 iwm_apm_config(struct iwm_softc *sc)
1727 {
1728 	pcireg_t lctl, cap;
1729 
1730 	/*
1731 	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
1732 	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1733 	 * If so (likely), disable L0S, so device moves directly L0->L1;
1734 	 *    costs negligible amount of power savings.
1735 	 * If not (unlikely), enable L0S, so there is at least some
1736 	 *    power savings, even without L1.
1737 	 */
1738 	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1739 	    sc->sc_cap_off + PCI_PCIE_LCSR);
1740 	if (lctl & PCI_PCIE_LCSR_ASPM_L1) {
1741 		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1742 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1743 	} else {
1744 		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1745 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1746 	}
1747 
1748 	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1749 	    sc->sc_cap_off + PCI_PCIE_DCSR2);
1750 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
1751 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
1752 	    DEVNAME(sc),
1753 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
1754 	    sc->sc_ltr_enabled ? "En" : "Dis"));
1755 }
1756 
1757 /*
1758  * Start up NIC's basic functionality after it has been reset
1759  * e.g. after platform boot or shutdown.
1760  * NOTE:  This does not load uCode nor start the embedded processor
1761  */
1762 int
1763 iwm_apm_init(struct iwm_softc *sc)
1764 {
1765 	int err = 0;
1766 
1767 	/* Disable L0S exit timer (platform NMI workaround) */
1768 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
1769 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1770 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1771 
1772 	/*
1773 	 * Disable L0s without affecting L1;
1774 	 *  don't wait for ICH L0s (ICH bug W/A)
1775 	 */
1776 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1777 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1778 
1779 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1780 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1781 
1782 	/*
1783 	 * Enable HAP INTA (interrupt from management bus) to
1784 	 * wake device's PCI Express link L1a -> L0s
1785 	 */
1786 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1787 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1788 
1789 	iwm_apm_config(sc);
1790 
1791 #if 0 /* not for 7k/8k */
1792 	/* Configure analog phase-lock-loop before activating to D0A */
1793 	if (trans->cfg->base_params->pll_cfg_val)
1794 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1795 		    trans->cfg->base_params->pll_cfg_val);
1796 #endif
1797 
1798 	/*
1799 	 * Set "initialization complete" bit to move adapter from
1800 	 * D0U* --> D0A* (powered-up active) state.
1801 	 */
1802 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1803 
1804 	/*
1805 	 * Wait for clock stabilization; once stabilized, access to
1806 	 * device-internal resources is supported, e.g. iwm_write_prph()
1807 	 * and accesses to uCode SRAM.
1808 	 */
1809 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1810 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1811 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1812 		printf("%s: timeout waiting for clock stabilization\n",
1813 		    DEVNAME(sc));
1814 		err = ETIMEDOUT;
1815 		goto out;
1816 	}
1817 
1818 	if (sc->host_interrupt_operation_mode) {
1819 		/*
1820 		 * This is a bit of an abuse: this is needed for 7260 / 3160
1821 		 * devices only, so we check host_interrupt_operation_mode even
1822 		 * though what follows is not related to that mode as such.
1823 		 *
1824 		 * Enable the oscillator to count wake up time for L1 exit. This
1825 		 * consumes slightly more power (100uA) - but allows to be sure
1826 		 * that we wake up from L1 on time.
1827 		 *
1828 		 * This looks weird: read twice the same register, discard the
1829 		 * value, set a bit, and yet again, read that same register
1830 		 * just to discard the value. But that's the way the hardware
1831 		 * seems to like it.
1832 		 */
1833 		if (iwm_nic_lock(sc)) {
1834 			iwm_read_prph(sc, IWM_OSC_CLK);
1835 			iwm_read_prph(sc, IWM_OSC_CLK);
1836 			iwm_nic_unlock(sc);
1837 		}
1838 		err = iwm_set_bits_prph(sc, IWM_OSC_CLK,
1839 		    IWM_OSC_CLK_FORCE_CONTROL);
1840 		if (err)
1841 			goto out;
1842 		if (iwm_nic_lock(sc)) {
1843 			iwm_read_prph(sc, IWM_OSC_CLK);
1844 			iwm_read_prph(sc, IWM_OSC_CLK);
1845 			iwm_nic_unlock(sc);
1846 		}
1847 	}
1848 
1849 	/*
1850 	 * Enable DMA clock and wait for it to stabilize.
1851 	 *
1852 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1853 	 * do not disable clocks.  This preserves any hardware bits already
1854 	 * set by default in "CLK_CTRL_REG" after reset.
1855 	 */
1856 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1857 		if (iwm_nic_lock(sc)) {
1858 			iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1859 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1860 			iwm_nic_unlock(sc);
1861 		}
1862 		DELAY(20);
1863 
1864 		/* Disable L1-Active */
1865 		err = iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1866 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1867 		if (err)
1868 			goto out;
1869 
1870 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
1871 		if (iwm_nic_lock(sc)) {
1872 			iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1873 			    IWM_APMG_RTC_INT_STT_RFKILL);
1874 			iwm_nic_unlock(sc);
1875 		}
1876 	}
1877  out:
1878 	if (err)
1879 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
1880 	return err;
1881 }
1882 
1883 void
1884 iwm_apm_stop(struct iwm_softc *sc)
1885 {
1886 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1887 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1888 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1889 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE |
1890 	    IWM_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
1891 	DELAY(1000);
1892 	IWM_CLRBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1893 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1894 	DELAY(5000);
1895 
1896 	/* stop device's busmaster DMA activity */
1897 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1898 
1899 	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1900 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1901 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1902 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
1903 
1904 	/*
1905 	 * Clear "initialization complete" bit to move adapter from
1906 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1907 	 */
1908 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1909 	    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1910 }
1911 
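/*
 * Configure the MSI-X hardware and record the initial FH and HW
 * interrupt masks so that iwm_enable_interrupts() can restore them later.
 */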
1912 void
1913 iwm_init_msix_hw(struct iwm_softc *sc)
1914 {
1915 	iwm_conf_msix_hw(sc, 0);
1916 
1917 	if (!sc->sc_msix)
1918 		return;
1919 
1920 	sc->sc_fh_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_FH_INT_MASK_AD);
1921 	sc->sc_fh_mask = sc->sc_fh_init_mask;
1922 	sc->sc_hw_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_HW_INT_MASK_AD);
1923 	sc->sc_hw_mask = sc->sc_hw_init_mask;
1924 }
1925 
1926 void
1927 iwm_conf_msix_hw(struct iwm_softc *sc, int stopped)
1928 {
1929 	int vector = 0;
1930 
1931 	if (!sc->sc_msix) {
1932 		/* Newer chips default to MSIX. */
1933 		if (sc->sc_mqrx_supported && !stopped && iwm_nic_lock(sc)) {
1934 			iwm_write_prph(sc, IWM_UREG_CHICK,
1935 			    IWM_UREG_CHICK_MSI_ENABLE);
1936 			iwm_nic_unlock(sc);
1937 		}
1938 		return;
1939 	}
1940 
1941 	if (!stopped && iwm_nic_lock(sc)) {
1942 		iwm_write_prph(sc, IWM_UREG_CHICK, IWM_UREG_CHICK_MSIX_ENABLE);
1943 		iwm_nic_unlock(sc);
1944 	}
1945 
1946 	/* Disable all interrupts */
1947 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD, ~0);
1948 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD, ~0);
1949 
1950 	/* Map fallback-queue (command/mgmt) to a single vector */
1951 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(0),
1952 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1953 	/* Map RSS queue (data) to the same vector */
1954 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(1),
1955 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1956 
1957 	/* Enable interrupts for the RX queue causes. */
1958 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1959 	    IWM_MSIX_FH_INT_CAUSES_Q0 | IWM_MSIX_FH_INT_CAUSES_Q1);
1960 
1961 	/* Map non-RX causes to the same vector */
1962 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
1963 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1964 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
1965 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1966 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_S2D),
1967 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1968 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_FH_ERR),
1969 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1970 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_ALIVE),
1971 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1972 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_WAKEUP),
1973 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1974 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_IML),
1975 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1976 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_CT_KILL),
1977 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1978 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_RF_KILL),
1979 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1980 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_PERIODIC),
1981 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1982 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SW_ERR),
1983 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1984 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SCD),
1985 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1986 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_FH_TX),
1987 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1988 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HW_ERR),
1989 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1990 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HAP),
1991 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1992 
1993 	/* Enable interrupts for the non-RX causes. */
1994 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1995 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
1996 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
1997 	    IWM_MSIX_FH_INT_CAUSES_S2D |
1998 	    IWM_MSIX_FH_INT_CAUSES_FH_ERR);
1999 	IWM_CLRBITS(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
2000 	    IWM_MSIX_HW_INT_CAUSES_REG_ALIVE |
2001 	    IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2002 	    IWM_MSIX_HW_INT_CAUSES_REG_IML |
2003 	    IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2004 	    IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2005 	    IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2006 	    IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2007 	    IWM_MSIX_HW_INT_CAUSES_REG_SCD |
2008 	    IWM_MSIX_HW_INT_CAUSES_REG_FH_TX |
2009 	    IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2010 	    IWM_MSIX_HW_INT_CAUSES_REG_HAP);
2011 }
2012 
2013 int
2014 iwm_clear_persistence_bit(struct iwm_softc *sc)
2015 {
2016 	uint32_t hpm, wprot;
2017 
2018 	hpm = iwm_read_prph_unlocked(sc, IWM_HPM_DEBUG);
2019 	if (hpm != 0xa5a5a5a0 && (hpm & IWM_HPM_PERSISTENCE_BIT)) {
2020 		wprot = iwm_read_prph_unlocked(sc, IWM_PREG_PRPH_WPROT_9000);
2021 		if (wprot & IWM_PREG_WFPM_ACCESS) {
2022 			printf("%s: cannot clear persistence bit\n",
2023 			    DEVNAME(sc));
2024 			return EPERM;
2025 		}
2026 		iwm_write_prph_unlocked(sc, IWM_HPM_DEBUG,
2027 		    hpm & ~IWM_HPM_PERSISTENCE_BIT);
2028 	}
2029 
2030 	return 0;
2031 }
2032 
2033 int
2034 iwm_start_hw(struct iwm_softc *sc)
2035 {
2036 	int err;
2037 
2038 	err = iwm_prepare_card_hw(sc);
2039 	if (err)
2040 		return err;
2041 
2042 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000) {
2043 		err = iwm_clear_persistence_bit(sc);
2044 		if (err)
2045 			return err;
2046 	}
2047 
2048 	/* Reset the entire device */
2049 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2050 	DELAY(5000);
2051 
2052 	err = iwm_apm_init(sc);
2053 	if (err)
2054 		return err;
2055 
2056 	iwm_init_msix_hw(sc);
2057 
2058 	iwm_enable_rfkill_int(sc);
2059 	iwm_check_rfkill(sc);
2060 
2061 	return 0;
2062 }
2063 
2064 
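/*
 * Stop the device: disable interrupts, halt the Tx scheduler and DMA
 * channels, reset all rings, put the NIC into low power state, and
 * reset the on-board processor. Afterwards the IVAR table is
 * reconfigured (a reset erases it) and the RF-kill interrupt is
 * re-armed so rfkill switch changes are still noticed.
 */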
2065 void
2066 iwm_stop_device(struct iwm_softc *sc)
2067 {
2068 	int chnl, ntries;
2069 	int qid;
2070 
2071 	iwm_disable_interrupts(sc);
2072 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
2073 
2074 	/* Stop all DMA channels. */
2075 	if (iwm_nic_lock(sc)) {
2076 		/* Deactivate TX scheduler. */
2077 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2078 
2079 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2080 			IWM_WRITE(sc,
2081 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
2082 			for (ntries = 0; ntries < 200; ntries++) {
2083 				uint32_t r;
2084 
2085 				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
2086 				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
2087 				    chnl))
2088 					break;
2089 				DELAY(20);
2090 			}
2091 		}
2092 		iwm_nic_unlock(sc);
2093 	}
2094 	iwm_disable_rx_dma(sc);
2095 
2096 	iwm_reset_rx_ring(sc, &sc->rxq);
2097 
2098 	for (qid = 0; qid < nitems(sc->txq); qid++)
2099 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
2100 
2101 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2102 		if (iwm_nic_lock(sc)) {
2103 			/* Power-down device's busmaster DMA clocks */
2104 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
2105 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
2106 			iwm_nic_unlock(sc);
2107 		}
2108 		DELAY(5);
2109 	}
2110 
2111 	/* Make sure (redundant) we've released our request to stay awake */
2112 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
2113 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2114 	if (sc->sc_nic_locks > 0)
2115 		printf("%s: %d active NIC locks forcefully cleared\n",
2116 		    DEVNAME(sc), sc->sc_nic_locks);
2117 	sc->sc_nic_locks = 0;
2118 
2119 	/* Stop the device, and put it in low power state */
2120 	iwm_apm_stop(sc);
2121 
2122 	/* Reset the on-board processor. */
2123 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2124 	DELAY(5000);
2125 
2126 	/*
2127 	 * Upon stop, the IVAR table gets erased, so msi-x won't
2128 	 * work. This causes a bug in RF-KILL flows, since the interrupt
2129 	 * that enables radio won't fire on the correct irq, and the
2130 	 * driver won't be able to handle the interrupt.
2131 	 * Configure the IVAR table again after reset.
2132 	 */
2133 	iwm_conf_msix_hw(sc, 1);
2134 
2135 	/*
2136 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2137 	 * Clear the interrupt again.
2138 	 */
2139 	iwm_disable_interrupts(sc);
2140 
2141 	/* Even though we stop the HW we still want the RF kill interrupt. */
2142 	iwm_enable_rfkill_int(sc);
2143 	iwm_check_rfkill(sc);
2144 
2145 	iwm_prepare_card_hw(sc);
2146 }
2147 
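/*
 * Program the radio (PHY) type/step/dash from the firmware PHY config
 * and the MAC step/dash from the hardware revision into the hardware
 * interface configuration register.
 */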
2148 void
2149 iwm_nic_config(struct iwm_softc *sc)
2150 {
2151 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2152 	uint32_t mask, val, reg_val = 0;
2153 
2154 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
2155 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
2156 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
2157 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
2158 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
2159 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
2160 
2161 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2162 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2163 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2164 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2165 
2166 	/* radio configuration */
2167 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2168 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2169 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2170 
2171 	mask = IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2172 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2173 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2174 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2175 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2176 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2177 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2178 
2179 	val = IWM_READ(sc, IWM_CSR_HW_IF_CONFIG_REG);
2180 	val &= ~mask;
2181 	val |= reg_val;
2182 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, val);
2183 
2184 	/*
2185 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
2186 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
2187 	 * to lose ownership and become unable to obtain it back.
2188 	 */
2189 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2190 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2191 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
2192 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
2193 }
2194 
2195 int
2196 iwm_nic_rx_init(struct iwm_softc *sc)
2197 {
2198 	if (sc->sc_mqrx_supported)
2199 		return iwm_nic_rx_mq_init(sc);
2200 	else
2201 		return iwm_nic_rx_legacy_init(sc);
2202 }
2203 
2204 int
2205 iwm_nic_rx_mq_init(struct iwm_softc *sc)
2206 {
2207 	int enabled;
2208 
2209 	if (!iwm_nic_lock(sc))
2210 		return EBUSY;
2211 
2212 	/* Stop RX DMA. */
2213 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
2214 	/* Disable RX used and free queue operation. */
2215 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);
2216 
2217 	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
2218 	    sc->rxq.free_desc_dma.paddr);
2219 	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
2220 	    sc->rxq.used_desc_dma.paddr);
2221 	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
2222 	    sc->rxq.stat_dma.paddr);
2223 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
2224 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
2225 	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);
2226 
2227 	/* We configure only queue 0 for now. */
2228 	enabled = ((1 << 0) << 16) | (1 << 0);
2229 
2230 	/* Enable RX DMA, 4KB buffer size. */
2231 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
2232 	    IWM_RFH_DMA_EN_ENABLE_VAL |
2233 	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
2234 	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
2235 	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
2236 	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);
2237 
2238 	/* Enable RX DMA snooping. */
2239 	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
2240 	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
2241 	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
2242 	    (sc->sc_integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
2243 	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));
2244 
2245 	/* Enable the configured queue(s). */
2246 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);
2247 
2248 	iwm_nic_unlock(sc);
2249 
2250 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2251 
2252 	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);
2253 
2254 	return 0;
2255 }
2256 
2257 int
2258 iwm_nic_rx_legacy_init(struct iwm_softc *sc)
2259 {
2260 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
2261 
2262 	iwm_disable_rx_dma(sc);
2263 
2264 	if (!iwm_nic_lock(sc))
2265 		return EBUSY;
2266 
2267 	/* reset and flush pointers */
2268 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
2269 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
2270 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
2271 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
2272 
2273 	/* Set physical address of RX ring (256-byte aligned). */
2274 	IWM_WRITE(sc,
2275 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.free_desc_dma.paddr >> 8);
2276 
2277 	/* Set physical address of RX status (16-byte aligned). */
2278 	IWM_WRITE(sc,
2279 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
2280 
2281 	/* Enable RX. */
2282 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
2283 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
2284 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
2285 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
2286 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
2287 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
2288 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
2289 
2290 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2291 
2292 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
2293 	if (sc->host_interrupt_operation_mode)
2294 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
2295 
2296 	iwm_nic_unlock(sc);
2297 
2298 	/*
2299 	 * This value should initially be 0 (before preparing any RBs),
2300 	 * and should be 8 after preparing the first 8 RBs (for example).
2301 	 */
2302 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
2303 
2304 	return 0;
2305 }
2306 
2307 int
2308 iwm_nic_tx_init(struct iwm_softc *sc)
2309 {
2310 	int qid, err;
2311 
2312 	if (!iwm_nic_lock(sc))
2313 		return EBUSY;
2314 
2315 	/* Deactivate TX scheduler. */
2316 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2317 
2318 	/* Set physical address of "keep warm" page (16-byte aligned). */
2319 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
2320 
2321 	for (qid = 0; qid < nitems(sc->txq); qid++) {
2322 		struct iwm_tx_ring *txq = &sc->txq[qid];
2323 
2324 		/* Set physical address of TX ring (256-byte aligned). */
2325 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
2326 		    txq->desc_dma.paddr >> 8);
2327 	}
2328 
2329 	err = iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
2330 	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
2331 	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);
2332 
2333 	iwm_nic_unlock(sc);
2334 
2335 	return err;
2336 }
2337 
2338 int
2339 iwm_nic_init(struct iwm_softc *sc)
2340 {
2341 	int err;
2342 
2343 	iwm_apm_init(sc);
2344 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2345 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2346 		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
2347 		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
2348 
2349 	iwm_nic_config(sc);
2350 
2351 	err = iwm_nic_rx_init(sc);
2352 	if (err)
2353 		return err;
2354 
2355 	err = iwm_nic_tx_init(sc);
2356 	if (err)
2357 		return err;
2358 
2359 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2360 
2361 	return 0;
2362 }
2363 
2364 /* Map a TID to an ieee80211_edca_ac category. */
2365 const uint8_t iwm_tid_to_ac[IWM_MAX_TID_COUNT] = {
2366 	EDCA_AC_BE,
2367 	EDCA_AC_BK,
2368 	EDCA_AC_BK,
2369 	EDCA_AC_BE,
2370 	EDCA_AC_VI,
2371 	EDCA_AC_VI,
2372 	EDCA_AC_VO,
2373 	EDCA_AC_VO,
2374 };
2375 
2376 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2377 const uint8_t iwm_ac_to_tx_fifo[] = {
2378 	IWM_TX_FIFO_BE,
2379 	IWM_TX_FIFO_BK,
2380 	IWM_TX_FIFO_VI,
2381 	IWM_TX_FIFO_VO,
2382 };
2383 
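/*
 * Enable a Tx queue directly via scheduler (periphery) registers,
 * without firmware involvement; used for the command queue and for
 * legacy (non-DQA) queues.
 */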
2384 int
2385 iwm_enable_ac_txq(struct iwm_softc *sc, int qid, int fifo)
2386 {
2387 	int err;
2388 	iwm_nic_assert_locked(sc);
2389 
2390 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2391 
2392 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2393 	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
2394 	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2395 
2396 	err = iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
2397 	if (err) {
2398 		return err;
2399 	}
2400 
2401 	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
2402 
2403 	iwm_write_mem32(sc,
2404 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
2405 
2406 	/* Set scheduler window size and frame limit. */
2407 	iwm_write_mem32(sc,
2408 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2409 	    sizeof(uint32_t),
2410 	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
2411 	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
2412 	    ((IWM_FRAME_LIMIT
2413 		<< IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2414 	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
2415 
2416 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2417 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2418 	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
2419 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
2420 	    IWM_SCD_QUEUE_STTS_REG_MSK);
2421 
2422 	if (qid == sc->cmdqid)
2423 		iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2424 		    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid));
2425 
2426 	return 0;
2427 }
2428 
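/*
 * Enable a Tx queue via the firmware's SCD_QUEUE_CFG command, after
 * pointing the scheduler's write pointer at the queue index which
 * corresponds to the given starting sequence number.
 */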
2429 int
2430 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo,
2431     int aggregate, uint8_t tid, uint16_t ssn)
2432 {
2433 	struct iwm_tx_ring *ring = &sc->txq[qid];
2434 	struct iwm_scd_txq_cfg_cmd cmd;
2435 	int err, idx, scd_bug;
2436 
2437 	iwm_nic_assert_locked(sc);
2438 
2439 	/*
2440 	 * If we need to move the SCD write pointer by steps of
2441 	 * 0x40, 0x80 or 0xc0, it gets stuck.
2442 	 * This is really ugly, but this is the easiest way out for
2443 	 * this sad hardware issue.
2444 	 * This bug has been fixed on devices 9000 and up.
2445 	 */
2446 	scd_bug = !sc->sc_mqrx_supported &&
2447 		!((ssn - ring->cur) & 0x3f) &&
2448 		(ssn != ring->cur);
2449 	if (scd_bug)
2450 		ssn = (ssn + 1) & 0xfff;
2451 
2452 	idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
2453 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | idx);
2454 	ring->cur = idx;
2455 	ring->tail = idx;
2456 
2457 	memset(&cmd, 0, sizeof(cmd));
2458 	cmd.tid = tid;
2459 	cmd.scd_queue = qid;
2460 	cmd.enable = 1;
2461 	cmd.sta_id = sta_id;
2462 	cmd.tx_fifo = fifo;
2463 	cmd.aggregate = aggregate;
2464 	cmd.ssn = htole16(ssn);
2465 	cmd.window = IWM_FRAME_LIMIT;
2466 
2467 	err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0,
2468 	    sizeof(cmd), &cmd);
2469 	if (err)
2470 		return err;
2471 
2472 	sc->qenablemsk |= (1 << qid);
2473 	return 0;
2474 }
2475 
2476 int
2477 iwm_disable_txq(struct iwm_softc *sc, int sta_id, int qid, uint8_t tid)
2478 {
2479 	struct iwm_scd_txq_cfg_cmd cmd;
2480 	int err;
2481 
2482 	memset(&cmd, 0, sizeof(cmd));
2483 	cmd.tid = tid;
2484 	cmd.scd_queue = qid;
2485 	cmd.enable = 0;
2486 	cmd.sta_id = sta_id;
2487 
2488 	err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
2489 	if (err)
2490 		return err;
2491 
2492 	sc->qenablemsk &= ~(1 << qid);
2493 	return 0;
2494 }
2495 
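/*
 * Finish hardware setup after the firmware has sent its "alive"
 * notification: reset the ICT table, clear the Tx scheduler context
 * in SRAM, enable the command queue, activate the scheduler, and
 * enable the Tx DMA channels.
 */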
2496 int
2497 iwm_post_alive(struct iwm_softc *sc)
2498 {
2499 	int nwords;
2500 	int err, chnl;
2501 	uint32_t base;
2502 
2503 	if (!iwm_nic_lock(sc))
2504 		return EBUSY;
2505 
2506 	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2507 
2508 	iwm_ict_reset(sc);
2509 
2510 	iwm_nic_unlock(sc);
2511 
2512 	/* Clear TX scheduler state in SRAM. */
2513 	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2514 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
2515 	    / sizeof(uint32_t);
2516 	err = iwm_write_mem(sc,
2517 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
2518 	    NULL, nwords);
2519 	if (err)
2520 		return err;
2521 
2522 	if (!iwm_nic_lock(sc))
2523 		return EBUSY;
2524 
2525 	/* Set physical address of TX scheduler rings (1KB aligned). */
2526 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2527 
2528 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2529 
2530 	/* enable command channel */
2531 	err = iwm_enable_ac_txq(sc, sc->cmdqid, IWM_TX_FIFO_CMD);
2532 	if (err) {
2533 		iwm_nic_unlock(sc);
2534 		return err;
2535 	}
2536 
2537 	/* Activate TX scheduler. */
2538 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2539 
2540 	/* Enable DMA channels. */
2541 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2542 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2543 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2544 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2545 	}
2546 
2547 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2548 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2549 
2550 	iwm_nic_unlock(sc);
2551 
2552 	/* Enable L1-Active */
2553 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
2554 		err = iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2555 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2556 	}
2557 
2558 	return err;
2559 }
2560 
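/*
 * PHY database: calibration results received from the INIT firmware
 * image are stored in sc_phy_db and sent to the runtime firmware
 * image later (see iwm_send_phy_db_data below).
 */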
2561 struct iwm_phy_db_entry *
2562 iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id)
2563 {
2564 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2565 
2566 	if (type >= IWM_PHY_DB_MAX)
2567 		return NULL;
2568 
2569 	switch (type) {
2570 	case IWM_PHY_DB_CFG:
2571 		return &phy_db->cfg;
2572 	case IWM_PHY_DB_CALIB_NCH:
2573 		return &phy_db->calib_nch;
2574 	case IWM_PHY_DB_CALIB_CHG_PAPD:
2575 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2576 			return NULL;
2577 		return &phy_db->calib_ch_group_papd[chg_id];
2578 	case IWM_PHY_DB_CALIB_CHG_TXP:
2579 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2580 			return NULL;
2581 		return &phy_db->calib_ch_group_txp[chg_id];
2582 	default:
2583 		return NULL;
2584 	}
2585 	return NULL;
2586 }
2587 
2588 int
2589 iwm_phy_db_set_section(struct iwm_softc *sc,
2590     struct iwm_calib_res_notif_phy_db *phy_db_notif)
2591 {
2592 	uint16_t type = le16toh(phy_db_notif->type);
2593 	uint16_t size  = le16toh(phy_db_notif->length);
2594 	struct iwm_phy_db_entry *entry;
2595 	uint16_t chg_id = 0;
2596 
2597 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2598 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
2599 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2600 
2601 	entry = iwm_phy_db_get_section(sc, type, chg_id);
2602 	if (!entry)
2603 		return EINVAL;
2604 
2605 	if (entry->data)
2606 		free(entry->data, M_DEVBUF, entry->size);
2607 	entry->data = malloc(size, M_DEVBUF, M_NOWAIT);
2608 	if (!entry->data) {
2609 		entry->size = 0;
2610 		return ENOMEM;
2611 	}
2612 	memcpy(entry->data, phy_db_notif->data, size);
2613 	entry->size = size;
2614 
2615 	return 0;
2616 }
2617 
2618 int
2619 iwm_is_valid_channel(uint16_t ch_id)
2620 {
2621 	if (ch_id <= 14 ||
2622 	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2623 	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2624 	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2625 		return 1;
2626 	return 0;
2627 }
2628 
2629 uint8_t
2630 iwm_ch_id_to_ch_index(uint16_t ch_id)
2631 {
2632 	if (!iwm_is_valid_channel(ch_id))
2633 		return 0xff;
2634 
2635 	if (ch_id <= 14)
2636 		return ch_id - 1;
2637 	if (ch_id <= 64)
2638 		return (ch_id + 20) / 4;
2639 	if (ch_id <= 140)
2640 		return (ch_id - 12) / 4;
2641 	return (ch_id - 13) / 4;
2642 }
2643 
2644 
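/*
 * Map a channel number to the PAPD calibration channel group which
 * contains it: group 0 covers 2GHz, groups 1-3 the 5GHz subbands.
 */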
2645 uint16_t
2646 iwm_channel_id_to_papd(uint16_t ch_id)
2647 {
2648 	if (!iwm_is_valid_channel(ch_id))
2649 		return 0xff;
2650 
2651 	if (1 <= ch_id && ch_id <= 14)
2652 		return 0;
2653 	if (36 <= ch_id && ch_id <= 64)
2654 		return 1;
2655 	if (100 <= ch_id && ch_id <= 140)
2656 		return 2;
2657 	return 3;
2658 }
2659 
2660 uint16_t
2661 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2662 {
2663 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2664 	struct iwm_phy_db_chg_txp *txp_chg;
2665 	int i;
2666 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2667 
2668 	if (ch_index == 0xff)
2669 		return 0xff;
2670 
2671 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2672 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2673 		if (!txp_chg)
2674 			return 0xff;
2675 		/*
2676 		 * Find the first channel group whose maximum channel index
2677 		 * is not lower than the requested channel's index.
2678 		 */
2679 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2680 			return i;
2681 	}
2682 	return 0xff;
2683 }
2684 
2685 int
2686 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2687     uint16_t *size, uint16_t ch_id)
2688 {
2689 	struct iwm_phy_db_entry *entry;
2690 	uint16_t ch_group_id = 0;
2691 
2692 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2693 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2694 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2695 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2696 
2697 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2698 	if (!entry)
2699 		return EINVAL;
2700 
2701 	*data = entry->data;
2702 	*size = entry->size;
2703 
2704 	return 0;
2705 }
2706 
2707 int
2708 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2709     void *data)
2710 {
2711 	struct iwm_phy_db_cmd phy_db_cmd;
2712 	struct iwm_host_cmd cmd = {
2713 		.id = IWM_PHY_DB_CMD,
2714 		.flags = IWM_CMD_ASYNC,
2715 	};
2716 
2717 	phy_db_cmd.type = le16toh(type);
2718 	phy_db_cmd.length = le16toh(length);
2719 
2720 	cmd.data[0] = &phy_db_cmd;
2721 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2722 	cmd.data[1] = data;
2723 	cmd.len[1] = length;
2724 
2725 	return iwm_send_cmd(sc, &cmd);
2726 }
2727 
2728 int
2729 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type,
2730     uint8_t max_ch_groups)
2731 {
2732 	uint16_t i;
2733 	int err;
2734 	struct iwm_phy_db_entry *entry;
2735 
2736 	for (i = 0; i < max_ch_groups; i++) {
2737 		entry = iwm_phy_db_get_section(sc, type, i);
2738 		if (!entry)
2739 			return EINVAL;
2740 
2741 		if (!entry->size)
2742 			continue;
2743 
2744 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2745 		if (err)
2746 			return err;
2747 
2748 		DELAY(1000);
2749 	}
2750 
2751 	return 0;
2752 }
2753 
2754 int
2755 iwm_send_phy_db_data(struct iwm_softc *sc)
2756 {
2757 	uint8_t *data = NULL;
2758 	uint16_t size = 0;
2759 	int err;
2760 
2761 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2762 	if (err)
2763 		return err;
2764 
2765 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2766 	if (err)
2767 		return err;
2768 
2769 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2770 	    &data, &size, 0);
2771 	if (err)
2772 		return err;
2773 
2774 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2775 	if (err)
2776 		return err;
2777 
2778 	err = iwm_phy_db_send_all_channel_groups(sc,
2779 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2780 	if (err)
2781 		return err;
2782 
2783 	err = iwm_phy_db_send_all_channel_groups(sc,
2784 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2785 	if (err)
2786 		return err;
2787 
2788 	return 0;
2789 }
2790 
2791 /*
2792  * For the high priority TE use a time event type that has similar priority to
2793  * the FW's action scan priority.
2794  */
2795 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2796 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2797 
2798 int
2799 iwm_send_time_event_cmd(struct iwm_softc *sc,
2800     const struct iwm_time_event_cmd *cmd)
2801 {
2802 	struct iwm_rx_packet *pkt;
2803 	struct iwm_time_event_resp *resp;
2804 	struct iwm_host_cmd hcmd = {
2805 		.id = IWM_TIME_EVENT_CMD,
2806 		.flags = IWM_CMD_WANT_RESP,
2807 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2808 	};
2809 	uint32_t resp_len;
2810 	int err;
2811 
2812 	hcmd.data[0] = cmd;
2813 	hcmd.len[0] = sizeof(*cmd);
2814 	err = iwm_send_cmd(sc, &hcmd);
2815 	if (err)
2816 		return err;
2817 
2818 	pkt = hcmd.resp_pkt;
2819 	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
2820 		err = EIO;
2821 		goto out;
2822 	}
2823 
2824 	resp_len = iwm_rx_packet_payload_len(pkt);
2825 	if (resp_len != sizeof(*resp)) {
2826 		err = EIO;
2827 		goto out;
2828 	}
2829 
2830 	resp = (void *)pkt->data;
2831 	if (le32toh(resp->status) == 0)
2832 		sc->sc_time_event_uid = le32toh(resp->unique_id);
2833 	else
2834 		err = EIO;
2835 out:
2836 	iwm_free_resp(sc, &hcmd);
2837 	return err;
2838 }
2839 
2840 void
2841 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2842     uint32_t duration, uint32_t max_delay)
2843 {
2844 	struct iwm_time_event_cmd time_cmd;
2845 
2846 	/* Do nothing if a time event is already scheduled. */
2847 	if (sc->sc_flags & IWM_FLAG_TE_ACTIVE)
2848 		return;
2849 
2850 	memset(&time_cmd, 0, sizeof(time_cmd));
2851 
2852 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2853 	time_cmd.id_and_color =
2854 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2855 	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2856 
2857 	time_cmd.apply_time = htole32(0);
2858 
2859 	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2860 	time_cmd.max_delay = htole32(max_delay);
2861 	/* TODO: why do we need to set an interval if the event is not periodic? */
2862 	time_cmd.interval = htole32(1);
2863 	time_cmd.duration = htole32(duration);
2864 	time_cmd.repeat = 1;
2865 	time_cmd.policy
2866 	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2867 	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
2868 		IWM_T2_V2_START_IMMEDIATELY);
2869 
2870 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2871 		sc->sc_flags |= IWM_FLAG_TE_ACTIVE;
2872 
2873 	DELAY(100);
2874 }
2875 
2876 void
2877 iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in)
2878 {
2879 	struct iwm_time_event_cmd time_cmd;
2880 
2881 	/* Do nothing if the time event has already ended. */
2882 	if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE) == 0)
2883 		return;
2884 
2885 	memset(&time_cmd, 0, sizeof(time_cmd));
2886 
2887 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
2888 	time_cmd.id_and_color =
2889 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2890 	time_cmd.id = htole32(sc->sc_time_event_uid);
2891 
2892 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2893 		sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
2894 
2895 	DELAY(100);
2896 }
2897 
2898 /*
2899  * NVM read access and content parsing.  We do not support
2900  * external NVM or writing NVM.
2901  */
2902 
2903 /* list of NVM sections we are allowed/need to read */
2904 const int iwm_nvm_to_read[] = {
2905 	IWM_NVM_SECTION_TYPE_HW,
2906 	IWM_NVM_SECTION_TYPE_SW,
2907 	IWM_NVM_SECTION_TYPE_REGULATORY,
2908 	IWM_NVM_SECTION_TYPE_CALIBRATION,
2909 	IWM_NVM_SECTION_TYPE_PRODUCTION,
2910 	IWM_NVM_SECTION_TYPE_REGULATORY_SDP,
2911 	IWM_NVM_SECTION_TYPE_HW_8000,
2912 	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2913 	IWM_NVM_SECTION_TYPE_PHY_SKU,
2914 };
2915 
2916 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2917 
2918 #define IWM_NVM_WRITE_OPCODE 1
2919 #define IWM_NVM_READ_OPCODE 0
2920 
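/*
 * Read a chunk of at most `length' bytes from the given NVM section,
 * starting at `offset', by sending an NVM access command to the
 * firmware. The number of bytes actually read is returned in `len'.
 */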
2921 int
2922 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2923     uint16_t length, uint8_t *data, uint16_t *len)
2924 {
2926 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2927 		.offset = htole16(offset),
2928 		.length = htole16(length),
2929 		.type = htole16(section),
2930 		.op_code = IWM_NVM_READ_OPCODE,
2931 	};
2932 	struct iwm_nvm_access_resp *nvm_resp;
2933 	struct iwm_rx_packet *pkt;
2934 	struct iwm_host_cmd cmd = {
2935 		.id = IWM_NVM_ACCESS_CMD,
2936 		.flags = (IWM_CMD_WANT_RESP | IWM_CMD_SEND_IN_RFKILL),
2937 		.resp_pkt_len = IWM_CMD_RESP_MAX,
2938 		.data = { &nvm_access_cmd, },
2939 	};
2940 	int err, offset_read;
2941 	size_t bytes_read;
2942 	uint8_t *resp_data;
2943 
2944 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2945 
2946 	err = iwm_send_cmd(sc, &cmd);
2947 	if (err)
2948 		return err;
2949 
2950 	pkt = cmd.resp_pkt;
2951 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2952 		err = EIO;
2953 		goto exit;
2954 	}
2955 
2956 	/* Extract NVM response */
2957 	nvm_resp = (void *)pkt->data;
2958 	if (nvm_resp == NULL)
2959 		return EIO;
2960 
2961 	err = le16toh(nvm_resp->status);
2962 	bytes_read = le16toh(nvm_resp->length);
2963 	offset_read = le16toh(nvm_resp->offset);
2964 	resp_data = nvm_resp->data;
2965 	if (err) {
2966 		err = EINVAL;
2967 		goto exit;
2968 	}
2969 
2970 	if (offset_read != offset) {
2971 		err = EINVAL;
2972 		goto exit;
2973 	}
2974 
2975 	if (bytes_read > length) {
2976 		err = EINVAL;
2977 		goto exit;
2978 	}
2979 
2980 	memcpy(data + offset, resp_data, bytes_read);
2981 	*len = bytes_read;
2982 
2983  exit:
2984 	iwm_free_resp(sc, &cmd);
2985 	return err;
2986 }
2987 
2988 /*
2989  * Reads an NVM section completely.
2990  * NICs prior to the 7000 family don't have a real NVM, but just read
2991  * section 0, which is the EEPROM. Because EEPROM reads are not limited
2992  * by the uCode, we need to manually check in this case that we don't
2993  * overflow and try to read more than the EEPROM size.
2994  */
2995 int
2996 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2997     uint16_t *len, size_t max_len)
2998 {
2999 	uint16_t chunklen, seglen;
3000 	int err = 0;
3001 
3002 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
3003 	*len = 0;
3004 
3005 	/* Read NVM chunks until exhausted (reading less than requested) */
3006 	while (seglen == chunklen && *len < max_len) {
3007 		err = iwm_nvm_read_chunk(sc,
3008 		    section, *len, chunklen, data, &seglen);
3009 		if (err)
3010 			return err;
3011 
3012 		*len += seglen;
3013 	}
3014 
3015 	return err;
3016 }
3017 
3018 uint8_t
3019 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
3020 {
3021 	uint8_t tx_ant;
3022 
3023 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
3024 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
3025 
3026 	if (sc->sc_nvm.valid_tx_ant)
3027 		tx_ant &= sc->sc_nvm.valid_tx_ant;
3028 
3029 	return tx_ant;
3030 }
3031 
3032 uint8_t
3033 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
3034 {
3035 	uint8_t rx_ant;
3036 
3037 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
3038 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
3039 
3040 	if (sc->sc_nvm.valid_rx_ant)
3041 		rx_ant &= sc->sc_nvm.valid_rx_ant;
3042 
3043 	return rx_ant;
3044 }
3045 
3046 int
3047 iwm_valid_siso_ant_rate_mask(struct iwm_softc *sc)
3048 {
3049 	uint8_t valid_tx_ant = iwm_fw_valid_tx_ant(sc);
3050 
3051 	/*
3052 	 * According to the Linux driver, antenna B should be preferred
3053 	 * on 9k devices since it is not shared with bluetooth. However,
3054 	 * there are 9k devices which do not support antenna B at all.
3055 	 */
3056 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000 &&
3057 	    (valid_tx_ant & IWM_ANT_B))
3058 		return IWM_RATE_MCS_ANT_B_MSK;
3059 
3060 	return IWM_RATE_MCS_ANT_A_MSK;
3061 }
3062 
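/*
 * Build net80211's channel map from the channel list and channel
 * flags found in the device's NVM, honouring the SKU capabilities
 * (5GHz, 11n, 11ac) advertised there.
 */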
3063 void
3064 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
3065     const uint8_t *nvm_channels, int nchan)
3066 {
3067 	struct ieee80211com *ic = &sc->sc_ic;
3068 	struct iwm_nvm_data *data = &sc->sc_nvm;
3069 	int ch_idx;
3070 	struct ieee80211_channel *channel;
3071 	uint16_t ch_flags;
3072 	int is_5ghz;
3073 	int flags, hw_value;
3074 
3075 	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
3076 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
3077 
3078 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
3079 		    !data->sku_cap_band_52GHz_enable)
3080 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
3081 
3082 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID))
3083 			continue;
3084 
3085 		hw_value = nvm_channels[ch_idx];
3086 		channel = &ic->ic_channels[hw_value];
3087 
3088 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
3089 		if (!is_5ghz) {
3090 			flags = IEEE80211_CHAN_2GHZ;
3091 			channel->ic_flags
3092 			    = IEEE80211_CHAN_CCK
3093 			    | IEEE80211_CHAN_OFDM
3094 			    | IEEE80211_CHAN_DYN
3095 			    | IEEE80211_CHAN_2GHZ;
3096 		} else {
3097 			flags = IEEE80211_CHAN_5GHZ;
3098 			channel->ic_flags =
3099 			    IEEE80211_CHAN_A;
3100 		}
3101 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
3102 
3103 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
3104 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
3105 
3106 		if (data->sku_cap_11n_enable) {
3107 			channel->ic_flags |= IEEE80211_CHAN_HT;
3108 			if (ch_flags & IWM_NVM_CHANNEL_40MHZ)
3109 				channel->ic_flags |= IEEE80211_CHAN_40MHZ;
3110 		}
3111 
3112 		if (is_5ghz && data->sku_cap_11ac_enable) {
3113 			channel->ic_flags |= IEEE80211_CHAN_VHT;
3114 			if (ch_flags & IWM_NVM_CHANNEL_80MHZ)
3115 				channel->ic_xflags |= IEEE80211_CHANX_80MHZ;
3116 		}
3117 	}
3118 }
3119 
3120 int
3121 iwm_mimo_enabled(struct iwm_softc *sc)
3122 {
3123 	struct ieee80211com *ic = &sc->sc_ic;
3124 
3125 	return !sc->sc_nvm.sku_cap_mimo_disable &&
3126 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
3127 }
3128 
3129 void
3130 iwm_setup_ht_rates(struct iwm_softc *sc)
3131 {
3132 	struct ieee80211com *ic = &sc->sc_ic;
3133 	uint8_t rx_ant;
3134 
3135 	/* TX is supported with the same MCS as RX. */
3136 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
3137 
3138 	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
3139 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
3140 
3141 	if (!iwm_mimo_enabled(sc))
3142 		return;
3143 
3144 	rx_ant = iwm_fw_valid_rx_ant(sc);
3145 	if ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
3146 	    (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)
3147 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
3148 }
3149 
3150 void
3151 iwm_setup_vht_rates(struct iwm_softc *sc)
3152 {
3153 	struct ieee80211com *ic = &sc->sc_ic;
3154 	uint8_t rx_ant = iwm_fw_valid_rx_ant(sc);
3155 	int n;
3156 
3157 	ic->ic_vht_rxmcs = (IEEE80211_VHT_MCS_0_9 <<
3158 	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(1));
3159 
3160 	if (iwm_mimo_enabled(sc) &&
3161 	    ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
3162 	    (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)) {
3163 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_0_9 <<
3164 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3165 	} else {
3166 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3167 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3168 	}
3169 
3170 	for (n = 3; n <= IEEE80211_VHT_NUM_SS; n++) {
3171 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3172 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(n));
3173 	}
3174 
3175 	ic->ic_vht_txmcs = ic->ic_vht_rxmcs;
3176 }
3177 
3178 void
3179 iwm_init_reorder_buffer(struct iwm_reorder_buffer *reorder_buf,
3180     uint16_t ssn, uint16_t buf_size)
3181 {
3182 	reorder_buf->head_sn = ssn;
3183 	reorder_buf->num_stored = 0;
3184 	reorder_buf->buf_size = buf_size;
3185 	reorder_buf->last_amsdu = 0;
3186 	reorder_buf->last_sub_index = 0;
3187 	reorder_buf->removed = 0;
3188 	reorder_buf->valid = 0;
3189 	reorder_buf->consec_oldsn_drops = 0;
3190 	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3191 	reorder_buf->consec_oldsn_prev_drop = 0;
3192 }
3193 
3194 void
3195 iwm_clear_reorder_buffer(struct iwm_softc *sc, struct iwm_rxba_data *rxba)
3196 {
3197 	int i;
3198 	struct iwm_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3199 	struct iwm_reorder_buf_entry *entry;
3200 
3201 	for (i = 0; i < reorder_buf->buf_size; i++) {
3202 		entry = &rxba->entries[i];
3203 		ml_purge(&entry->frames);
3204 		timerclear(&entry->reorder_time);
3205 	}
3206 
3207 	reorder_buf->removed = 1;
3208 	timeout_del(&reorder_buf->reorder_timer);
3209 	timerclear(&rxba->last_rx);
3210 	timeout_del(&rxba->session_timer);
3211 	rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
3212 }
3213 
3214 #define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
3215 
3216 void
3217 iwm_rx_ba_session_expired(void *arg)
3218 {
3219 	struct iwm_rxba_data *rxba = arg;
3220 	struct iwm_softc *sc = rxba->sc;
3221 	struct ieee80211com *ic = &sc->sc_ic;
3222 	struct ieee80211_node *ni = ic->ic_bss;
3223 	struct timeval now, timeout, expiry;
3224 	int s;
3225 
3226 	s = splnet();
3227 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0 &&
3228 	    ic->ic_state == IEEE80211_S_RUN &&
3229 	    rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3230 		getmicrouptime(&now);
3231 		USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3232 		timeradd(&rxba->last_rx, &timeout, &expiry);
3233 		if (timercmp(&now, &expiry, <)) {
3234 			timeout_add_usec(&rxba->session_timer, rxba->timeout);
3235 		} else {
3236 			ic->ic_stats.is_ht_rx_ba_timeout++;
3237 			ieee80211_delba_request(ic, ni,
3238 			    IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
3239 		}
3240 	}
3241 	splx(s);
3242 }
3243 
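/*
 * Reorder buffer timer: release buffered frames which have expired,
 * advancing the window past the last expired frame, or re-arm the
 * timer if the oldest stored frame has not expired yet.
 */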
3244 void
3245 iwm_reorder_timer_expired(void *arg)
3246 {
3247 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3248 	struct iwm_reorder_buffer *buf = arg;
3249 	struct iwm_rxba_data *rxba = iwm_rxba_data_from_reorder_buf(buf);
3250 	struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
3251 	struct iwm_softc *sc = rxba->sc;
3252 	struct ieee80211com *ic = &sc->sc_ic;
3253 	struct ieee80211_node *ni = ic->ic_bss;
3254 	int i, s;
3255 	uint16_t sn = 0, index = 0;
3256 	int expired = 0;
3257 	int cont = 0;
3258 	struct timeval now, timeout, expiry;
3259 
3260 	if (!buf->num_stored || buf->removed)
3261 		return;
3262 
3263 	s = splnet();
3264 	getmicrouptime(&now);
3265 	USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3266 
3267 	for (i = 0; i < buf->buf_size ; i++) {
3268 		index = (buf->head_sn + i) % buf->buf_size;
3269 
3270 		if (ml_empty(&entries[index].frames)) {
3271 			/*
3272 			 * If there is a hole and the next frame didn't expire
3273 			 * we want to break and not advance SN.
3274 			 */
3275 			cont = 0;
3276 			continue;
3277 		}
3278 		timeradd(&entries[index].reorder_time, &timeout, &expiry);
3279 		if (!cont && timercmp(&now, &expiry, <))
3280 			break;
3281 
3282 		expired = 1;
3283 		/* continue until next hole after this expired frame */
3284 		cont = 1;
3285 		sn = (buf->head_sn + (i + 1)) & 0xfff;
3286 	}
3287 
3288 	if (expired) {
3289 		/* SN is set to the last expired frame + 1 */
3290 		iwm_release_frames(sc, ni, rxba, buf, sn, &ml);
3291 		if_input(&sc->sc_ic.ic_if, &ml);
3292 		ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
3293 	} else {
3294 		/*
3295 		 * If no frame expired and there are stored frames, index is now
3296 		 * pointing to the first unexpired frame - modify reorder timeout
3297 		 * accordingly.
3298 		 */
3299 		timeout_add_usec(&buf->reorder_timer,
3300 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3301 	}
3302 
3303 	splx(s);
3304 }
3305 
3306 #define IWM_MAX_RX_BA_SESSIONS 16
3307 
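/*
 * Start or stop an Rx block ack session by sending an ADD_STA command
 * to the firmware. On MQ-capable devices the firmware deaggregates in
 * hardware and assigns a block ack ID (BAID) which indexes our reorder
 * buffer state in sc_rxba_data[].
 */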
3308 int
3309 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3310     uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3311 {
3312 	struct ieee80211com *ic = &sc->sc_ic;
3313 	struct iwm_add_sta_cmd cmd;
3314 	struct iwm_node *in = (void *)ni;
3315 	int err, s;
3316 	uint32_t status;
3317 	size_t cmdsize;
3318 	struct iwm_rxba_data *rxba = NULL;
3319 	uint8_t baid = 0;
3320 
3321 	s = splnet();
3322 
3323 	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
3324 		ieee80211_addba_req_refuse(ic, ni, tid);
3325 		splx(s);
3326 		return 0;
3327 	}
3328 
3329 	memset(&cmd, 0, sizeof(cmd));
3330 
3331 	cmd.sta_id = IWM_STATION_ID;
3332 	cmd.mac_id_n_color
3333 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3334 	cmd.add_modify = IWM_STA_MODE_MODIFY;
3335 
3336 	if (start) {
3337 		cmd.add_immediate_ba_tid = (uint8_t)tid;
3338 		cmd.add_immediate_ba_ssn = ssn;
3339 		cmd.rx_ba_window = winsize;
3340 	} else {
3341 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
3342 	}
3343 	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
3344 	    IWM_STA_MODIFY_REMOVE_BA_TID;
3345 
3346 	status = IWM_ADD_STA_SUCCESS;
3347 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3348 		cmdsize = sizeof(cmd);
3349 	else
3350 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3351 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
3352 	    &status);
3353 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
3354 		err = EIO;
3355 	if (err) {
3356 		if (start)
3357 			ieee80211_addba_req_refuse(ic, ni, tid);
3358 		splx(s);
3359 		return err;
3360 	}
3361 
3362 	if (sc->sc_mqrx_supported) {
3363 		/* Deaggregation is done in hardware. */
3364 		if (start) {
3365 			if (!(status & IWM_ADD_STA_BAID_VALID_MASK)) {
3366 				ieee80211_addba_req_refuse(ic, ni, tid);
3367 				splx(s);
3368 				return EIO;
3369 			}
3370 			baid = (status & IWM_ADD_STA_BAID_MASK) >>
3371 			    IWM_ADD_STA_BAID_SHIFT;
3372 			if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
3373 			    baid >= nitems(sc->sc_rxba_data)) {
3374 				ieee80211_addba_req_refuse(ic, ni, tid);
3375 				splx(s);
3376 				return EIO;
3377 			}
3378 			rxba = &sc->sc_rxba_data[baid];
3379 			if (rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3380 				ieee80211_addba_req_refuse(ic, ni, tid);
3381 				splx(s);
3382 				return 0;
3383 			}
3384 			rxba->sta_id = IWM_STATION_ID;
3385 			rxba->tid = tid;
3386 			rxba->baid = baid;
3387 			rxba->timeout = timeout_val;
3388 			getmicrouptime(&rxba->last_rx);
3389 			iwm_init_reorder_buffer(&rxba->reorder_buf, ssn,
3390 			    winsize);
3391 			if (timeout_val != 0) {
3392 				struct ieee80211_rx_ba *ba;
3393 				timeout_add_usec(&rxba->session_timer,
3394 				    timeout_val);
3395 				/* XXX disable net80211's BA timeout handler */
3396 				ba = &ni->ni_rx_ba[tid];
3397 				ba->ba_timeout_val = 0;
3398 			}
3399 		} else {
3400 			int i;
3401 			for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3402 				rxba = &sc->sc_rxba_data[i];
3403 				if (rxba->baid ==
3404 				    IWM_RX_REORDER_DATA_INVALID_BAID)
3405 					continue;
3406 				if (rxba->tid != tid)
3407 					continue;
3408 				iwm_clear_reorder_buffer(sc, rxba);
3409 				break;
3410 			}
3411 		}
3412 	}
3413 
3414 	if (start) {
3415 		sc->sc_rx_ba_sessions++;
3416 		ieee80211_addba_req_accept(ic, ni, tid);
3417 	} else if (sc->sc_rx_ba_sessions > 0)
3418 		sc->sc_rx_ba_sessions--;
3419 
3420 	splx(s);
3421 	return 0;
3422 }
3423 
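/*
 * Task which pushes an updated MAC context to the firmware while
 * associated, e.g. after changes to protection settings, slot time,
 * EDCA parameters, or the DTIM period.
 */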
3424 void
3425 iwm_mac_ctxt_task(void *arg)
3426 {
3427 	struct iwm_softc *sc = arg;
3428 	struct ieee80211com *ic = &sc->sc_ic;
3429 	struct iwm_node *in = (void *)ic->ic_bss;
3430 	int err, s = splnet();
3431 
3432 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3433 	    ic->ic_state != IEEE80211_S_RUN) {
3434 		refcnt_rele_wake(&sc->task_refs);
3435 		splx(s);
3436 		return;
3437 	}
3438 
3439 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
3440 	if (err)
3441 		printf("%s: failed to update MAC\n", DEVNAME(sc));
3442 
3443 	iwm_unprotect_session(sc, in);
3444 
3445 	refcnt_rele_wake(&sc->task_refs);
3446 	splx(s);
3447 }
3448 
3449 void
3450 iwm_updateprot(struct ieee80211com *ic)
3451 {
3452 	struct iwm_softc *sc = ic->ic_softc;
3453 
3454 	if (ic->ic_state == IEEE80211_S_RUN &&
3455 	    !task_pending(&sc->newstate_task))
3456 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3457 }
3458 
3459 void
3460 iwm_updateslot(struct ieee80211com *ic)
3461 {
3462 	struct iwm_softc *sc = ic->ic_softc;
3463 
3464 	if (ic->ic_state == IEEE80211_S_RUN &&
3465 	    !task_pending(&sc->newstate_task))
3466 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3467 }
3468 
3469 void
3470 iwm_updateedca(struct ieee80211com *ic)
3471 {
3472 	struct iwm_softc *sc = ic->ic_softc;
3473 
3474 	if (ic->ic_state == IEEE80211_S_RUN &&
3475 	    !task_pending(&sc->newstate_task))
3476 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3477 }
3478 
3479 void
3480 iwm_phy_ctxt_task(void *arg)
3481 {
3482 	struct iwm_softc *sc = arg;
3483 	struct ieee80211com *ic = &sc->sc_ic;
3484 	struct iwm_node *in = (void *)ic->ic_bss;
3485 	struct ieee80211_node *ni = &in->in_ni;
3486 	uint8_t chains, sco, vht_chan_width;
3487 	int err, s = splnet();
3488 
3489 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3490 	    ic->ic_state != IEEE80211_S_RUN ||
3491 	    in->in_phyctxt == NULL) {
3492 		refcnt_rele_wake(&sc->task_refs);
3493 		splx(s);
3494 		return;
3495 	}
3496 
3497 	chains = iwm_mimo_enabled(sc) ? 2 : 1;
3498 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3499 	    IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
3500 	    ieee80211_node_supports_ht_chan40(ni))
3501 		sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3502 	else
3503 		sco = IEEE80211_HTOP0_SCO_SCN;
3504 	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
3505 	    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
3506 	    ieee80211_node_supports_vht_chan80(ni))
3507 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
3508 	else
3509 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
3510 	if (in->in_phyctxt->sco != sco ||
3511 	    in->in_phyctxt->vht_chan_width != vht_chan_width) {
3512 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
3513 		    in->in_phyctxt->channel, chains, chains, 0, sco,
3514 		    vht_chan_width);
3515 		if (err)
3516 			printf("%s: failed to update PHY\n", DEVNAME(sc));
3517 		iwm_setrates(in, 0);
3518 	}
3519 
3520 	refcnt_rele_wake(&sc->task_refs);
3521 	splx(s);
3522 }
3523 
3524 void
3525 iwm_updatechan(struct ieee80211com *ic)
3526 {
3527 	struct iwm_softc *sc = ic->ic_softc;
3528 
3529 	if (ic->ic_state == IEEE80211_S_RUN &&
3530 	    !task_pending(&sc->newstate_task))
3531 		iwm_add_task(sc, systq, &sc->phy_ctxt_task);
3532 }
3533 
3534 void
3535 iwm_updatedtim(struct ieee80211com *ic)
3536 {
3537 	struct iwm_softc *sc = ic->ic_softc;
3538 
3539 	if (ic->ic_state == IEEE80211_S_RUN &&
3540 	    !task_pending(&sc->newstate_task))
3541 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3542 }
3543 
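/*
 * Start or stop a Tx block ack session. Each TID maps to a fixed
 * aggregation queue (IWM_FIRST_AGG_TX_QUEUE + tid) which is enabled
 * on first use; the ADD_STA command then tells the firmware which
 * TIDs have A-MPDU aggregation enabled.
 */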
3544 int
3545 iwm_sta_tx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3546     uint16_t ssn, uint16_t winsize, int start)
3547 {
3548 	struct iwm_add_sta_cmd cmd;
3549 	struct ieee80211com *ic = &sc->sc_ic;
3550 	struct iwm_node *in = (void *)ni;
3551 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3552 	struct iwm_tx_ring *ring;
3553 	enum ieee80211_edca_ac ac;
3554 	int fifo;
3555 	uint32_t status;
3556 	int err;
3557 	size_t cmdsize;
3558 
3559 	/* Ensure we can map this TID to an aggregation queue. */
3560 	if (tid >= IWM_MAX_TID_COUNT || qid > IWM_LAST_AGG_TX_QUEUE)
3561 		return ENOSPC;
3562 
3563 	if (start) {
3564 		if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3565 			return 0;
3566 	} else {
3567 		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3568 			return 0;
3569 	}
3570 
3571 	ring = &sc->txq[qid];
3572 	ac = iwm_tid_to_ac[tid];
3573 	fifo = iwm_ac_to_tx_fifo[ac];
3574 
3575 	memset(&cmd, 0, sizeof(cmd));
3576 
3577 	cmd.sta_id = IWM_STATION_ID;
3578 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
3579 	    in->in_color));
3580 	cmd.add_modify = IWM_STA_MODE_MODIFY;
3581 
3582 	if (start) {
3583 		/* Enable Tx aggregation for this queue. */
3584 		in->tid_disable_ampdu &= ~(1 << tid);
3585 		in->tfd_queue_msk |= (1 << qid);
3586 	} else {
3587 		in->tid_disable_ampdu |= (1 << tid);
3588 		/*
3589 		 * Queue remains enabled in the TFD queue mask
3590 		 * until we leave RUN state.
3591 		 */
3592 		err = iwm_flush_sta(sc, in);
3593 		if (err)
3594 			return err;
3595 	}
3596 
3597 	cmd.tfd_queue_msk |= htole32(in->tfd_queue_msk);
3598 	cmd.tid_disable_tx = htole16(in->tid_disable_ampdu);
3599 	cmd.modify_mask = (IWM_STA_MODIFY_QUEUES |
3600 	    IWM_STA_MODIFY_TID_DISABLE_TX);
3601 
3602 	if (start && (sc->qenablemsk & (1 << qid)) == 0) {
3603 		if (!iwm_nic_lock(sc)) {
3604 			if (start)
3605 				ieee80211_addba_resp_refuse(ic, ni, tid,
3606 				    IEEE80211_STATUS_UNSPECIFIED);
3607 			return EBUSY;
3608 		}
3609 		err = iwm_enable_txq(sc, IWM_STATION_ID, qid, fifo, 1, tid,
3610 		    ssn);
3611 		iwm_nic_unlock(sc);
3612 		if (err) {
3613 			printf("%s: could not enable Tx queue %d (error %d)\n",
3614 			    DEVNAME(sc), qid, err);
3615 			if (start)
3616 				ieee80211_addba_resp_refuse(ic, ni, tid,
3617 				    IEEE80211_STATUS_UNSPECIFIED);
3618 			return err;
3619 		}
3620 		/*
3621 		 * If iwm_enable_txq() employed the SCD hardware bug
3622 		 * workaround we must skip the frame with seqnum SSN.
3623 		 */
3624 		if (ring->cur != IWM_AGG_SSN_TO_TXQ_IDX(ssn)) {
3625 			ssn = (ssn + 1) & 0xfff;
3626 			KASSERT(ring->cur == IWM_AGG_SSN_TO_TXQ_IDX(ssn));
3627 			ieee80211_output_ba_move_window(ic, ni, tid, ssn);
3628 			ni->ni_qos_txseqs[tid] = ssn;
3629 		}
3630 	}
3631 
3632 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3633 		cmdsize = sizeof(cmd);
3634 	else
3635 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3636 
3637 	status = 0;
3638 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd, &status);
3639 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
3640 		err = EIO;
3641 	if (err) {
3642 		printf("%s: could not update sta (error %d)\n",
3643 		    DEVNAME(sc), err);
3644 		if (start)
3645 			ieee80211_addba_resp_refuse(ic, ni, tid,
3646 			    IEEE80211_STATUS_UNSPECIFIED);
3647 		return err;
3648 	}
3649 
3650 	if (start) {
3651 		sc->tx_ba_queue_mask |= (1 << qid);
3652 		ieee80211_addba_resp_accept(ic, ni, tid);
3653 	} else {
3654 		sc->tx_ba_queue_mask &= ~(1 << qid);
3655 
3656 		/*
3657 		 * Clear pending frames but keep the queue enabled.
3658 		 * Firmware panics if we disable the queue here.
3659 		 */
3660 		iwm_txq_advance(sc, ring, ring->cur);
3661 		iwm_clear_oactive(sc, ring);
3662 	}
3663 
3664 	return 0;
3665 }
3666 
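/*
 * Task which processes pending block ack session changes recorded in
 * the ba_rx/ba_tx tidmasks. Runs from the systq task queue because
 * the firmware commands involved may sleep.
 */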
3667 void
3668 iwm_ba_task(void *arg)
3669 {
3670 	struct iwm_softc *sc = arg;
3671 	struct ieee80211com *ic = &sc->sc_ic;
3672 	struct ieee80211_node *ni = ic->ic_bss;
3673 	int s = splnet();
3674 	int tid, err = 0;
3675 
3676 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3677 	    ic->ic_state != IEEE80211_S_RUN) {
3678 		refcnt_rele_wake(&sc->task_refs);
3679 		splx(s);
3680 		return;
3681 	}
3682 
3683 	for (tid = 0; tid < IWM_MAX_TID_COUNT && !err; tid++) {
3684 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3685 			break;
3686 		if (sc->ba_rx.start_tidmask & (1 << tid)) {
3687 			struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3688 			err = iwm_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3689 			    ba->ba_winsize, ba->ba_timeout_val, 1);
3690 			sc->ba_rx.start_tidmask &= ~(1 << tid);
3691 		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3692 			err = iwm_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3693 			sc->ba_rx.stop_tidmask &= ~(1 << tid);
3694 		}
3695 	}
3696 
3697 	for (tid = 0; tid < IWM_MAX_TID_COUNT && !err; tid++) {
3698 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3699 			break;
3700 		if (sc->ba_tx.start_tidmask & (1 << tid)) {
3701 			struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3702 			err = iwm_sta_tx_agg(sc, ni, tid, ba->ba_winstart,
3703 			    ba->ba_winsize, 1);
3704 			sc->ba_tx.start_tidmask &= ~(1 << tid);
3705 		} else if (sc->ba_tx.stop_tidmask & (1 << tid)) {
3706 			err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
3707 			sc->ba_tx.stop_tidmask &= ~(1 << tid);
3708 		}
3709 	}
3710 
3711 	/*
3712 	 * We "recover" from failure to start or stop a BA session
3713 	 * by resetting the device.
3714 	 */
3715 	if (err && (sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
3716 		task_add(systq, &sc->init_task);
3717 
3718 	refcnt_rele_wake(&sc->task_refs);
3719 	splx(s);
3720 }
3721 
3722 /*
3723  * This function is called by upper layer when an ADDBA request is received
3724  * from another STA and before the ADDBA response is sent.
3725  */
3726 int
3727 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3728     uint8_t tid)
3729 {
3730 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3731 
3732 	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS ||
3733 	    tid > IWM_MAX_TID_COUNT)
3734 		return ENOSPC;
3735 
3736 	if (sc->ba_rx.start_tidmask & (1 << tid))
3737 		return EBUSY;
3738 
3739 	sc->ba_rx.start_tidmask |= (1 << tid);
3740 	iwm_add_task(sc, systq, &sc->ba_task);
3741 
3742 	return EBUSY;
3743 }
3744 
3745 /*
3746  * This function is called by upper layer on teardown of an HT-immediate
3747  * Block Ack agreement (eg. upon receipt of a DELBA frame).
3748  */
3749 void
3750 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3751     uint8_t tid)
3752 {
3753 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3754 
3755 	if (tid > IWM_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3756 		return;
3757 
3758 	sc->ba_rx.stop_tidmask |= (1 << tid);
3759 	iwm_add_task(sc, systq, &sc->ba_task);
3760 }
3761 
3762 int
3763 iwm_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3764     uint8_t tid)
3765 {
3766 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3767 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3768 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3769 
3770 	/* We only implement Tx aggregation with DQA-capable firmware. */
3771 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
3772 		return ENOTSUP;
3773 
3774 	/* Ensure we can map this TID to an aggregation queue. */
3775 	if (tid >= IWM_MAX_TID_COUNT)
3776 		return EINVAL;
3777 
3778 	/* We only support a fixed Tx aggregation window size, for now. */
3779 	if (ba->ba_winsize != IWM_FRAME_LIMIT)
3780 		return ENOTSUP;
3781 
3782 	/* Is firmware already using Tx aggregation on this queue? */
3783 	if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3784 		return ENOSPC;
3785 
3786 	/* Are we already processing an ADDBA request? */
3787 	if (sc->ba_tx.start_tidmask & (1 << tid))
3788 		return EBUSY;
3789 
3790 	sc->ba_tx.start_tidmask |= (1 << tid);
3791 	iwm_add_task(sc, systq, &sc->ba_task);
3792 
3793 	return EBUSY;
3794 }
3795 
3796 void
3797 iwm_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3798     uint8_t tid)
3799 {
3800 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3801 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3802 
3803 	if (tid > IWM_MAX_TID_COUNT || sc->ba_tx.stop_tidmask & (1 << tid))
3804 		return;
3805 
3806 	/* Is firmware currently using Tx aggregation on this queue? */
3807 	if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3808 		return;
3809 
3810 	sc->ba_tx.stop_tidmask |= (1 << tid);
3811 	iwm_add_task(sc, systq, &sc->ba_task);
3812 }
3813 
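/*
 * Derive the MAC address on family-8000 devices: prefer the address
 * from the NVM MAC_OVERRIDE (MAO) section if it is valid, and fall
 * back to the OTP address stored in the WFMP_MAC_ADDR prph registers.
 */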
3814 void
3815 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3816     const uint16_t *mac_override, const uint16_t *nvm_hw)
3817 {
3818 	const uint8_t *hw_addr;
3819 
3820 	if (mac_override) {
3821 		static const uint8_t reserved_mac[] = {
3822 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3823 		};
3824 
3825 		hw_addr = (const uint8_t *)(mac_override +
3826 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
3827 
3828 		/*
3829 		 * Store the MAC address from the MAO section.
3830 		 * No byte swapping is required in the MAO section.
3831 		 */
3832 		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
3833 
3834 		/*
3835 		 * Force the use of the OTP MAC address in case of reserved MAC
3836 		 * address in the NVM, or if address is given but invalid.
3837 		 */
3838 		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
3839 		    (memcmp(etherbroadcastaddr, data->hw_addr,
3840 		    sizeof(etherbroadcastaddr)) != 0) &&
3841 		    (memcmp(etheranyaddr, data->hw_addr,
3842 		    sizeof(etheranyaddr)) != 0) &&
3843 		    !ETHER_IS_MULTICAST(data->hw_addr))
3844 			return;
3845 	}
3846 
3847 	if (nvm_hw) {
3848 		/* Read the mac address from WFMP registers. */
3849 		uint32_t mac_addr0, mac_addr1;
3850 
3851 		if (!iwm_nic_lock(sc))
3852 			goto out;
3853 		mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3854 		mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
3855 		iwm_nic_unlock(sc);
3856 
3857 		hw_addr = (const uint8_t *)&mac_addr0;
3858 		data->hw_addr[0] = hw_addr[3];
3859 		data->hw_addr[1] = hw_addr[2];
3860 		data->hw_addr[2] = hw_addr[1];
3861 		data->hw_addr[3] = hw_addr[0];
3862 
3863 		hw_addr = (const uint8_t *)&mac_addr1;
3864 		data->hw_addr[4] = hw_addr[1];
3865 		data->hw_addr[5] = hw_addr[0];
3866 
3867 		return;
3868 	}
3869 out:
3870 	printf("%s: mac address not found\n", DEVNAME(sc));
3871 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
3872 }
3873 
3874 int
3875 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3876     const uint16_t *nvm_sw, const uint16_t *nvm_calib,
3877     const uint16_t *mac_override, const uint16_t *phy_sku,
3878     const uint16_t *regulatory, int n_regulatory)
3879 {
3880 	struct iwm_nvm_data *data = &sc->sc_nvm;
3881 	uint8_t hw_addr[ETHER_ADDR_LEN];
3882 	uint32_t sku;
3883 	uint16_t lar_config;
3884 
3885 	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
3886 
3887 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3888 		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
3889 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
3890 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
3891 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
3892 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
3893 
3894 		sku = le16_to_cpup(nvm_sw + IWM_SKU);
3895 	} else {
3896 		uint32_t radio_cfg =
3897 		    le32_to_cpup((uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
3898 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
3899 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
3900 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
3901 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
3902 		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
3903 		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
3904 
3905 		sku = le32_to_cpup((uint32_t *)(phy_sku + IWM_SKU_8000));
3906 	}
3907 
3908 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
3909 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
3910 	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
3911 	data->sku_cap_11ac_enable = sku & IWM_NVM_SKU_CAP_11AC_ENABLE;
3912 	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
3913 
3914 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3915 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
3916 				       IWM_NVM_LAR_OFFSET_8000_OLD :
3917 				       IWM_NVM_LAR_OFFSET_8000;
3918 
3919 		lar_config = le16_to_cpup(regulatory + lar_offset);
3920 		data->lar_enabled = !!(lar_config &
3921 				       IWM_NVM_LAR_ENABLED_8000);
3922 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS_8000);
3923 	} else
3924 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
3925 
3926 
3927 	/* The address is stored as little-endian 16-bit words, i.e. in byte order 214365. */
3928 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3929 		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
3930 		data->hw_addr[0] = hw_addr[1];
3931 		data->hw_addr[1] = hw_addr[0];
3932 		data->hw_addr[2] = hw_addr[3];
3933 		data->hw_addr[3] = hw_addr[2];
3934 		data->hw_addr[4] = hw_addr[5];
3935 		data->hw_addr[5] = hw_addr[4];
3936 	} else
3937 		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
3938 
3939 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3940 		if (sc->nvm_type == IWM_NVM_SDP) {
3941 			iwm_init_channel_map(sc, regulatory, iwm_nvm_channels,
3942 			    MIN(n_regulatory, nitems(iwm_nvm_channels)));
3943 		} else {
3944 			iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3945 			    iwm_nvm_channels, nitems(iwm_nvm_channels));
3946 		}
3947 	} else
3948 		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3949 		    iwm_nvm_channels_8000,
3950 		    MIN(n_regulatory, nitems(iwm_nvm_channels_8000)));
3951 
3952 	data->calib_version = 255;   /* TODO:
3953 					this value will prevent some checks from
3954 					failing; we need to check whether this
3955 					field is still needed, and if it is,
3956 					where it lives in the NVM */
3957 
3958 	return 0;
3959 }
3960 
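/*
 * Verify that all NVM sections required for this device family are
 * present, then hand the raw section data to iwm_parse_nvm_data().
 */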
3961 int
3962 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3963 {
3964 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3965 	const uint16_t *regulatory = NULL;
3966 	int n_regulatory = 0;
3967 
3968 	/* Check that the required sections are present. */
3969 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3970 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3971 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3972 			return ENOENT;
3973 		}
3974 
3975 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3976 
3977 		if (sc->nvm_type == IWM_NVM_SDP) {
3978 			if (!sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data)
3979 				return ENOENT;
3980 			regulatory = (const uint16_t *)
3981 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data;
3982 			n_regulatory =
3983 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].length;
3984 		}
3985 	} else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3986 		/* SW and REGULATORY sections are mandatory */
3987 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3988 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
3989 			return ENOENT;
3990 		}
3991 		/* MAC_OVERRIDE or at least HW section must exist */
3992 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
3993 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3994 			return ENOENT;
3995 		}
3996 
3997 		/* PHY_SKU section is mandatory in B0 */
3998 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
3999 			return ENOENT;
4000 		}
4001 
4002 		regulatory = (const uint16_t *)
4003 		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
4004 		n_regulatory = sections[IWM_NVM_SECTION_TYPE_REGULATORY].length;
4005 		hw = (const uint16_t *)
4006 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
4007 		mac_override =
4008 			(const uint16_t *)
4009 			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
4010 		phy_sku = (const uint16_t *)
4011 		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
4012 	} else {
4013 		panic("unknown device family %d", sc->sc_device_family);
4014 	}
4015 
4016 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
4017 	calib = (const uint16_t *)
4018 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
4019 
4020 	/* XXX should pass in the length of every section */
4021 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
4022 	    phy_sku, regulatory, n_regulatory);
4023 }
4024 
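/*
 * Read all NVM sections we care about into temporary storage and
 * parse them. Sections which fail to read are simply skipped; the
 * parser decides whether the mandatory ones are present.
 */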
4025 int
4026 iwm_nvm_init(struct iwm_softc *sc)
4027 {
4028 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
4029 	int i, section, err;
4030 	uint16_t len;
4031 	uint8_t *buf;
4032 	const size_t bufsz = sc->sc_nvm_max_section_size;
4033 
4034 	memset(nvm_sections, 0, sizeof(nvm_sections));
4035 
4036 	buf = malloc(bufsz, M_DEVBUF, M_WAIT);
4037 	if (buf == NULL)
4038 		return ENOMEM;
4039 
4040 	for (i = 0; i < nitems(iwm_nvm_to_read); i++) {
4041 		section = iwm_nvm_to_read[i];
4042 		KASSERT(section <= nitems(nvm_sections));
4043 
4044 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
4045 		if (err) {
4046 			err = 0;
4047 			continue;
4048 		}
4049 		nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT);
4050 		if (nvm_sections[section].data == NULL) {
4051 			err = ENOMEM;
4052 			break;
4053 		}
4054 		memcpy(nvm_sections[section].data, buf, len);
4055 		nvm_sections[section].length = len;
4056 	}
4057 	free(buf, M_DEVBUF, bufsz);
4058 	if (err == 0)
4059 		err = iwm_parse_nvm_sections(sc, nvm_sections);
4060 
4061 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
4062 		if (nvm_sections[i].data != NULL)
4063 			free(nvm_sections[i].data, M_DEVBUF,
4064 			    nvm_sections[i].length);
4065 	}
4066 
4067 	return err;
4068 }
4069 
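/*
 * Upload one firmware section to the device, splitting it into chunks
 * no larger than IWM_FH_MEM_TB_MAX_LENGTH bytes so that each fits in
 * a single flow handler transfer buffer.
 */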
4070 int
4071 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
4072     const uint8_t *section, uint32_t byte_cnt)
4073 {
4074 	int err = EINVAL;
4075 	uint32_t chunk_sz, offset;
4076 
4077 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
4078 
4079 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
4080 		uint32_t addr, len;
4081 		const uint8_t *data;
4082 
4083 		addr = dst_addr + offset;
4084 		len = MIN(chunk_sz, byte_cnt - offset);
4085 		data = section + offset;
4086 
4087 		err = iwm_firmware_load_chunk(sc, addr, data, len);
4088 		if (err)
4089 			break;
4090 	}
4091 
4092 	return err;
4093 }
4094 
4095 int
4096 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
4097     const uint8_t *chunk, uint32_t byte_cnt)
4098 {
4099 	struct iwm_dma_info *dma = &sc->fw_dma;
4100 	int err;
4101 
4102 	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
4103 	memcpy(dma->vaddr, chunk, byte_cnt);
4104 	bus_dmamap_sync(sc->sc_dmat,
4105 	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
4106 
4107 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
4108 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
4109 		err = iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
4110 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
4111 		if (err)
4112 			return err;
4113 	}
4114 
4115 	sc->sc_fw_chunk_done = 0;
4116 
4117 	if (!iwm_nic_lock(sc))
4118 		return EBUSY;
4119 
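	/*
	 * Program the flow handler's service channel to DMA the chunk
	 * from our bounce buffer to the device's internal memory at
	 * dst_addr, raising an interrupt when the transfer completes.
	 */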
4120 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
4121 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
4122 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
4123 	    dst_addr);
4124 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
4125 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
4126 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
4127 	    (iwm_get_dma_hi_addr(dma->paddr)
4128 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
4129 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
4130 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
4131 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
4132 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
4133 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
4134 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
4135 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
4136 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
4137 
4138 	iwm_nic_unlock(sc);
4139 
4140 	/* Wait for this segment to load. */
4141 	err = 0;
4142 	while (!sc->sc_fw_chunk_done) {
4143 		err = tsleep_nsec(&sc->sc_fw, 0, "iwmfw", SEC_TO_NSEC(1));
4144 		if (err)
4145 			break;
4146 	}
4147 
4148 	if (!sc->sc_fw_chunk_done)
4149 		printf("%s: fw chunk addr 0x%x len %d failed to load\n",
4150 		    DEVNAME(sc), dst_addr, byte_cnt);
4151 
4152 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
4153 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
4154 		int err2 = iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
4155 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
4156 		if (!err)
4157 			err = err2;
4158 	}
4159 
4160 	return err;
4161 }
4162 
4163 int
4164 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4165 {
4166 	struct iwm_fw_sects *fws;
4167 	int err, i;
4168 	void *data;
4169 	uint32_t dlen;
4170 	uint32_t offset;
4171 
4172 	fws = &sc->sc_fw.fw_sects[ucode_type];
4173 	for (i = 0; i < fws->fw_count; i++) {
4174 		data = fws->fw_sect[i].fws_data;
4175 		dlen = fws->fw_sect[i].fws_len;
4176 		offset = fws->fw_sect[i].fws_devoff;
4177 		if (dlen > sc->sc_fwdmasegsz) {
4178 			err = EFBIG;
4179 		} else
4180 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
4181 		if (err) {
4182 			printf("%s: could not load firmware chunk %u of %u\n",
4183 			    DEVNAME(sc), i, fws->fw_count);
4184 			return err;
4185 		}
4186 	}
4187 
4188 	iwm_enable_interrupts(sc);
4189 
4190 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
4191 
4192 	return 0;
4193 }
4194 
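/*
 * Family-8000 firmware images contain sections for two CPUs, with
 * special separator offsets marking the CPU1/CPU2 boundary and the
 * start of CPU2's paging sections. After each section is uploaded,
 * its number is acknowledged to the ucode via the FH load status
 * register.
 */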
4195 int
4196 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
4197     int cpu, int *first_ucode_section)
4198 {
4199 	int shift_param;
4200 	int i, err = 0, sec_num = 0x1;
4201 	uint32_t val, last_read_idx = 0;
4202 	void *data;
4203 	uint32_t dlen;
4204 	uint32_t offset;
4205 
4206 	if (cpu == 1) {
4207 		shift_param = 0;
4208 		*first_ucode_section = 0;
4209 	} else {
4210 		shift_param = 16;
4211 		(*first_ucode_section)++;
4212 	}
4213 
4214 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
4215 		last_read_idx = i;
4216 		data = fws->fw_sect[i].fws_data;
4217 		dlen = fws->fw_sect[i].fws_len;
4218 		offset = fws->fw_sect[i].fws_devoff;
4219 
4220 		/*
4221 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
4222 		 * CPU1 to CPU2.
4223 		 * PAGING_SEPARATOR_SECTION delimiter - separate between
4224 		 * CPU2 non paged to CPU2 paging sec.
4225 		 */
4226 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
4227 		    offset == IWM_PAGING_SEPARATOR_SECTION)
4228 			break;
4229 
4230 		if (dlen > sc->sc_fwdmasegsz) {
4231 			err = EFBIG;
4232 		} else
4233 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
4234 		if (err) {
4235 			printf("%s: could not load firmware chunk %d "
4236 			    "(error %d)\n", DEVNAME(sc), i, err);
4237 			return err;
4238 		}
4239 
4240 		/* Notify the ucode of the loaded section number and status */
4241 		if (iwm_nic_lock(sc)) {
4242 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
4243 			val = val | (sec_num << shift_param);
4244 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
4245 			sec_num = (sec_num << 1) | 0x1;
4246 			iwm_nic_unlock(sc);
4247 		} else {
4248 			err = EBUSY;
4249 			printf("%s: could not load firmware chunk %d "
4250 			    "(error %d)\n", DEVNAME(sc), i, err);
4251 			return err;
4252 		}
4253 	}
4254 
4255 	*first_ucode_section = last_read_idx;
4256 
4257 	if (iwm_nic_lock(sc)) {
4258 		if (cpu == 1)
4259 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
4260 		else
4261 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
4262 		iwm_nic_unlock(sc);
4263 	} else {
4264 		err = EBUSY;
4265 		printf("%s: could not finalize firmware loading (error %d)\n",
4266 		    DEVNAME(sc), err);
4267 		return err;
4268 	}
4269 
4270 	return 0;
4271 }
4272 
4273 int
4274 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4275 {
4276 	struct iwm_fw_sects *fws;
4277 	int err = 0;
4278 	int first_ucode_section;
4279 
4280 	fws = &sc->sc_fw.fw_sects[ucode_type];
4281 
4282 	/* configure the ucode to be ready to get the secured image */
4283 	/* release CPU reset */
4284 	if (iwm_nic_lock(sc)) {
4285 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
4286 		    IWM_RELEASE_CPU_RESET_BIT);
4287 		iwm_nic_unlock(sc);
4288 	}
4289 
4290 	/* load to FW the binary Secured sections of CPU1 */
4291 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
4292 	if (err)
4293 		return err;
4294 
4295 	/* load to FW the binary sections of CPU2 */
4296 	err = iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
4297 	if (err)
4298 		return err;
4299 
4300 	iwm_enable_interrupts(sc);
4301 	return 0;
4302 }
4303 
4304 int
4305 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4306 {
4307 	int err;
4308 
4309 	splassert(IPL_NET);
4310 
4311 	sc->sc_uc.uc_intr = 0;
4312 	sc->sc_uc.uc_ok = 0;
4313 
4314 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
4315 		err = iwm_load_firmware_8000(sc, ucode_type);
4316 	else
4317 		err = iwm_load_firmware_7000(sc, ucode_type);
4318 
4319 	if (err)
4320 		return err;
4321 
4322 	/* wait for the firmware to load */
4323 	err = tsleep_nsec(&sc->sc_uc, 0, "iwmuc", SEC_TO_NSEC(1));
4324 	if (err || !sc->sc_uc.uc_ok)
4325 		printf("%s: could not load firmware\n", DEVNAME(sc));
4326 
4327 	return err;
4328 }
4329 
4330 int
4331 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4332 {
4333 	int err;
4334 
4335 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
4336 
4337 	err = iwm_nic_init(sc);
4338 	if (err) {
4339 		printf("%s: unable to init nic\n", DEVNAME(sc));
4340 		return err;
4341 	}
4342 
4343 	/* make sure rfkill handshake bits are cleared */
4344 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4345 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
4346 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4347 
4348 	/* clear (again), then enable firmware load interrupt */
4349 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
4350 	iwm_enable_fwload_interrupt(sc);
4351 
4352 	/* really make sure rfkill handshake bits are cleared */
4353 	/* maybe we should write a few times more?  just to make sure */
4354 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4355 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4356 
4357 	return iwm_load_firmware(sc, ucode_type);
4358 }
4359 
4360 int
4361 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
4362 {
4363 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
4364 		.valid = htole32(valid_tx_ant),
4365 	};
4366 
4367 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
4368 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4369 }
4370 
4371 int
4372 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
4373 {
4374 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
4375 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
4376 
4377 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config |
4378 	    sc->sc_extra_phy_config);
4379 	phy_cfg_cmd.calib_control.event_trigger =
4380 	    sc->sc_default_calib[ucode_type].event_trigger;
4381 	phy_cfg_cmd.calib_control.flow_trigger =
4382 	    sc->sc_default_calib[ucode_type].flow_trigger;
4383 
4384 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
4385 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4386 }
4387 
4388 int
4389 iwm_send_dqa_cmd(struct iwm_softc *sc)
4390 {
4391 	struct iwm_dqa_enable_cmd dqa_cmd = {
4392 		.cmd_queue = htole32(IWM_DQA_CMD_QUEUE),
4393 	};
4394 	uint32_t cmd_id;
4395 
4396 	cmd_id = iwm_cmd_id(IWM_DQA_ENABLE_CMD, IWM_DATA_PATH_GROUP, 0);
4397 	return iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4398 }
4399 
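/*
 * Load a ucode image and wait for the firmware's "alive" notification.
 * The command queue ID depends on whether this firmware uses the
 * dynamic queue allocation (DQA) scheme.
 */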
4400 int
4401 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
4402 	enum iwm_ucode_type ucode_type)
4403 {
4404 	enum iwm_ucode_type old_type = sc->sc_uc_current;
4405 	struct iwm_fw_sects *fw = &sc->sc_fw.fw_sects[ucode_type];
4406 	int err;
4407 
4408 	err = iwm_read_firmware(sc);
4409 	if (err)
4410 		return err;
4411 
4412 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
4413 		sc->cmdqid = IWM_DQA_CMD_QUEUE;
4414 	else
4415 		sc->cmdqid = IWM_CMD_QUEUE;
4416 
4417 	sc->sc_uc_current = ucode_type;
4418 	err = iwm_start_fw(sc, ucode_type);
4419 	if (err) {
4420 		sc->sc_uc_current = old_type;
4421 		return err;
4422 	}
4423 
4424 	err = iwm_post_alive(sc);
4425 	if (err)
4426 		return err;
4427 
4428 	/*
4429 	 * Configure and operate the firmware paging mechanism.
4430 	 * The driver configures the paging flow only once; the CPU2
4431 	 * paging image is included in the IWM_UCODE_INIT image.
4432 	 */
4433 	if (fw->paging_mem_size) {
4434 		err = iwm_save_fw_paging(sc, fw);
4435 		if (err) {
4436 			printf("%s: failed to save the FW paging image\n",
4437 			    DEVNAME(sc));
4438 			return err;
4439 		}
4440 
4441 		err = iwm_send_paging_cmd(sc, fw);
4442 		if (err) {
4443 			printf("%s: failed to send the paging cmd\n",
4444 			    DEVNAME(sc));
4445 			iwm_free_fw_paging(sc);
4446 			return err;
4447 		}
4448 	}
4449 
4450 	return 0;
4451 }
4452 
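/*
 * Boot the INIT firmware image and wait for it to complete. With
 * 'justnvm' set we only read the NVM and derive our MAC address;
 * otherwise we also trigger the radio calibrations and wait for
 * both IWM_INIT_COMPLETE and IWM_CALIB_COMPLETE notifications.
 */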
4453 int
4454 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
4455 {
4456 	const int wait_flags = (IWM_INIT_COMPLETE | IWM_CALIB_COMPLETE);
4457 	int err, s;
4458 
4459 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
4460 		printf("%s: radio is disabled by hardware switch\n",
4461 		    DEVNAME(sc));
4462 		return EPERM;
4463 	}
4464 
4465 	s = splnet();
4466 	sc->sc_init_complete = 0;
4467 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
4468 	if (err) {
4469 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
4470 		splx(s);
4471 		return err;
4472 	}
4473 
4474 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
4475 		err = iwm_send_bt_init_conf(sc);
4476 		if (err) {
4477 			printf("%s: could not init bt coex (error %d)\n",
4478 			    DEVNAME(sc), err);
4479 			splx(s);
4480 			return err;
4481 		}
4482 	}
4483 
4484 	if (justnvm) {
4485 		err = iwm_nvm_init(sc);
4486 		if (err) {
4487 			printf("%s: failed to read nvm\n", DEVNAME(sc));
4488 			splx(s);
4489 			return err;
4490 		}
4491 
4492 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
4493 			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
4494 			    sc->sc_nvm.hw_addr);
4495 
4496 		splx(s);
4497 		return 0;
4498 	}
4499 
4500 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
4501 	if (err) {
4502 		splx(s);
4503 		return err;
4504 	}
4505 
4506 	/* Send TX valid antennas before triggering calibrations */
4507 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
4508 	if (err) {
4509 		splx(s);
4510 		return err;
4511 	}
4512 
4513 	/*
4514 	 * Send the PHY configuration command to the init uCode
4515 	 * to start the 16.0 uCode init image's internal calibrations.
4516 	 */
4517 	err = iwm_send_phy_cfg_cmd(sc);
4518 	if (err) {
4519 		splx(s);
4520 		return err;
4521 	}
4522 
4523 	/*
4524 	 * Nothing to do but wait for the init complete and phy DB
4525 	 * notifications from the firmware.
4526 	 */
4527 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4528 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwminit",
4529 		    SEC_TO_NSEC(2));
4530 		if (err)
4531 			break;
4532 	}
4533 
4534 	splx(s);
4535 	return err;
4536 }
4537 
4538 int
4539 iwm_config_ltr(struct iwm_softc *sc)
4540 {
4541 	struct iwm_ltr_config_cmd cmd = {
4542 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
4543 	};
4544 
4545 	if (!sc->sc_ltr_enabled)
4546 		return 0;
4547 
4548 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4549 }
4550 
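/*
 * Attach a fresh mbuf cluster to an Rx descriptor. MQ-capable devices
 * take the full 64-bit DMA address; older devices store the address
 * shifted right by 8 bits, so Rx buffers must be 256-byte aligned.
 */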
4551 int
4552 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
4553 {
4554 	struct iwm_rx_ring *ring = &sc->rxq;
4555 	struct iwm_rx_data *data = &ring->data[idx];
4556 	struct mbuf *m;
4557 	int err;
4558 	int fatal = 0;
4559 
4560 	m = m_gethdr(M_DONTWAIT, MT_DATA);
4561 	if (m == NULL)
4562 		return ENOBUFS;
4563 
4564 	if (size <= MCLBYTES) {
4565 		MCLGET(m, M_DONTWAIT);
4566 	} else {
4567 		MCLGETL(m, M_DONTWAIT, IWM_RBUF_SIZE);
4568 	}
4569 	if ((m->m_flags & M_EXT) == 0) {
4570 		m_freem(m);
4571 		return ENOBUFS;
4572 	}
4573 
4574 	if (data->m != NULL) {
4575 		bus_dmamap_unload(sc->sc_dmat, data->map);
4576 		fatal = 1;
4577 	}
4578 
4579 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4580 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4581 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4582 	if (err) {
4583 		/* XXX */
4584 		if (fatal)
4585 			panic("iwm: could not load RX mbuf");
4586 		m_freem(m);
4587 		return err;
4588 	}
4589 	data->m = m;
4590 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
4591 
4592 	/* Update RX descriptor. */
4593 	if (sc->sc_mqrx_supported) {
4594 		((uint64_t *)ring->desc)[idx] =
4595 		    htole64(data->map->dm_segs[0].ds_addr);
4596 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4597 		    idx * sizeof(uint64_t), sizeof(uint64_t),
4598 		    BUS_DMASYNC_PREWRITE);
4599 	} else {
4600 		((uint32_t *)ring->desc)[idx] =
4601 		    htole32(data->map->dm_segs[0].ds_addr >> 8);
4602 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4603 		    idx * sizeof(uint32_t), sizeof(uint32_t),
4604 		    BUS_DMASYNC_PREWRITE);
4605 	}
4606 
4607 	return 0;
4608 }
4609 
4610 /*
4611  * RSSI values are reported by the FW as positive values; negate them
4612  * to obtain dBm.  Account for missing antennas by replacing 0 values
4613  * with -256 dBm: practically zero power and an infeasible 8-bit value.
4614  */
4615 int
4616 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
4617 {
4618 	int energy_a, energy_b, energy_c, max_energy;
4619 	uint32_t val;
4620 
4621 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
4622 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
4623 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
4624 	energy_a = energy_a ? -energy_a : -256;
4625 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
4626 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
4627 	energy_b = energy_b ? -energy_b : -256;
4628 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
4629 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
4630 	energy_c = energy_c ? -energy_c : -256;
4631 	max_energy = MAX(energy_a, energy_b);
4632 	max_energy = MAX(max_energy, energy_c);
4633 
4634 	return max_energy;
4635 }
4636 
4637 int
4638 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
4639     struct iwm_rx_mpdu_desc *desc)
4640 {
4641 	int energy_a, energy_b;
4642 
4643 	energy_a = desc->v1.energy_a;
4644 	energy_b = desc->v1.energy_b;
4645 	energy_a = energy_a ? -energy_a : -256;
4646 	energy_b = energy_b ? -energy_b : -256;
4647 	return MAX(energy_a, energy_b);
4648 }
4649 
4650 void
4651 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4652     struct iwm_rx_data *data)
4653 {
4654 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
4655 
4656 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4657 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
4658 
4659 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
4660 }
4661 
4662 /*
4663  * Retrieve the average noise (in dBm) among receivers.
4664  */
4665 int
4666 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
4667 {
4668 	int i, total, nbant, noise;
4669 
4670 	total = nbant = noise = 0;
4671 	for (i = 0; i < 3; i++) {
4672 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
4673 		if (noise) {
4674 			total += noise;
4675 			nbant++;
4676 		}
4677 	}
4678 
4679 	/* There should be at least one antenna but check anyway. */
4680 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4681 }
4682 
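/*
 * Replay detection for hardware-decrypted CCMP frames. The 48-bit
 * packet number is spread over the CCMP header as PN0 PN1 <rsvd>
 * <keyid/ExtIV> PN2 PN3 PN4 PN5; it must increase strictly
 * monotonically per TID, except for A-MSDU subframes which share
 * the PN of their first subframe.
 */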
4683 int
4684 iwm_ccmp_decap(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4685     struct ieee80211_rxinfo *rxi)
4686 {
4687 	struct ieee80211com *ic = &sc->sc_ic;
4688 	struct ieee80211_key *k = &ni->ni_pairwise_key;
4689 	struct ieee80211_frame *wh;
4690 	uint64_t pn, *prsc;
4691 	uint8_t *ivp;
4692 	uint8_t tid;
4693 	int hdrlen, hasqos;
4694 
4695 	wh = mtod(m, struct ieee80211_frame *);
4696 	hdrlen = ieee80211_get_hdrlen(wh);
4697 	ivp = (uint8_t *)wh + hdrlen;
4698 
4699 	/* Check that ExtIV bit is set. */
4700 	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
4701 		return 1;
4702 
4703 	hasqos = ieee80211_has_qos(wh);
4704 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4705 	prsc = &k->k_rsc[tid];
4706 
4707 	/* Extract the 48-bit PN from the CCMP header. */
4708 	pn = (uint64_t)ivp[0]       |
4709 	     (uint64_t)ivp[1] <<  8 |
4710 	     (uint64_t)ivp[4] << 16 |
4711 	     (uint64_t)ivp[5] << 24 |
4712 	     (uint64_t)ivp[6] << 32 |
4713 	     (uint64_t)ivp[7] << 40;
4714 	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
4715 		if (pn < *prsc) {
4716 			ic->ic_stats.is_ccmp_replays++;
4717 			return 1;
4718 		}
4719 	} else if (pn <= *prsc) {
4720 		ic->ic_stats.is_ccmp_replays++;
4721 		return 1;
4722 	}
4723 	/* Last seen packet number is updated in ieee80211_inputm(). */
4724 
4725 	/*
4726 	 * Some firmware versions strip the MIC, and some don't. It is not
4727 	 * clear which of the capability flags could tell us what to expect.
4728 	 * For now, keep things simple and just leave the MIC in place if
4729 	 * it is present.
4730 	 *
4731 	 * The IV will be stripped by ieee80211_inputm().
4732 	 */
4733 	return 0;
4734 }
4735 
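/*
 * Check the firmware's decryption status bits for protected unicast
 * data frames. Frames which the firmware decrypted successfully are
 * flagged with IEEE80211_RXI_HWDEC so that net80211 skips software
 * decryption; frames with a failed MIC check are counted and dropped.
 */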
4736 int
4737 iwm_rx_hwdecrypt(struct iwm_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
4738     struct ieee80211_rxinfo *rxi)
4739 {
4740 	struct ieee80211com *ic = &sc->sc_ic;
4741 	struct ifnet *ifp = IC2IFP(ic);
4742 	struct ieee80211_frame *wh;
4743 	struct ieee80211_node *ni;
4744 	int ret = 0;
4745 	uint8_t type, subtype;
4746 
4747 	wh = mtod(m, struct ieee80211_frame *);
4748 
4749 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4750 	if (type == IEEE80211_FC0_TYPE_CTL)
4751 		return 0;
4752 
4753 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4754 	if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
4755 		return 0;
4756 
4757 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4758 	    !(wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
4759 		return 0;
4760 
4761 	ni = ieee80211_find_rxnode(ic, wh);
4762 	/* Handle hardware decryption. */
4763 	if ((ni->ni_flags & IEEE80211_NODE_RXPROT) &&
4764 	    ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
4765 		if ((rx_pkt_status & IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4766 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4767 			ic->ic_stats.is_ccmp_dec_errs++;
4768 			ret = 1;
4769 			goto out;
4770 		}
4771 		/* Check whether decryption was successful or not. */
4772 		if ((rx_pkt_status &
4773 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4774 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) !=
4775 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4776 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) {
4777 			ic->ic_stats.is_ccmp_dec_errs++;
4778 			ret = 1;
4779 			goto out;
4780 		}
4781 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
4782 	}
4783 out:
4784 	if (ret)
4785 		ifp->if_ierrors++;
4786 	ieee80211_release_node(ic, ni);
4787 	return ret;
4788 }
4789 
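/*
 * Pass a received frame to net80211, after performing the CCMP replay
 * check for hardware-decrypted frames and feeding a radiotap header
 * to any attached bpf listeners.
 */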
4790 void
4791 iwm_rx_frame(struct iwm_softc *sc, struct mbuf *m, int chanidx,
4792     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
4793     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4794     struct mbuf_list *ml)
4795 {
4796 	struct ieee80211com *ic = &sc->sc_ic;
4797 	struct ifnet *ifp = IC2IFP(ic);
4798 	struct ieee80211_frame *wh;
4799 	struct ieee80211_node *ni;
4800 
4801 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
4802 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4803 
4804 	wh = mtod(m, struct ieee80211_frame *);
4805 	ni = ieee80211_find_rxnode(ic, wh);
4806 	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
4807 	    iwm_ccmp_decap(sc, m, ni, rxi) != 0) {
4808 		ifp->if_ierrors++;
4809 		m_freem(m);
4810 		ieee80211_release_node(ic, ni);
4811 		return;
4812 	}
4813 
4814 #if NBPFILTER > 0
4815 	if (sc->sc_drvbpf != NULL) {
4816 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
4817 		uint16_t chan_flags;
4818 
4819 		tap->wr_flags = 0;
4820 		if (is_shortpre)
4821 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4822 		tap->wr_chan_freq =
4823 		    htole16(ic->ic_channels[chanidx].ic_freq);
4824 		chan_flags = ic->ic_channels[chanidx].ic_flags;
4825 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
4826 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
4827 			chan_flags &= ~IEEE80211_CHAN_HT;
4828 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
4829 		}
4830 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
4831 			chan_flags &= ~IEEE80211_CHAN_VHT;
4832 		tap->wr_chan_flags = htole16(chan_flags);
4833 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4834 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4835 		tap->wr_tsft = device_timestamp;
4836 		if (rate_n_flags & IWM_RATE_MCS_HT_MSK) {
4837 			uint8_t mcs = (rate_n_flags &
4838 			    (IWM_RATE_HT_MCS_RATE_CODE_MSK |
4839 			    IWM_RATE_HT_MCS_NSS_MSK));
4840 			tap->wr_rate = (0x80 | mcs);
4841 		} else {
4842 			uint8_t rate = (rate_n_flags &
4843 			    IWM_RATE_LEGACY_RATE_MSK);
4844 			switch (rate) {
4845 			/* CCK rates. */
4846 			case  10: tap->wr_rate =   2; break;
4847 			case  20: tap->wr_rate =   4; break;
4848 			case  55: tap->wr_rate =  11; break;
4849 			case 110: tap->wr_rate =  22; break;
4850 			/* OFDM rates. */
4851 			case 0xd: tap->wr_rate =  12; break;
4852 			case 0xf: tap->wr_rate =  18; break;
4853 			case 0x5: tap->wr_rate =  24; break;
4854 			case 0x7: tap->wr_rate =  36; break;
4855 			case 0x9: tap->wr_rate =  48; break;
4856 			case 0xb: tap->wr_rate =  72; break;
4857 			case 0x1: tap->wr_rate =  96; break;
4858 			case 0x3: tap->wr_rate = 108; break;
4859 			/* Unknown rate: should not happen. */
4860 			default:  tap->wr_rate =   0;
4861 			}
4862 		}
4863 
4864 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4865 		    m, BPF_DIRECTION_IN);
4866 	}
4867 #endif
4868 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4869 	ieee80211_release_node(ic, ni);
4870 }
4871 
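/*
 * Rx path for devices without MQ support: combine the PHY info cached
 * from the preceding RX_PHY_CMD notification with this MPDU's payload,
 * validate frame length and CRC status, and pass the frame up.
 */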
4872 void
4873 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
4874     size_t maxlen, struct mbuf_list *ml)
4875 {
4876 	struct ieee80211com *ic = &sc->sc_ic;
4877 	struct ieee80211_rxinfo rxi;
4878 	struct iwm_rx_phy_info *phy_info;
4879 	struct iwm_rx_mpdu_res_start *rx_res;
4880 	int device_timestamp;
4881 	uint16_t phy_flags;
4882 	uint32_t len;
4883 	uint32_t rx_pkt_status;
4884 	int rssi, chanidx, rate_n_flags;
4885 
4886 	memset(&rxi, 0, sizeof(rxi));
4887 
4888 	phy_info = &sc->sc_last_phy_info;
4889 	rx_res = (struct iwm_rx_mpdu_res_start *)pktdata;
4890 	len = le16toh(rx_res->byte_count);
4891 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4892 		/* Allow control frames in monitor mode. */
4893 		if (len < sizeof(struct ieee80211_frame_cts)) {
4894 			ic->ic_stats.is_rx_tooshort++;
4895 			IC2IFP(ic)->if_ierrors++;
4896 			m_freem(m);
4897 			return;
4898 		}
4899 	} else if (len < sizeof(struct ieee80211_frame)) {
4900 		ic->ic_stats.is_rx_tooshort++;
4901 		IC2IFP(ic)->if_ierrors++;
4902 		m_freem(m);
4903 		return;
4904 	}
4905 	if (len > maxlen - sizeof(*rx_res)) {
4906 		IC2IFP(ic)->if_ierrors++;
4907 		m_freem(m);
4908 		return;
4909 	}
4910 
4911 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
4912 		m_freem(m);
4913 		return;
4914 	}
4915 
4916 	rx_pkt_status = le32toh(*(uint32_t *)(pktdata + sizeof(*rx_res) + len));
4917 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
4918 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
4919 		m_freem(m);
4920 		return; /* drop */
4921 	}
4922 
4923 	m->m_data = pktdata + sizeof(*rx_res);
4924 	m->m_pkthdr.len = m->m_len = len;
4925 
4926 	if (iwm_rx_hwdecrypt(sc, m, rx_pkt_status, &rxi)) {
4927 		m_freem(m);
4928 		return;
4929 	}
4930 
4931 	chanidx = letoh32(phy_info->channel);
4932 	device_timestamp = le32toh(phy_info->system_timestamp);
4933 	phy_flags = letoh16(phy_info->phy_flags);
4934 	rate_n_flags = le32toh(phy_info->rate_n_flags);
4935 
4936 	rssi = iwm_get_signal_strength(sc, phy_info);
4937 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
4938 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
4939 
4940 	rxi.rxi_rssi = rssi;
4941 	rxi.rxi_tstamp = device_timestamp;
4942 	rxi.rxi_chan = chanidx;
4943 
4944 	iwm_rx_frame(sc, m, chanidx, rx_pkt_status,
4945 	    (phy_flags & IWM_PHY_INFO_FLAG_SHPREAMBLE),
4946 	    rate_n_flags, device_timestamp, &rxi, ml);
4947 }
4948 
4949 void
4950 iwm_flip_address(uint8_t *addr)
4951 {
4952 	int i;
4953 	uint8_t mac_addr[ETHER_ADDR_LEN];
4954 
4955 	for (i = 0; i < ETHER_ADDR_LEN; i++)
4956 		mac_addr[i] = addr[ETHER_ADDR_LEN - i - 1];
4957 	IEEE80211_ADDR_COPY(addr, mac_addr);
4958 }
4959 
4960 /*
4961  * Drop duplicate 802.11 retransmissions
4962  * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4963  * and handle pseudo-duplicate frames which result from deaggregation
4964  * of A-MSDU frames in hardware.
4965  */
4966 int
4967 iwm_detect_duplicate(struct iwm_softc *sc, struct mbuf *m,
4968     struct iwm_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4969 {
4970 	struct ieee80211com *ic = &sc->sc_ic;
4971 	struct iwm_node *in = (void *)ic->ic_bss;
4972 	struct iwm_rxq_dup_data *dup_data = &in->dup_data;
4973 	uint8_t tid = IWM_MAX_TID_COUNT, subframe_idx;
4974 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4975 	uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4976 	uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4977 	int hasqos = ieee80211_has_qos(wh);
4978 	uint16_t seq;
4979 
4980 	if (type == IEEE80211_FC0_TYPE_CTL ||
4981 	    (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4982 	    IEEE80211_IS_MULTICAST(wh->i_addr1))
4983 		return 0;
4984 
4985 	if (hasqos) {
4986 		tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4987 		if (tid > IWM_MAX_TID_COUNT)
4988 			tid = IWM_MAX_TID_COUNT;
4989 	}
4990 
4991 	/* If this frame was not part of an A-MSDU the subframe index will be 0. */
4992 	subframe_idx = desc->amsdu_info &
4993 		IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4994 
4995 	seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
4996 	if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
4997 	    dup_data->last_seq[tid] == seq &&
4998 	    dup_data->last_sub_frame[tid] >= subframe_idx)
4999 		return 1;
5000 
5001 	/*
5002 	 * Allow the same frame sequence number for all A-MSDU subframes
5003 	 * following the first subframe.
5004 	 * Otherwise these subframes would be discarded as replays.
5005 	 */
5006 	if (dup_data->last_seq[tid] == seq &&
5007 	    subframe_idx > dup_data->last_sub_frame[tid] &&
5008 	    (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU)) {
5009 		rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
5010 	}
5011 
5012 	dup_data->last_seq[tid] = seq;
5013 	dup_data->last_sub_frame[tid] = subframe_idx;
5014 
5015 	return 0;
5016 }
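
/*
 * Illustrative example of the logic above: hardware deaggregates an
 * A-MSDU into subframes which all carry the same SN, say 100. The first
 * subframe (index 0) updates last_seq[tid]; later subframes with the
 * same SN and a higher subframe index are passed on with
 * IEEE80211_RXI_SAME_SEQ set so net80211 does not discard them as
 * replays. A frame with the retry bit set, SN 100, and a subframe index
 * not above last_sub_frame[tid] is reported as a duplicate instead.
 */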
5017 
5018 /*
5019  * Returns true if sn2 - buffer_size < sn1 < sn2.
5020  * To be used only in order to compare reorder buffer head with NSSN.
5021  * We fully trust NSSN unless it is behind us due to reorder timeout.
5022  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
5023  */
5024 int
5025 iwm_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
5026 {
5027 	return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
5028 }
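
/*
 * Worked example (assuming SEQ_LT() compares 12-bit sequence numbers
 * modulo 4096): with buffer_size=64, sn1=4090 and sn2=10, the forward
 * distance from 4090 to 10 is only 16, so SEQ_LT(4090, 10) holds, and
 * 4090 is not below sn2 - buffer_size = 4042 (mod 4096). Hence
 * iwm_is_sn_less(4090, 10, 64) is true and frames may be released
 * across the sequence number wrap-around.
 */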
5029 
5030 void
5031 iwm_release_frames(struct iwm_softc *sc, struct ieee80211_node *ni,
5032     struct iwm_rxba_data *rxba, struct iwm_reorder_buffer *reorder_buf,
5033     uint16_t nssn, struct mbuf_list *ml)
5034 {
5035 	struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
5036 	uint16_t ssn = reorder_buf->head_sn;
5037 
5038 	/* ignore nssn smaller than head sn - this can happen due to timeout */
5039 	if (iwm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
5040 		goto set_timer;
5041 
5042 	while (iwm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
5043 		int index = ssn % reorder_buf->buf_size;
5044 		struct mbuf *m;
5045 		int chanidx, is_shortpre;
5046 		uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
5047 		struct ieee80211_rxinfo *rxi;
5048 
5049 		/* This data is the same for all A-MSDU subframes. */
5050 		chanidx = entries[index].chanidx;
5051 		rx_pkt_status = entries[index].rx_pkt_status;
5052 		is_shortpre = entries[index].is_shortpre;
5053 		rate_n_flags = entries[index].rate_n_flags;
5054 		device_timestamp = entries[index].device_timestamp;
5055 		rxi = &entries[index].rxi;
5056 
5057 		/*
5058 		 * Empty the list. It will hold more than one frame in the
5059 		 * A-MSDU case. An empty list is valid as well, since the nssn
5060 		 * indicates that frames were received.
5061 		 */
5062 		while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
5063 			iwm_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
5064 			    rate_n_flags, device_timestamp, rxi, ml);
5065 			reorder_buf->num_stored--;
5066 
5067 			/*
5068 			 * Allow the same frame sequence number and CCMP PN for
5069 			 * all A-MSDU subframes following the first subframe.
5070 			 * Otherwise they would be discarded as replays.
5071 			 */
5072 			rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
5073 			rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5074 		}
5075 
5076 		ssn = (ssn + 1) & 0xfff;
5077 	}
5078 	reorder_buf->head_sn = nssn;
5079 
5080 set_timer:
5081 	if (reorder_buf->num_stored && !reorder_buf->removed) {
5082 		timeout_add_usec(&reorder_buf->reorder_timer,
5083 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
5084 	} else
5085 		timeout_del(&reorder_buf->reorder_timer);
5086 }
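
/*
 * Illustrative example: with head_sn=5, nssn=8 and buf_size=64, the
 * loop above flushes buffer slots 5, 6 and 7 (empty slots are valid
 * holes) and advances head_sn to 8. If frames remain stored after the
 * flush, the reorder timer is re-armed; otherwise it is disarmed.
 */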
5087 
5088 int
5089 iwm_oldsn_workaround(struct iwm_softc *sc, struct ieee80211_node *ni, int tid,
5090     struct iwm_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
5091 {
5092 	struct ieee80211com *ic = &sc->sc_ic;
5093 
5094 	if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
5095 		/* we have a new (A-)MPDU ... */
5096 
5097 		/*
5098 		 * reset counter to 0 if we didn't have any oldsn in
5099 		 * the last A-MPDU (as detected by GP2 being identical)
5100 		 */
5101 		if (!buffer->consec_oldsn_prev_drop)
5102 			buffer->consec_oldsn_drops = 0;
5103 
5104 		/* either way, update our tracking state */
5105 		buffer->consec_oldsn_ampdu_gp2 = gp2;
5106 	} else if (buffer->consec_oldsn_prev_drop) {
5107 		/*
5108 		 * tracking state didn't change, and we had an old SN
5109 		 * indication before - do nothing in this case, we
5110 		 * already noted this one down and are waiting for the
5111 		 * next A-MPDU (by GP2)
5112 		 */
5113 		return 0;
5114 	}
5115 
5116 	/* return unless this MPDU has old SN */
5117 	if (!(reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN))
5118 		return 0;
5119 
5120 	/* update state */
5121 	buffer->consec_oldsn_prev_drop = 1;
5122 	buffer->consec_oldsn_drops++;
5123 
5124 	/* if limit is reached, send del BA and reset state */
5125 	if (buffer->consec_oldsn_drops == IWM_AMPDU_CONSEC_DROPS_DELBA) {
5126 		ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
5127 		    0, tid);
5128 		buffer->consec_oldsn_prev_drop = 0;
5129 		buffer->consec_oldsn_drops = 0;
5130 		return 1;
5131 	}
5132 
5133 	return 0;
5134 }
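
/*
 * Illustrative timeline: GP2 timestamps identify A-MPDUs, so the
 * counter above increments at most once per A-MPDU that triggered an
 * old-SN drop. Once IWM_AMPDU_CONSEC_DROPS_DELBA consecutive A-MPDUs
 * have been dropped this way, the BA session is torn down via
 * ieee80211_delba_request() and the tracking state is reset.
 */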
5135 
5136 /*
5137  * Handle re-ordering of frames which were de-aggregated in hardware.
5138  * Returns 1 if the MPDU was consumed (buffered or dropped).
5139  * Returns 0 if the MPDU should be passed to upper layer.
5140  */
5141 int
5142 iwm_rx_reorder(struct iwm_softc *sc, struct mbuf *m, int chanidx,
5143     struct iwm_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
5144     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
5145     struct mbuf_list *ml)
5146 {
5147 	struct ieee80211com *ic = &sc->sc_ic;
5148 	struct ieee80211_frame *wh;
5149 	struct ieee80211_node *ni;
5150 	struct iwm_rxba_data *rxba;
5151 	struct iwm_reorder_buffer *buffer;
5152 	uint32_t reorder_data = le32toh(desc->reorder_data);
5153 	int is_amsdu = (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU);
5154 	int last_subframe =
5155 		(desc->amsdu_info & IWM_RX_MPDU_AMSDU_LAST_SUBFRAME);
5156 	uint8_t tid;
5157 	uint8_t subframe_idx = (desc->amsdu_info &
5158 	    IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5159 	struct iwm_reorder_buf_entry *entries;
5160 	int index;
5161 	uint16_t nssn, sn;
5162 	uint8_t baid, type, subtype;
5163 	int hasqos;
5164 
5165 	wh = mtod(m, struct ieee80211_frame *);
5166 	hasqos = ieee80211_has_qos(wh);
5167 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
5168 
5169 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5170 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5171 
5172 	/*
5173 	 * We are only interested in Block Ack requests and unicast QoS data.
5174 	 */
5175 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
5176 		return 0;
5177 	if (hasqos) {
5178 		if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
5179 			return 0;
5180 	} else {
5181 		if (type != IEEE80211_FC0_TYPE_CTL ||
5182 		    subtype != IEEE80211_FC0_SUBTYPE_BAR)
5183 			return 0;
5184 	}
5185 
5186 	baid = (reorder_data & IWM_RX_MPDU_REORDER_BAID_MASK) >>
5187 		IWM_RX_MPDU_REORDER_BAID_SHIFT;
5188 	if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
5189 	    baid >= nitems(sc->sc_rxba_data))
5190 		return 0;
5191 
5192 	rxba = &sc->sc_rxba_data[baid];
5193 	if (rxba->baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
5194 	    tid != rxba->tid || rxba->sta_id != IWM_STATION_ID)
5195 		return 0;
5196 
5197 	if (rxba->timeout != 0)
5198 		getmicrouptime(&rxba->last_rx);
5199 
5200 	/* Bypass A-MPDU re-ordering in net80211. */
5201 	rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
5202 
5203 	nssn = reorder_data & IWM_RX_MPDU_REORDER_NSSN_MASK;
5204 	sn = (reorder_data & IWM_RX_MPDU_REORDER_SN_MASK) >>
5205 		IWM_RX_MPDU_REORDER_SN_SHIFT;
5206 
5207 	buffer = &rxba->reorder_buf;
5208 	entries = &rxba->entries[0];
5209 
5210 	if (!buffer->valid) {
5211 		if (reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN)
5212 			return 0;
5213 		buffer->valid = 1;
5214 	}
5215 
5216 	ni = ieee80211_find_rxnode(ic, wh);
5217 	if (type == IEEE80211_FC0_TYPE_CTL &&
5218 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
5219 		iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5220 		goto drop;
5221 	}
5222 
5223 	/*
5224 	 * If there was a significant jump in the nssn - adjust.
5225 	 * If the SN is smaller than the NSSN it might need to first go into
5226 	 * the reorder buffer, in which case we just release up to it and the
5227 	 * rest of the function will take care of storing it and releasing up to
5228 	 * the nssn.
5229 	 */
5230 	if (!iwm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
5231 	    buffer->buf_size) ||
5232 	    !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
5233 		uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
5234 		ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
5235 		iwm_release_frames(sc, ni, rxba, buffer, min_sn, ml);
5236 	}
5237 
5238 	if (iwm_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
5239 	    device_timestamp)) {
5240 		 /* BA session will be torn down. */
5241 		ic->ic_stats.is_ht_rx_ba_window_jump++;
5242 		goto drop;
5243 
5244 	}
5245 
5246 	/* drop any outdated packets */
5247 	if (SEQ_LT(sn, buffer->head_sn)) {
5248 		ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
5249 		goto drop;
5250 	}
5251 
5252 	/* release immediately if allowed by nssn and no stored frames */
5253 	if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
5254 		if (iwm_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
5255 		   (!is_amsdu || last_subframe))
5256 			buffer->head_sn = nssn;
5257 		ieee80211_release_node(ic, ni);
5258 		return 0;
5259 	}
5260 
5261 	/*
5262 	 * release immediately if there are no stored frames, and the sn is
5263 	 * equal to the head.
5264 	 * This can happen due to the reorder timer, where the NSSN is behind
5265 	 * head_sn. After we have released everything and the next frame in
5266 	 * sequence arrives, the NSSN says we cannot release it immediately,
5267 	 * even though technically there is no hole and we can move forward.
5268 	 */
5269 	if (!buffer->num_stored && sn == buffer->head_sn) {
5270 		if (!is_amsdu || last_subframe)
5271 			buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
5272 		ieee80211_release_node(ic, ni);
5273 		return 0;
5274 	}
5275 
5276 	index = sn % buffer->buf_size;
5277 
5278 	/*
5279 	 * Check whether we have already stored this frame.
5280 	 * Since an A-MSDU is either received in its entirety or not at all,
5281 	 * the logic is simple: if this buffer position already holds frames
5282 	 * and the last stored A-MSDU frame had a different SN, this is a
5283 	 * retransmission. If the SN is the same, an incrementing subframe
5284 	 * index means the same A-MSDU; otherwise it is a retransmission.
5285 	 */
5286 	if (!ml_empty(&entries[index].frames)) {
5287 		if (!is_amsdu) {
5288 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5289 			goto drop;
5290 		} else if (sn != buffer->last_amsdu ||
5291 		    buffer->last_sub_index >= subframe_idx) {
5292 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5293 			goto drop;
5294 		}
5295 	} else {
5296 		/* This data is the same for all A-MSDU subframes. */
5297 		entries[index].chanidx = chanidx;
5298 		entries[index].is_shortpre = is_shortpre;
5299 		entries[index].rate_n_flags = rate_n_flags;
5300 		entries[index].device_timestamp = device_timestamp;
5301 		memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
5302 	}
5303 
5304 	/* put in reorder buffer */
5305 	ml_enqueue(&entries[index].frames, m);
5306 	buffer->num_stored++;
5307 	getmicrouptime(&entries[index].reorder_time);
5308 
5309 	if (is_amsdu) {
5310 		buffer->last_amsdu = sn;
5311 		buffer->last_sub_index = subframe_idx;
5312 	}
5313 
5314 	/*
5315 	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
5316 	 * The reason is that NSSN advances on the first sub-frame, and may
5317 	 * cause the reorder buffer to advance before all the sub-frames arrive.
5318 	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
5319 	 * SN 1. The NSSN for the first sub-frame will be 3, so the driver
5320 	 * releases SN 0, 1, 2. When sub-frame 1 arrives, the reorder buffer
5321 	 * is already ahead and the sub-frame will be dropped.
5322 	 * If the last sub-frame is not on this queue, we will get a frame
5323 	 * release notification with an up-to-date NSSN.
5324 	 */
5325 	if (!is_amsdu || last_subframe)
5326 		iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5327 
5328 	ieee80211_release_node(ic, ni);
5329 	return 1;
5330 
5331 drop:
5332 	m_freem(m);
5333 	ieee80211_release_node(ic, ni);
5334 	return 1;
5335 }
5336 
5337 void
5338 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
5339     size_t maxlen, struct mbuf_list *ml)
5340 {
5341 	struct ieee80211com *ic = &sc->sc_ic;
5342 	struct ieee80211_rxinfo rxi;
5343 	struct iwm_rx_mpdu_desc *desc;
5344 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
5345 	int rssi;
5346 	uint8_t chanidx;
5347 	uint16_t phy_info;
5348 
5349 	memset(&rxi, 0, sizeof(rxi));
5350 
5351 	desc = (struct iwm_rx_mpdu_desc *)pktdata;
5352 
5353 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
5354 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
5355 		m_freem(m);
5356 		return; /* drop */
5357 	}
5358 
5359 	len = le16toh(desc->mpdu_len);
5360 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5361 		/* Allow control frames in monitor mode. */
5362 		if (len < sizeof(struct ieee80211_frame_cts)) {
5363 			ic->ic_stats.is_rx_tooshort++;
5364 			IC2IFP(ic)->if_ierrors++;
5365 			m_freem(m);
5366 			return;
5367 		}
5368 	} else if (len < sizeof(struct ieee80211_frame)) {
5369 		ic->ic_stats.is_rx_tooshort++;
5370 		IC2IFP(ic)->if_ierrors++;
5371 		m_freem(m);
5372 		return;
5373 	}
5374 	if (len > maxlen - sizeof(*desc)) {
5375 		IC2IFP(ic)->if_ierrors++;
5376 		m_freem(m);
5377 		return;
5378 	}
5379 
5380 	m->m_data = pktdata + sizeof(*desc);
5381 	m->m_pkthdr.len = m->m_len = len;
5382 
5383 	/* Account for padding following the frame header. */
5384 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD) {
5385 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5386 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5387 		if (type == IEEE80211_FC0_TYPE_CTL) {
5388 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
5389 			case IEEE80211_FC0_SUBTYPE_CTS:
5390 				hdrlen = sizeof(struct ieee80211_frame_cts);
5391 				break;
5392 			case IEEE80211_FC0_SUBTYPE_ACK:
5393 				hdrlen = sizeof(struct ieee80211_frame_ack);
5394 				break;
5395 			default:
5396 				hdrlen = sizeof(struct ieee80211_frame_min);
5397 				break;
5398 			}
5399 		} else
5400 			hdrlen = ieee80211_get_hdrlen(wh);
5401 
5402 		if ((le16toh(desc->status) &
5403 		    IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
5404 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
5405 			/* Padding is inserted after the IV. */
5406 			hdrlen += IEEE80211_CCMP_HDRLEN;
5407 		}
5408 
5409 		memmove(m->m_data + 2, m->m_data, hdrlen);
5410 		m_adj(m, 2);
5411 	}
5412 
5413 	/*
5414 	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
5415 	 * in place for each subframe. But it leaves the 'A-MSDU present'
5416 	 * bit set in the frame header. We need to clear this bit ourselves.
5417 	 *
5418 	 * And we must allow the same CCMP PN for subframes following the
5419 	 * first subframe. Otherwise they would be discarded as replays.
5420 	 */
5421 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU) {
5422 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5423 		uint8_t subframe_idx = (desc->amsdu_info &
5424 		    IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5425 		if (subframe_idx > 0)
5426 			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5427 		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
5428 		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
5429 			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
5430 			    struct ieee80211_qosframe_addr4 *);
5431 			qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5432 
5433 			/* HW reverses addr3 and addr4. */
5434 			iwm_flip_address(qwh4->i_addr3);
5435 			iwm_flip_address(qwh4->i_addr4);
5436 		} else if (ieee80211_has_qos(wh) &&
5437 		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
5438 			struct ieee80211_qosframe *qwh = mtod(m,
5439 			    struct ieee80211_qosframe *);
5440 			qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5441 
5442 			/* HW reverses addr3. */
5443 			iwm_flip_address(qwh->i_addr3);
5444 		}
5445 	}
5446 
5447 	/*
5448 	 * Verify decryption before duplicate detection. The latter uses
5449 	 * the TID supplied in QoS frame headers and this TID is implicitly
5450 	 * verified as part of the CCMP nonce.
5451 	 */
5452 	if (iwm_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
5453 		m_freem(m);
5454 		return;
5455 	}
5456 
5457 	if (iwm_detect_duplicate(sc, m, desc, &rxi)) {
5458 		m_freem(m);
5459 		return;
5460 	}
5461 
5462 	phy_info = le16toh(desc->phy_info);
5463 	rate_n_flags = le32toh(desc->v1.rate_n_flags);
5464 	chanidx = desc->v1.channel;
5465 	device_timestamp = desc->v1.gp2_on_air_rise;
5466 
5467 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
5468 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
5469 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
5470 
5471 	rxi.rxi_rssi = rssi;
5472 	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
5473 	rxi.rxi_chan = chanidx;
5474 
5475 	if (iwm_rx_reorder(sc, m, chanidx, desc,
5476 	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5477 	    rate_n_flags, device_timestamp, &rxi, ml))
5478 		return;
5479 
5480 	iwm_rx_frame(sc, m, chanidx, le16toh(desc->status),
5481 	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5482 	    rate_n_flags, device_timestamp, &rxi, ml);
5483 }
5484 
5485 void
5486 iwm_ra_choose(struct iwm_softc *sc, struct ieee80211_node *ni)
5487 {
5488 	struct ieee80211com *ic = &sc->sc_ic;
5489 	struct iwm_node *in = (void *)ni;
5490 	int old_txmcs = ni->ni_txmcs;
5491 	int old_nss = ni->ni_vht_ss;
5492 
5493 	if (ni->ni_flags & IEEE80211_NODE_VHT)
5494 		ieee80211_ra_vht_choose(&in->in_rn_vht, ic, ni);
5495 	else
5496 		ieee80211_ra_choose(&in->in_rn, ic, ni);
5497 
5498 	/*
5499 	 * If RA has chosen a new TX rate we must update
5500 	 * the firmware's LQ rate table.
5501 	 */
5502 	if (ni->ni_txmcs != old_txmcs || ni->ni_vht_ss != old_nss)
5503 		iwm_setrates(in, 1);
5504 }
5505 
5506 void
5507 iwm_ht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5508     int txmcs, uint8_t failure_frame, int txfail)
5509 {
5510 	struct ieee80211com *ic = &sc->sc_ic;
5511 	struct iwm_node *in = (void *)ni;
5512 
5513 	/* Ignore Tx reports which don't match our last LQ command. */
5514 	if (txmcs != ni->ni_txmcs) {
5515 		if (++in->lq_rate_mismatch > 15) {
5516 			/* Try to sync firmware with the driver... */
5517 			iwm_setrates(in, 1);
5518 			in->lq_rate_mismatch = 0;
5519 		}
5520 	} else {
5521 		int mcs = txmcs;
5522 		const struct ieee80211_ht_rateset *rs =
5523 		    ieee80211_ra_get_ht_rateset(txmcs,
5524 		        ieee80211_node_supports_ht_chan40(ni),
5525 			ieee80211_ra_use_ht_sgi(ni));
5526 		unsigned int retries = 0, i;
5527 
5528 		in->lq_rate_mismatch = 0;
5529 
5530 		for (i = 0; i < failure_frame; i++) {
5531 			if (mcs > rs->min_mcs) {
5532 				ieee80211_ra_add_stats_ht(&in->in_rn,
5533 				    ic, ni, mcs, 1, 1);
5534 				mcs--;
5535 			} else
5536 				retries++;
5537 		}
5538 
5539 		if (txfail && failure_frame == 0) {
5540 			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5541 			    txmcs, 1, 1);
5542 		} else {
5543 			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5544 			    mcs, retries + 1, retries);
5545 		}
5546 
5547 		iwm_ra_choose(sc, ni);
5548 	}
5549 }
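
/*
 * Worked example (illustrative, assuming rs->min_mcs is below MCS 6):
 * txmcs=7, failure_frame=2 and txfail=0 means the firmware stepped the
 * rate down twice before succeeding. The loop above records one failed
 * attempt each at MCS 7 and MCS 6, and the final call reports one
 * successful attempt at MCS 5, mirroring the firmware's LQ retry table.
 */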
5550 
5551 void
5552 iwm_vht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5553     int txmcs, int nss, uint8_t failure_frame, int txfail)
5554 {
5555 	struct ieee80211com *ic = &sc->sc_ic;
5556 	struct iwm_node *in = (void *)ni;
5557 	uint8_t vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
5558 	uint8_t sco = IEEE80211_HTOP0_SCO_SCN;
5559 
5560 	/* Ignore Tx reports which don't match our last LQ command. */
5561 	if (txmcs != ni->ni_txmcs || nss != ni->ni_vht_ss) {
5562 		if (++in->lq_rate_mismatch > 15) {
5563 			/* Try to sync firmware with the driver... */
5564 			iwm_setrates(in, 1);
5565 			in->lq_rate_mismatch = 0;
5566 		}
5567 	} else {
5568 		int mcs = txmcs;
5569 		unsigned int retries = 0, i;
5570 
5571 		if (in->in_phyctxt) {
5572 			vht_chan_width = in->in_phyctxt->vht_chan_width;
5573 			sco = in->in_phyctxt->sco;
5574 		}
5575 		in->lq_rate_mismatch = 0;
5576 
5577 		for (i = 0; i < failure_frame; i++) {
5578 			if (mcs > 0) {
5579 				ieee80211_ra_vht_add_stats(&in->in_rn_vht,
5580 				    ic, ni, mcs, nss, 1, 1);
5581 				if (vht_chan_width >=
5582 				    IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5583 					/*
5584 					 * First 4 Tx attempts used same MCS,
5585 					 * twice at 80MHz and twice at 40MHz.
5586 					 */
5587 					if (i >= 4)
5588 						mcs--;
5589 				} else if (sco == IEEE80211_HTOP0_SCO_SCA ||
5590 				    sco == IEEE80211_HTOP0_SCO_SCB) {
5591 					/*
5592 					 * First 4 Tx attempts used same MCS,
5593 					 * four times at 40MHz.
5594 					 */
5595 					if (i >= 4)
5596 						mcs--;
5597 				} else
5598 					mcs--;
5599 			} else
5600 				retries++;
5601 		}
5602 
5603 		if (txfail && failure_frame == 0) {
5604 			ieee80211_ra_vht_add_stats(&in->in_rn_vht, ic, ni,
5605 			    txmcs, nss, 1, 1);
5606 		} else {
5607 			ieee80211_ra_vht_add_stats(&in->in_rn_vht, ic, ni,
5608 			    mcs, nss, retries + 1, retries);
5609 		}
5610 
5611 		iwm_ra_choose(sc, ni);
5612 	}
5613 }
5614 
5615 void
5616 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5617     struct iwm_node *in, int txmcs, int txrate)
5618 {
5619 	struct ieee80211com *ic = &sc->sc_ic;
5620 	struct ieee80211_node *ni = &in->in_ni;
5621 	struct ifnet *ifp = IC2IFP(ic);
5622 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5623 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5624 	uint32_t initial_rate = le32toh(tx_resp->initial_rate);
5625 	int txfail;
5626 
5627 	KASSERT(tx_resp->frame_count == 1);
5628 
5629 	txfail = (status != IWM_TX_STATUS_SUCCESS &&
5630 	    status != IWM_TX_STATUS_DIRECT_DONE);
5631 
5632 	/*
5633 	 * Update rate control statistics.
5634 	 * Only report frames which were actually queued with the currently
5635 	 * selected Tx rate. Because Tx queues are relatively long we may
5636 	 * encounter previously selected rates here during Tx bursts.
5637 	 * Providing feedback based on such frames can lead to suboptimal
5638 	 * Tx rate control decisions.
5639 	 */
5640 	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) {
5641 		if (txrate != ni->ni_txrate) {
5642 			if (++in->lq_rate_mismatch > 15) {
5643 				/* Try to sync firmware with the driver... */
5644 				iwm_setrates(in, 1);
5645 				in->lq_rate_mismatch = 0;
5646 			}
5647 		} else {
5648 			in->lq_rate_mismatch = 0;
5649 
5650 			in->in_amn.amn_txcnt++;
5651 			if (txfail)
5652 				in->in_amn.amn_retrycnt++;
5653 			if (tx_resp->failure_frame > 0)
5654 				in->in_amn.amn_retrycnt++;
5655 		}
5656 	} else if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5657 	    ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
5658 	    (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5659 		int txmcs = initial_rate & IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5660 		int nss = ((initial_rate & IWM_RATE_VHT_MCS_NSS_MSK) >>
5661 		    IWM_RATE_VHT_MCS_NSS_POS) + 1;
5662 		iwm_vht_single_rate_control(sc, ni, txmcs, nss,
5663 		    tx_resp->failure_frame, txfail);
5664 	} else if (ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
5665 	    (initial_rate & IWM_RATE_MCS_HT_MSK)) {
5666 		int txmcs = initial_rate &
5667 		    (IWM_RATE_HT_MCS_RATE_CODE_MSK | IWM_RATE_HT_MCS_NSS_MSK);
5668 		iwm_ht_single_rate_control(sc, ni, txmcs,
5669 		    tx_resp->failure_frame, txfail);
5670 	}
5671 
5672 	if (txfail)
5673 		ifp->if_oerrors++;
5674 }
5675 
5676 void
5677 iwm_txd_done(struct iwm_softc *sc, struct iwm_tx_data *txd)
5678 {
5679 	struct ieee80211com *ic = &sc->sc_ic;
5680 
5681 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
5682 	    BUS_DMASYNC_POSTWRITE);
5683 	bus_dmamap_unload(sc->sc_dmat, txd->map);
5684 	m_freem(txd->m);
5685 	txd->m = NULL;
5686 
5687 	KASSERT(txd->in);
5688 	ieee80211_release_node(ic, &txd->in->in_ni);
5689 	txd->in = NULL;
5690 	txd->ampdu_nframes = 0;
5691 	txd->ampdu_txmcs = 0;
5692 	txd->ampdu_txnss = 0;
5693 }
5694 
5695 void
5696 iwm_txq_advance(struct iwm_softc *sc, struct iwm_tx_ring *ring, int idx)
5697 {
5698 	struct iwm_tx_data *txd;
5699 
5700 	while (ring->tail != idx) {
5701 		txd = &ring->data[ring->tail];
5702 		if (txd->m != NULL) {
5703 			iwm_reset_sched(sc, ring->qid, ring->tail, IWM_STATION_ID);
5704 			iwm_txd_done(sc, txd);
5705 			ring->queued--;
5706 		}
5707 		ring->tail = (ring->tail + 1) % IWM_TX_RING_COUNT;
5708 	}
5709 
5710 	wakeup(ring);
5711 }
5712 
5713 void
5714 iwm_ampdu_tx_done(struct iwm_softc *sc, struct iwm_cmd_header *cmd_hdr,
5715     struct iwm_node *in, struct iwm_tx_ring *txq, uint32_t initial_rate,
5716     uint8_t nframes, uint8_t failure_frame, uint16_t ssn, int status,
5717     struct iwm_agg_tx_status *agg_status)
5718 {
5719 	struct ieee80211com *ic = &sc->sc_ic;
5720 	int tid = cmd_hdr->qid - IWM_FIRST_AGG_TX_QUEUE;
5721 	struct iwm_tx_data *txdata = &txq->data[cmd_hdr->idx];
5722 	struct ieee80211_node *ni = &in->in_ni;
5723 	struct ieee80211_tx_ba *ba;
5724 	int txfail = (status != IWM_TX_STATUS_SUCCESS &&
5725 	    status != IWM_TX_STATUS_DIRECT_DONE);
5726 	uint16_t seq;
5727 
5728 	if (ic->ic_state != IEEE80211_S_RUN)
5729 		return;
5730 
5731 	if (nframes > 1) {
5732 		int i;
5733  		/*
5734 		 * Collect information about this A-MPDU.
5735 		 */
5736 
5737 		for (i = 0; i < nframes; i++) {
5738 			uint8_t qid = agg_status[i].qid;
5739 			uint8_t idx = agg_status[i].idx;
5740 			uint16_t txstatus = (le16toh(agg_status[i].status) &
5741 			    IWM_AGG_TX_STATE_STATUS_MSK);
5742 
5743 			if (txstatus != IWM_AGG_TX_STATE_TRANSMITTED)
5744 				continue;
5745 
5746 			if (qid != cmd_hdr->qid)
5747 				continue;
5748 
5749 			txdata = &txq->data[idx];
5750 			if (txdata->m == NULL)
5751 				continue;
5752 
5753 			/* The Tx rate was the same for all subframes. */
5754 			if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5755 			    (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5756 				txdata->ampdu_txmcs = initial_rate &
5757 				    IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5758 				txdata->ampdu_txnss = ((initial_rate &
5759 				    IWM_RATE_VHT_MCS_NSS_MSK) >>
5760 				    IWM_RATE_VHT_MCS_NSS_POS) + 1;
5761 				txdata->ampdu_nframes = nframes;
5762 			} else if (initial_rate & IWM_RATE_MCS_HT_MSK) {
5763 				txdata->ampdu_txmcs = initial_rate &
5764 				    (IWM_RATE_HT_MCS_RATE_CODE_MSK |
5765 				    IWM_RATE_HT_MCS_NSS_MSK);
5766 				txdata->ampdu_nframes = nframes;
5767 			}
5768 		}
5769 		return;
5770 	}
5771 
5772 	ba = &ni->ni_tx_ba[tid];
5773 	if (ba->ba_state != IEEE80211_BA_AGREED)
5774 		return;
5775 	if (SEQ_LT(ssn, ba->ba_winstart))
5776 		return;
5777 
5778 	/* This was a final single-frame Tx attempt for frame SSN-1. */
5779 	seq = (ssn - 1) & 0xfff;
5780 
5781 	/*
5782 	 * Skip rate control if our Tx rate is fixed.
5783 	 * Don't report frames to MiRA which were sent at a different
5784 	 * Tx rate than ni->ni_txmcs.
5785 	 */
5786 	if (ic->ic_fixed_mcs == -1) {
5787 		if (txdata->ampdu_nframes > 1) {
5788 			/*
5789 			 * This frame was once part of an A-MPDU.
5790 			 * Report one failed A-MPDU Tx attempt.
5791 			 * The firmware might have made several such
5792 			 * attempts but we don't keep track of this.
5793 			 */
5794 			if (ni->ni_flags & IEEE80211_NODE_VHT) {
5795 				ieee80211_ra_vht_add_stats(&in->in_rn_vht,
5796 				    ic, ni, txdata->ampdu_txmcs,
5797 				    txdata->ampdu_txnss, 1, 1);
5798 			} else {
5799 				ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5800 				    txdata->ampdu_txmcs, 1, 1);
5801 			}
5802 		}
5803 
5804 		/* Report the final single-frame Tx attempt. */
5805 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5806 		    (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5807 			int txmcs = initial_rate &
5808 			    IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5809 			int nss = ((initial_rate &
5810 			    IWM_RATE_VHT_MCS_NSS_MSK) >>
5811 			    IWM_RATE_VHT_MCS_NSS_POS) + 1;
5812 			iwm_vht_single_rate_control(sc, ni, txmcs, nss,
5813 			    failure_frame, txfail);
5814 		} else if (initial_rate & IWM_RATE_MCS_HT_MSK) {
5815 			int txmcs = initial_rate &
5816 			   (IWM_RATE_HT_MCS_RATE_CODE_MSK |
5817 			   IWM_RATE_HT_MCS_NSS_MSK);
5818 			iwm_ht_single_rate_control(sc, ni, txmcs,
5819 			    failure_frame, txfail);
5820 		}
5821 	}
5822 
5823 	if (txfail)
5824 		ieee80211_tx_compressed_bar(ic, ni, tid, ssn);
5825 
5826 	/*
5827 	 * SSN corresponds to the first (perhaps not yet transmitted) frame
5828 	 * in firmware's BA window. Firmware is not going to retransmit any
5829 	 * frames before its BA window so mark them all as done.
5830 	 */
5831 	ieee80211_output_ba_move_window(ic, ni, tid, ssn);
5832 	iwm_txq_advance(sc, txq, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5833 	iwm_clear_oactive(sc, txq);
5834 }
5835 
5836 void
5837 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5838     struct iwm_rx_data *data)
5839 {
5840 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
5841 	int idx = cmd_hdr->idx;
5842 	int qid = cmd_hdr->qid;
5843 	struct iwm_tx_ring *ring = &sc->txq[qid];
5844 	struct iwm_tx_data *txd;
5845 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5846 	uint32_t ssn;
5847 	uint32_t len = iwm_rx_packet_len(pkt);
5848 
5849 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
5850 	    BUS_DMASYNC_POSTREAD);
5851 
5852 	/* Sanity checks. */
5853 	if (sizeof(*tx_resp) > len)
5854 		return;
5855 	if (qid < IWM_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
5856 		return;
5857 	if (qid > IWM_LAST_AGG_TX_QUEUE)
5858 		return;
5859 	if (sizeof(*tx_resp) + sizeof(ssn) +
5860 	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
5861 		return;
5862 
5863 	sc->sc_tx_timer[qid] = 0;
5864 
5865 	txd = &ring->data[idx];
5866 	if (txd->m == NULL)
5867 		return;
5868 
5869 	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
5870 	ssn = le32toh(ssn) & 0xfff;
5871 	if (qid >= IWM_FIRST_AGG_TX_QUEUE) {
5872 		int status;
5873 		status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5874 		iwm_ampdu_tx_done(sc, cmd_hdr, txd->in, ring,
5875 		    le32toh(tx_resp->initial_rate), tx_resp->frame_count,
5876 		    tx_resp->failure_frame, ssn, status, &tx_resp->status);
5877 	} else {
5878 		/*
5879 		 * Even though this is not an agg queue, we must only free
5880 		 * frames before the firmware's starting sequence number.
5881 		 */
5882 		iwm_rx_tx_cmd_single(sc, pkt, txd->in, txd->txmcs, txd->txrate);
5883 		iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5884 		iwm_clear_oactive(sc, ring);
5885 	}
5886 }
5887 
5888 void
5889 iwm_clear_oactive(struct iwm_softc *sc, struct iwm_tx_ring *ring)
5890 {
5891 	struct ieee80211com *ic = &sc->sc_ic;
5892 	struct ifnet *ifp = IC2IFP(ic);
5893 
5894 	if (ring->queued < IWM_TX_RING_LOMARK) {
5895 		sc->qfullmsk &= ~(1 << ring->qid);
5896 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
5897 			ifq_clr_oactive(&ifp->if_snd);
5898 			/*
5899 			 * Well, we're in interrupt context, but then again
5900 			 * I guess net80211 does all sorts of stunts in
5901 			 * interrupt context, so maybe this is no biggie.
5902 			 */
5903 			(*ifp->if_start)(ifp);
5904 		}
5905 	}
5906 }
5907 
5908 void
5909 iwm_ampdu_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5910     struct iwm_tx_ring *txq, int tid, uint16_t seq, uint16_t ssn)
5911 {
5912 	struct ieee80211com *ic = &sc->sc_ic;
5913 	struct iwm_node *in = (void *)ni;
5914 	int idx, end_idx;
5915 
5916 	/*
5917 	 * Update Tx rate statistics for A-MPDUs before firmware's BA window.
5918 	 */
5919 	idx = IWM_AGG_SSN_TO_TXQ_IDX(seq);
5920 	end_idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
5921 	while (idx != end_idx) {
5922 		struct iwm_tx_data *txdata = &txq->data[idx];
5923 		if (txdata->m != NULL && txdata->ampdu_nframes > 1) {
5924 			/*
5925 			 * We can assume that this subframe has been ACKed
5926 			 * because ACK failures come as single frames and
5927 			 * before failing an A-MPDU subframe the firmware
5928 			 * sends it as a single frame at least once.
5929 			 */
5930 			if (ni->ni_flags & IEEE80211_NODE_VHT) {
5931 				ieee80211_ra_vht_add_stats(&in->in_rn_vht,
5932 				    ic, ni, txdata->ampdu_txmcs,
5933 				    txdata->ampdu_txnss, 1, 0);
5934 			} else {
5935 				ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5936 				    txdata->ampdu_txmcs, 1, 0);
5937 			}
5938 			/* Report this frame only once. */
5939 			txdata->ampdu_nframes = 0;
5940 		}
5941 
5942 		idx = (idx + 1) % IWM_TX_RING_COUNT;
5943 	}
5944 
5945 	iwm_ra_choose(sc, ni);
5946 }
5947 
5948 void
5949 iwm_rx_compressed_ba(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
5950 {
5951 	struct iwm_ba_notif *ban = (void *)pkt->data;
5952 	struct ieee80211com *ic = &sc->sc_ic;
5953 	struct ieee80211_node *ni = ic->ic_bss;
5954 	struct iwm_node *in = (void *)ni;
5955 	struct ieee80211_tx_ba *ba;
5956 	struct iwm_tx_ring *ring;
5957 	uint16_t seq, ssn;
5958 	int qid;
5959 
5960 	if (ic->ic_state != IEEE80211_S_RUN)
5961 		return;
5962 
5963 	if (iwm_rx_packet_payload_len(pkt) < sizeof(*ban))
5964 		return;
5965 
5966 	if (ban->sta_id != IWM_STATION_ID ||
5967 	    !IEEE80211_ADDR_EQ(in->in_macaddr, ban->sta_addr))
5968 		return;
5969 
5970 	qid = le16toh(ban->scd_flow);
5971 	if (qid < IWM_FIRST_AGG_TX_QUEUE || qid > IWM_LAST_AGG_TX_QUEUE)
5972 		return;
5973 
5974 	/* Protect against a firmware bug where the queue/TID are off. */
5975 	if (qid != IWM_FIRST_AGG_TX_QUEUE + ban->tid)
5976 		return;
5977 
5978 	sc->sc_tx_timer[qid] = 0;
5979 
5980 	ba = &ni->ni_tx_ba[ban->tid];
5981 	if (ba->ba_state != IEEE80211_BA_AGREED)
5982 		return;
5983 
5984 	ring = &sc->txq[qid];
5985 
5986 	/*
5987 	 * The first bit in ban->bitmap corresponds to the sequence number
5988 	 * stored in the sequence control field ban->seq_ctl.
5989 	 * Multiple BA notifications in a row may be using this number, with
5990 	 * additional bits being set in ban->bitmap. It is unclear how the
5991 	 * firmware decides to shift this window forward.
5992 	 * We rely on ba->ba_winstart instead.
5993 	 */
5994 	seq = le16toh(ban->seq_ctl) >> IEEE80211_SEQ_SEQ_SHIFT;
5995 
5996 	/*
5997 	 * The firmware's new BA window starting sequence number
5998 	 * corresponds to the first hole in ban->scd_ssn, implying
5999 	 * that all frames between 'seq' and 'ssn' (non-inclusive)
6000 	 * have been acked.
6001 	 */
6002 	ssn = le16toh(ban->scd_ssn);
6003 
6004 	if (SEQ_LT(ssn, ba->ba_winstart))
6005 		return;
6006 
6007 	/* Skip rate control if our Tx rate is fixed. */
6008 	if (ic->ic_fixed_mcs == -1)
6009 		iwm_ampdu_rate_control(sc, ni, ring, ban->tid,
6010 		    ba->ba_winstart, ssn);
6011 
6012 	/*
6013 	 * SSN corresponds to the first (perhaps not yet transmitted) frame
6014 	 * in firmware's BA window. Firmware is not going to retransmit any
6015 	 * frames before its BA window so mark them all as done.
6016 	 */
6017 	ieee80211_output_ba_move_window(ic, ni, ban->tid, ssn);
6018 	iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
6019 	iwm_clear_oactive(sc, ring);
6020 }
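
/*
 * Illustrative example: if ba->ba_winstart is 100 and the notification
 * carries scd_ssn=105, then frames with SN 100..104 are considered
 * acked: iwm_ampdu_rate_control() credits their Tx attempts to rate
 * control, the net80211 BA window moves to 105, and the corresponding
 * Tx ring slots are reclaimed by iwm_txq_advance().
 */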
6021 
6022 void
6023 iwm_rx_bmiss(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
6024     struct iwm_rx_data *data)
6025 {
6026 	struct ieee80211com *ic = &sc->sc_ic;
6027 	struct iwm_missed_beacons_notif *mbn = (void *)pkt->data;
6028 	uint32_t missed;
6029 
6030 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
6031 	    (ic->ic_state != IEEE80211_S_RUN))
6032 		return;
6033 
6034 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
6035 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
6036 
6037 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
6038 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
6039 		if (ic->ic_if.if_flags & IFF_DEBUG)
6040 			printf("%s: receiving no beacons from %s; checking if "
6041 			    "this AP is still responding to probe requests\n",
6042 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
6043 		/*
6044 		 * Rather than go directly to scan state, try to send a
6045 		 * directed probe request first. If that fails then the
6046 		 * state machine will drop us into scanning after timing
6047 		 * out waiting for a probe response.
6048 		 */
6049 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
6050 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
6051 	}
6052 
6053 }
6054 
6055 int
6056 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
6057 {
6058 	struct iwm_binding_cmd cmd;
6059 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
6060 	uint32_t mac_id = IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
6061 	int i, err, active = (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE);
6062 	uint32_t status;
6063 	size_t len;
6064 
6065 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
6066 		panic("binding already added");
6067 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
6068 		panic("binding already removed");
6069 
6070 	if (phyctxt == NULL) /* XXX race with iwm_stop() */
6071 		return EINVAL;
6072 
6073 	memset(&cmd, 0, sizeof(cmd));
6074 
6075 	cmd.id_and_color
6076 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
6077 	cmd.action = htole32(action);
6078 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
6079 
6080 	cmd.macs[0] = htole32(mac_id);
6081 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
6082 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
6083 
6084 	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
6085 	    !isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CDB_SUPPORT))
6086 		cmd.lmac_id = htole32(IWM_LMAC_24G_INDEX);
6087 	else
6088 		cmd.lmac_id = htole32(IWM_LMAC_5G_INDEX);
6089 
6090 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT))
6091 		len = sizeof(cmd);
6092 	else
6093 		len = sizeof(struct iwm_binding_cmd_v1);
6094 	status = 0;
6095 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD, len, &cmd,
6096 	    &status);
6097 	if (err == 0 && status != 0)
6098 		err = EIO;
6099 
6100 	return err;
6101 }
6102 
6103 void
6104 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6105     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
6106 {
6107 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
6108 
6109 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
6110 	    ctxt->color));
6111 	cmd->action = htole32(action);
6112 	cmd->apply_time = htole32(apply_time);
6113 }
6114 
6115 void
6116 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
6117     struct ieee80211_channel *chan, uint8_t chains_static,
6118     uint8_t chains_dynamic, uint8_t sco, uint8_t vht_chan_width)
6119 {
6120 	struct ieee80211com *ic = &sc->sc_ic;
6121 	uint8_t active_cnt, idle_cnt;
6122 
6123 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
6124 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
6125 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
6126 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
6127 		cmd->ci.ctrl_pos = iwm_get_vht_ctrl_pos(ic, chan);
6128 		cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE80;
6129 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
6130 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
6131 			/* secondary chan above -> control chan below */
6132 			cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6133 			cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6134 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
6135 			/* secondary chan below -> control chan above */
6136 			cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6137 			cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6138 		} else {
6139 			cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6140 			cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6141 		}
6142 	} else {
6143 		cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6144 		cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6145 	}
6146 
6147 	/* Set the Rx chains. */
6148 	idle_cnt = chains_static;
6149 	active_cnt = chains_dynamic;
6150 
6151 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
6152 					IWM_PHY_RX_CHAIN_VALID_POS);
6153 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
6154 	cmd->rxchain_info |= htole32(active_cnt <<
6155 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
6156 
6157 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
6158 }
6159 
6160 uint8_t
6161 iwm_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
6162 {
6163 	int center_idx = ic->ic_bss->ni_vht_chan_center_freq_idx0;
6164 	int primary_idx = ic->ic_bss->ni_primary_chan;
6165 	/*
6166 	 * The FW is expected to check the control channel position only
6167 	 * when in HT/VHT and the channel width is not 20MHz. Return
6168 	 * this value as the default one:
6169 	 */
6170 	uint8_t pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6171 
6172 	switch (primary_idx - center_idx) {
6173 	case -6:
6174 		pos = IWM_PHY_VHT_CTRL_POS_2_BELOW;
6175 		break;
6176 	case -2:
6177 		pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6178 		break;
6179 	case 2:
6180 		pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6181 		break;
6182 	case 6:
6183 		pos = IWM_PHY_VHT_CTRL_POS_2_ABOVE;
6184 		break;
6185 	default:
6186 		break;
6187 	}
6188 
6189 	return pos;
6190 }
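
/*
 * Worked example: on the 80MHz block spanning channels 36-48 the center
 * channel index is 42. A primary channel of 36 yields primary_idx -
 * center_idx = -6, 40 yields -2, 44 yields 2, and 48 yields 6, mapping
 * to the four control channel positions in the switch above.
 */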
6191 
6192 int
6193 iwm_phy_ctxt_cmd_uhb(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6194     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
6195     uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
6196 {
6197 	struct ieee80211com *ic = &sc->sc_ic;
6198 	struct iwm_phy_context_cmd_uhb cmd;
6199 	uint8_t active_cnt, idle_cnt;
6200 	struct ieee80211_channel *chan = ctxt->channel;
6201 
6202 	memset(&cmd, 0, sizeof(cmd));
6203 	cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
6204 	    ctxt->color));
6205 	cmd.action = htole32(action);
6206 	cmd.apply_time = htole32(apply_time);
6207 
6208 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
6209 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
6210 	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
6211 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
6212 		cmd.ci.ctrl_pos = iwm_get_vht_ctrl_pos(ic, chan);
6213 		cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE80;
6214 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
6215 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
6216 			/* secondary chan above -> control chan below */
6217 			cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6218 			cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6219 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
6220 			/* secondary chan below -> control chan above */
6221 			cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6222 			cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6223 		} else {
6224 			cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6225 			cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6226 		}
6227 	} else {
6228 		cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6229 		cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6230 	}
6231 
6232 	idle_cnt = chains_static;
6233 	active_cnt = chains_dynamic;
6234 	cmd.rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
6235 					IWM_PHY_RX_CHAIN_VALID_POS);
6236 	cmd.rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
6237 	cmd.rxchain_info |= htole32(active_cnt <<
6238 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
6239 	cmd.txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
6240 
6241 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6242 }
6243 
6244 int
6245 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6246     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
6247     uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
6248 {
6249 	struct iwm_phy_context_cmd cmd;
6250 
6251 	/*
6252 	 * Intel increased the size of the fw_channel_info struct and neglected
6253 	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
6254 	 * member in the middle.
6255 	 * To keep things simple we use a separate function to handle the larger
6256 	 * variant of the phy context command.
6257 	 */
6258 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS))
6259 		return iwm_phy_ctxt_cmd_uhb(sc, ctxt, chains_static,
6260 		    chains_dynamic, action, apply_time, sco, vht_chan_width);
6261 
6262 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
6263 
6264 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
6265 	    chains_static, chains_dynamic, sco, vht_chan_width);
6266 
6267 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
6268 	    sizeof(struct iwm_phy_context_cmd), &cmd);
6269 }
6270 
6271 int
6272 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
6273 {
6274 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
6275 	struct iwm_tfd *desc;
6276 	struct iwm_tx_data *txdata;
6277 	struct iwm_device_cmd *cmd;
6278 	struct mbuf *m;
6279 	bus_addr_t paddr;
6280 	uint32_t addr_lo;
6281 	int err = 0, i, paylen, off, s;
6282 	int idx, code, async, group_id;
6283 	size_t hdrlen, datasz;
6284 	uint8_t *data;
6285 	int generation = sc->sc_generation;
6286 
6287 	code = hcmd->id;
6288 	async = hcmd->flags & IWM_CMD_ASYNC;
6289 	idx = ring->cur;
6290 
6291 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
6292 		paylen += hcmd->len[i];
6293 	}
6294 
6295 	/* If this command waits for a response, allocate response buffer. */
6296 	hcmd->resp_pkt = NULL;
6297 	if (hcmd->flags & IWM_CMD_WANT_RESP) {
6298 		uint8_t *resp_buf;
6299 		KASSERT(!async);
6300 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwm_rx_packet));
6301 		KASSERT(hcmd->resp_pkt_len <= IWM_CMD_RESP_MAX);
6302 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
6303 			return ENOSPC;
6304 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
6305 		    M_NOWAIT | M_ZERO);
6306 		if (resp_buf == NULL)
6307 			return ENOMEM;
6308 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
6309 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
6310 	} else {
6311 		sc->sc_cmd_resp_pkt[idx] = NULL;
6312 	}
6313 
6314 	s = splnet();
6315 
6316 	desc = &ring->desc[idx];
6317 	txdata = &ring->data[idx];
6318 
6319 	group_id = iwm_cmd_groupid(code);
6320 	if (group_id != 0) {
6321 		hdrlen = sizeof(cmd->hdr_wide);
6322 		datasz = sizeof(cmd->data_wide);
6323 	} else {
6324 		hdrlen = sizeof(cmd->hdr);
6325 		datasz = sizeof(cmd->data);
6326 	}
6327 
6328 	if (paylen > datasz) {
6329 		/* Command is too large to fit in pre-allocated space. */
6330 		size_t totlen = hdrlen + paylen;
6331 		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
6332 			printf("%s: firmware command too long (%zd bytes)\n",
6333 			    DEVNAME(sc), totlen);
6334 			err = EINVAL;
6335 			goto out;
6336 		}
6337 		m = MCLGETL(NULL, M_DONTWAIT, totlen);
6338 		if (m == NULL) {
6339 			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
6340 			    DEVNAME(sc), totlen);
6341 			err = ENOMEM;
6342 			goto out;
6343 		}
6344 		cmd = mtod(m, struct iwm_device_cmd *);
6345 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
6346 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6347 		if (err) {
6348 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
6349 			    DEVNAME(sc), totlen);
6350 			m_freem(m);
6351 			goto out;
6352 		}
6353 		txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
6354 		paddr = txdata->map->dm_segs[0].ds_addr;
6355 	} else {
6356 		cmd = &ring->cmd[idx];
6357 		paddr = txdata->cmd_paddr;
6358 	}
6359 
6360 	if (group_id != 0) {
6361 		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
6362 		cmd->hdr_wide.group_id = group_id;
6363 		cmd->hdr_wide.qid = ring->qid;
6364 		cmd->hdr_wide.idx = idx;
6365 		cmd->hdr_wide.length = htole16(paylen);
6366 		cmd->hdr_wide.version = iwm_cmd_version(code);
6367 		data = cmd->data_wide;
6368 	} else {
6369 		cmd->hdr.code = code;
6370 		cmd->hdr.flags = 0;
6371 		cmd->hdr.qid = ring->qid;
6372 		cmd->hdr.idx = idx;
6373 		data = cmd->data;
6374 	}
6375 
6376 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
6377 		if (hcmd->len[i] == 0)
6378 			continue;
6379 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
6380 		off += hcmd->len[i];
6381 	}
6382 	KASSERT(off == paylen);
6383 
6384 	/* lo field is not aligned */
6385 	addr_lo = htole32((uint32_t)paddr);
6386 	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
6387 	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
6388 	    | ((hdrlen + paylen) << 4));
6389 	desc->num_tbs = 1;
6390 
6391 	if (paylen > datasz) {
6392 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
6393 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
6394 	} else {
6395 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6396 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6397 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
6398 	}
6399 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6400 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6401 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
6402 
6403 	/*
6404 	 * Wake up the NIC to make sure that the firmware will see the host
6405 	 * command - we will let the NIC sleep once all the host commands
6406 	 * have returned. This needs to be done only on 7000 family NICs.
6407 	 */
6408 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
6409 		if (ring->queued == 0 && !iwm_nic_lock(sc)) {
6410 			err = EBUSY;
6411 			goto out;
6412 		}
6413 	}
6414 
6415 	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
6416 
6417 	/* Kick command ring. */
6418 	ring->queued++;
6419 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
6420 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
6421 
6422 	if (!async) {
6423 		err = tsleep_nsec(desc, PCATCH, "iwmcmd", SEC_TO_NSEC(1));
6424 		if (err == 0) {
6425 			/* if hardware is no longer up, return error */
6426 			if (generation != sc->sc_generation) {
6427 				err = ENXIO;
6428 				goto out;
6429 			}
6430 
6431 			/* Response buffer will be freed in iwm_free_resp(). */
6432 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
6433 			sc->sc_cmd_resp_pkt[idx] = NULL;
6434 		} else if (generation == sc->sc_generation) {
6435 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
6436 			    sc->sc_cmd_resp_len[idx]);
6437 			sc->sc_cmd_resp_pkt[idx] = NULL;
6438 		}
6439 	}
6440  out:
6441 	splx(s);
6442 
6443 	return err;
6444 }
6445 
6446 int
6447 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
6448     uint16_t len, const void *data)
6449 {
6450 	struct iwm_host_cmd cmd = {
6451 		.id = id,
6452 		.len = { len, },
6453 		.data = { data, },
6454 		.flags = flags,
6455 	};
6456 
6457 	return iwm_send_cmd(sc, &cmd);
6458 }
6459 
6460 int
6461 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
6462     uint32_t *status)
6463 {
6464 	struct iwm_rx_packet *pkt;
6465 	struct iwm_cmd_response *resp;
6466 	int err, resp_len;
6467 
6468 	KASSERT((cmd->flags & IWM_CMD_WANT_RESP) == 0);
6469 	cmd->flags |= IWM_CMD_WANT_RESP;
6470 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
6471 
6472 	err = iwm_send_cmd(sc, cmd);
6473 	if (err)
6474 		return err;
6475 
6476 	pkt = cmd->resp_pkt;
6477 	if (pkt == NULL || (pkt->hdr.flags & IWM_CMD_FAILED_MSK))
6478 		return EIO;
6479 
6480 	resp_len = iwm_rx_packet_payload_len(pkt);
6481 	if (resp_len != sizeof(*resp)) {
6482 		iwm_free_resp(sc, cmd);
6483 		return EIO;
6484 	}
6485 
6486 	resp = (void *)pkt->data;
6487 	*status = le32toh(resp->status);
6488 	iwm_free_resp(sc, cmd);
6489 	return err;
6490 }
6491 
6492 int
6493 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
6494     const void *data, uint32_t *status)
6495 {
6496 	struct iwm_host_cmd cmd = {
6497 		.id = id,
6498 		.len = { len, },
6499 		.data = { data, },
6500 	};
6501 
6502 	return iwm_send_cmd_status(sc, &cmd, status);
6503 }
6504 
6505 void
6506 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
6507 {
6508 	KASSERT((hcmd->flags & (IWM_CMD_WANT_RESP)) == IWM_CMD_WANT_RESP);
6509 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
6510 	hcmd->resp_pkt = NULL;
6511 }
6512 
6513 void
6514 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx, int code)
6515 {
6516 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
6517 	struct iwm_tx_data *data;
6518 
6519 	if (qid != sc->cmdqid) {
6520 		return;	/* Not a command ack. */
6521 	}
6522 
6523 	data = &ring->data[idx];
6524 
6525 	if (data->m != NULL) {
6526 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
6527 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6528 		bus_dmamap_unload(sc->sc_dmat, data->map);
6529 		m_freem(data->m);
6530 		data->m = NULL;
6531 	}
6532 	wakeup(&ring->desc[idx]);
6533 
6534 	if (ring->queued == 0) {
6535 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
6536 		    DEVNAME(sc), code));
6537 	} else if (--ring->queued == 0) {
6538 		/*
6539 		 * 7000 family NICs are locked while commands are in progress.
6540 		 * All commands are now done so we may unlock the NIC again.
6541 		 */
6542 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6543 			iwm_nic_unlock(sc);
6544 	}
6545 }
6546 
6547 void
6548 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
6549     uint16_t len)
6550 {
6551 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
6552 	uint16_t val;
6553 
6554 	scd_bc_tbl = sc->sched_dma.vaddr;
6555 
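	/*
	 * The TX scheduler reads per-entry byte counts from this table.
	 * The count covers the frame plus FCS and A-MPDU delimiter;
	 * firmware with the DW_BC_TABLE capability expects it in dwords.
	 */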
6556 	len += IWM_TX_CRC_SIZE + IWM_TX_DELIMITER_SIZE;
6557 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
6558 		len = roundup(len, 4) / 4;
6559 
6560 	val = htole16(sta_id << 12 | len);
6561 
6562 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6563 	    0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6564 
6565 	/* Update TX scheduler. */
6566 	scd_bc_tbl[qid].tfd_offset[idx] = val;
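	/*
	 * Low ring entries are mirrored past the ring end, presumably
	 * so the hardware can read ahead without wrapping.
	 */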
6567 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6568 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6569 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6570 	    0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6571 }
6572 
6573 void
6574 iwm_reset_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id)
6575 {
6576 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
6577 	uint16_t val;
6578 
6579 	scd_bc_tbl = sc->sched_dma.vaddr;
6580 
6581 	val = htole16(1 | (sta_id << 12));
6582 
6583 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6584 	    0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6585 
6586 	/* Update TX scheduler. */
6587 	scd_bc_tbl[qid].tfd_offset[idx] = val;
6588 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6589 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6590 
6591 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6592 	    0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6593 }
6594 
6595 /*
6596  * Fill in various bits for management frames, and leave them
6597  * unfilled for data frames (firmware takes care of that).
6598  * Return the selected legacy TX rate, or zero if HT/VHT is used.
6599  */
6600 uint8_t
6601 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
6602     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
6603 {
6604 	struct ieee80211com *ic = &sc->sc_ic;
6605 	struct ieee80211_node *ni = &in->in_ni;
6606 	const struct iwm_rate *rinfo;
6607 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6608 	int min_ridx = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
6609 	int ridx, rate_flags;
6610 	uint8_t rate = 0;
6611 
6612 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
6613 	tx->data_retry_limit = IWM_LOW_RETRY_LIMIT;
6614 
6615 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
6616 	    type != IEEE80211_FC0_TYPE_DATA) {
6617 		/* for non-data, use the lowest supported rate */
6618 		ridx = min_ridx;
6619 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
6620 	} else if (ic->ic_fixed_mcs != -1) {
6621 		if (ni->ni_flags & IEEE80211_NODE_VHT)
6622 			ridx = IWM_FIRST_OFDM_RATE;
6623 		else
6624 			ridx = sc->sc_fixed_ridx;
6625 	} else if (ic->ic_fixed_rate != -1) {
6626 		ridx = sc->sc_fixed_ridx;
6627 	} else {
6628 		int i;
6629 		/* Use firmware rateset retry table. */
6630 		tx->initial_rate_index = 0;
6631 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
6632 		if (ni->ni_flags & IEEE80211_NODE_HT) /* VHT implies HT */
6633 			return 0;
6634 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
6635 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
6636 		for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
6637 			if (iwm_rates[i].rate == (ni->ni_txrate &
6638 			    IEEE80211_RATE_VAL)) {
6639 				ridx = i;
6640 				break;
6641 			}
6642 		}
6643 		return iwm_rates[ridx].rate & 0xff;
6644 	}
6645 
6646 	rinfo = &iwm_rates[ridx];
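	/*
	 * MIMO HT rates require transmission on both antennas; for
	 * everything else pick an antenna mask valid for SISO.
	 */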
6647 	if ((ni->ni_flags & IEEE80211_NODE_VHT) == 0 &&
6648 	    iwm_is_mimo_ht_plcp(rinfo->ht_plcp))
6649 		rate_flags = IWM_RATE_MCS_ANT_AB_MSK;
6650 	else
6651 		rate_flags = iwm_valid_siso_ant_rate_mask(sc);
6652 	if (IWM_RIDX_IS_CCK(ridx))
6653 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
6654 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6655 	    type == IEEE80211_FC0_TYPE_DATA &&
6656 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
6657 		uint8_t sco = IEEE80211_HTOP0_SCO_SCN;
6658 		uint8_t vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
6659 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
6660 		    IEEE80211_CHAN_80MHZ_ALLOWED(ni->ni_chan) &&
6661 		    ieee80211_node_supports_vht_chan80(ni))
6662 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
6663 		else if (IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
6664 		    ieee80211_node_supports_ht_chan40(ni))
6665 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
6666 		if (ni->ni_flags & IEEE80211_NODE_VHT)
6667 			rate_flags |= IWM_RATE_MCS_VHT_MSK;
6668 		else
6669 			rate_flags |= IWM_RATE_MCS_HT_MSK;
6670 		if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80 &&
6671 		    in->in_phyctxt != NULL &&
6672 		    in->in_phyctxt->vht_chan_width == vht_chan_width) {
6673 			rate_flags |= IWM_RATE_MCS_CHAN_WIDTH_80;
6674 			if (ieee80211_node_supports_vht_sgi80(ni))
6675 				rate_flags |= IWM_RATE_MCS_SGI_MSK;
6676 		} else if ((sco == IEEE80211_HTOP0_SCO_SCA ||
6677 		    sco == IEEE80211_HTOP0_SCO_SCB) &&
6678 		    in->in_phyctxt != NULL && in->in_phyctxt->sco == sco) {
6679 			rate_flags |= IWM_RATE_MCS_CHAN_WIDTH_40;
6680 			if (ieee80211_node_supports_ht_sgi40(ni))
6681 				rate_flags |= IWM_RATE_MCS_SGI_MSK;
6682 		} else if (ieee80211_node_supports_ht_sgi20(ni))
6683 			rate_flags |= IWM_RATE_MCS_SGI_MSK;
6684 		if (ni->ni_flags & IEEE80211_NODE_VHT) {
6685 			/*
6686 			 * ifmedia only provides an MCS index, no NSS.
6687 			 * Use a fixed SISO rate.
6688 			 */
6689 			tx->rate_n_flags = htole32(rate_flags |
6690 			    (ic->ic_fixed_mcs &
6691 			    IWM_RATE_VHT_MCS_RATE_CODE_MSK));
6692 		} else
6693 			tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
6694 	} else
6695 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
6696 
6697 	return rate;
6698 }
6699 
6700 #define TB0_SIZE 16
6701 int
6702 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
6703 {
6704 	struct ieee80211com *ic = &sc->sc_ic;
6705 	struct iwm_node *in = (void *)ni;
6706 	struct iwm_tx_ring *ring;
6707 	struct iwm_tx_data *data;
6708 	struct iwm_tfd *desc;
6709 	struct iwm_device_cmd *cmd;
6710 	struct iwm_tx_cmd *tx;
6711 	struct ieee80211_frame *wh;
6712 	struct ieee80211_key *k = NULL;
6713 	uint8_t rate;
6714 	uint8_t *ivp;
6715 	uint32_t flags;
6716 	u_int hdrlen;
6717 	bus_dma_segment_t *seg;
6718 	uint8_t tid, type, subtype;
6719 	int i, totlen, err, pad;
6720 	int qid, hasqos;
6721 
6722 	wh = mtod(m, struct ieee80211_frame *);
6723 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6724 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
6725 	if (type == IEEE80211_FC0_TYPE_CTL)
6726 		hdrlen = sizeof(struct ieee80211_frame_min);
6727 	else
6728 		hdrlen = ieee80211_get_hdrlen(wh);
6729 
6730 	hasqos = ieee80211_has_qos(wh);
6731 	if (type == IEEE80211_FC0_TYPE_DATA)
6732 		tid = IWM_TID_NON_QOS;
6733 	else
6734 		tid = IWM_MAX_TID_COUNT;
6735 
6736 	/*
6737 	 * Map EDCA categories to Tx data queues.
6738 	 *
6739 	 * We use static data queue assignments even in DQA mode. We do not
6740 	 * need to share Tx queues between stations because we only implement
6741 	 * client mode; the firmware's station table contains only one entry
6742 	 * which represents our access point.
6743 	 */
6744 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6745 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6746 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
6747 		else
6748 			qid = IWM_AUX_QUEUE;
6749 	} else if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6750 		qid = IWM_DQA_MIN_MGMT_QUEUE + ac;
6751 	else
6752 		qid = ac;
6753 
6754 	/* If possible, put this frame on an aggregation queue. */
6755 	if (hasqos) {
6756 		struct ieee80211_tx_ba *ba;
6757 		uint16_t qos = ieee80211_get_qos(wh);
6758 		int qostid = qos & IEEE80211_QOS_TID;
6759 		int agg_qid = IWM_FIRST_AGG_TX_QUEUE + qostid;
6760 
6761 		ba = &ni->ni_tx_ba[qostid];
6762 		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6763 		    type == IEEE80211_FC0_TYPE_DATA &&
6764 		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
6765 		    (sc->tx_ba_queue_mask & (1 << agg_qid)) &&
6766 		    ba->ba_state == IEEE80211_BA_AGREED) {
6767 			qid = agg_qid;
6768 			tid = qostid;
6769 			ac = ieee80211_up_to_ac(ic, qostid);
6770 		}
6771 	}
6772 
6773 	ring = &sc->txq[qid];
6774 	desc = &ring->desc[ring->cur];
6775 	memset(desc, 0, sizeof(*desc));
6776 	data = &ring->data[ring->cur];
6777 
6778 	cmd = &ring->cmd[ring->cur];
6779 	cmd->hdr.code = IWM_TX_CMD;
6780 	cmd->hdr.flags = 0;
6781 	cmd->hdr.qid = ring->qid;
6782 	cmd->hdr.idx = ring->cur;
6783 
6784 	tx = (void *)cmd->data;
6785 	memset(tx, 0, sizeof(*tx));
6786 
6787 	rate = iwm_tx_fill_cmd(sc, in, wh, tx);
6788 
6789 #if NBPFILTER > 0
6790 	if (sc->sc_drvbpf != NULL) {
6791 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
6792 		uint16_t chan_flags;
6793 
6794 		tap->wt_flags = 0;
6795 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
6796 		chan_flags = ni->ni_chan->ic_flags;
6797 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
6798 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
6799 			chan_flags &= ~IEEE80211_CHAN_HT;
6800 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
6801 		}
6802 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
6803 			chan_flags &= ~IEEE80211_CHAN_VHT;
6804 		tap->wt_chan_flags = htole16(chan_flags);
6805 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6806 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6807 		    type == IEEE80211_FC0_TYPE_DATA) {
6808 			tap->wt_rate = (0x80 | ni->ni_txmcs);
6809 		} else
6810 			tap->wt_rate = rate;
6811 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
6812 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
6813 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
6814 
6815 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
6816 		    m, BPF_DIRECTION_OUT);
6817 	}
6818 #endif
6819 	totlen = m->m_pkthdr.len;
6820 
6821 	if (ic->ic_opmode != IEEE80211_M_MONITOR &&
6822 	    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) {
6823 		k = ieee80211_get_txkey(ic, wh, ni);
6824 		if ((k->k_flags & IEEE80211_KEY_GROUP) ||
6825 		    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
6826 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
6827 				return ENOBUFS;
6828 			/* 802.11 header may have moved. */
6829 			wh = mtod(m, struct ieee80211_frame *);
6830 			totlen = m->m_pkthdr.len;
6831 			k = NULL; /* skip hardware crypto below */
6832 		} else {
6833 			/* HW appends CCMP MIC */
6834 			totlen += IEEE80211_CCMP_HDRLEN;
6835 		}
6836 	}
6837 
6838 	flags = 0;
6839 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
6840 		flags |= IWM_TX_CMD_FLG_ACK;
6841 	}
6842 
6843 	if (type == IEEE80211_FC0_TYPE_DATA &&
6844 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6845 	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
6846 	    (ic->ic_flags & IEEE80211_F_USEPROT)))
6847 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
6848 
6849 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6850 		tx->sta_id = IWM_MONITOR_STA_ID;
6851 	else
6852 		tx->sta_id = IWM_STATION_ID;
6853 
6854 	if (type == IEEE80211_FC0_TYPE_MGT) {
6855 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
6856 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
6857 			tx->pm_frame_timeout = htole16(3);
6858 		else
6859 			tx->pm_frame_timeout = htole16(2);
6860 	} else {
6861 		if (type == IEEE80211_FC0_TYPE_CTL &&
6862 		    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
6863 			struct ieee80211_frame_min *mwh;
6864 			uint8_t *barfrm;
6865 			uint16_t ctl;
6866 			mwh = mtod(m, struct ieee80211_frame_min *);
6867 			barfrm = (uint8_t *)&mwh[1];
6868 			ctl = LE_READ_2(barfrm);
6869 			tid = (ctl & IEEE80211_BA_TID_INFO_MASK) >>
6870 			    IEEE80211_BA_TID_INFO_SHIFT;
6871 			flags |= IWM_TX_CMD_FLG_ACK | IWM_TX_CMD_FLG_BAR;
6872 			tx->data_retry_limit = IWM_BAR_DFAULT_RETRY_LIMIT;
6873 		}
6874 
6875 		tx->pm_frame_timeout = htole16(0);
6876 	}
6877 
6878 	if (hdrlen & 3) {
6879 		/* First segment length must be a multiple of 4. */
6880 		flags |= IWM_TX_CMD_FLG_MH_PAD;
6881 		tx->offload_assist |= htole16(IWM_TX_CMD_OFFLD_PAD);
6882 		pad = 4 - (hdrlen & 3);
6883 	} else
6884 		pad = 0;
6885 
6886 	tx->len = htole16(totlen);
6887 	tx->tid_tspec = tid;
6888 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
6889 
6890 	/* Set physical address of "scratch area". */
6891 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
6892 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
6893 
6894 	/* Copy 802.11 header in TX command. */
6895 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
6896 
6897 	if (k != NULL && k->k_cipher == IEEE80211_CIPHER_CCMP) {
6898 		/* Trim 802.11 header and prepend CCMP IV. */
6899 		m_adj(m, hdrlen - IEEE80211_CCMP_HDRLEN);
6900 		ivp = mtod(m, u_int8_t *);
6901 		k->k_tsc++;	/* increment the 48-bit PN */
6902 		ivp[0] = k->k_tsc; /* PN0 */
6903 		ivp[1] = k->k_tsc >> 8; /* PN1 */
6904 		ivp[2] = 0;        /* Rsvd */
6905 		ivp[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV;
6906 		ivp[4] = k->k_tsc >> 16; /* PN2 */
6907 		ivp[5] = k->k_tsc >> 24; /* PN3 */
6908 		ivp[6] = k->k_tsc >> 32; /* PN4 */
6909 		ivp[7] = k->k_tsc >> 40; /* PN5 */
6910 
6911 		tx->sec_ctl = IWM_TX_CMD_SEC_CCM;
6912 		memcpy(tx->key, k->k_key, MIN(sizeof(tx->key), k->k_len));
6913 		/* TX scheduler includes CCMP MIC length. */
6914 		totlen += IEEE80211_CCMP_MICLEN;
6915 	} else {
6916 		/* Trim 802.11 header. */
6917 		m_adj(m, hdrlen);
6918 		tx->sec_ctl = 0;
6919 	}
6920 
6921 	flags |= IWM_TX_CMD_FLG_BT_DIS;
6922 	if (!hasqos)
6923 		flags |= IWM_TX_CMD_FLG_SEQ_CTL;
6924 
6925 	tx->tx_flags |= htole32(flags);
6926 
6927 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6928 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6929 	if (err && err != EFBIG) {
6930 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
6931 		m_freem(m);
6932 		return err;
6933 	}
6934 	if (err) {
6935 		/* Too many DMA segments, linearize mbuf. */
6936 		if (m_defrag(m, M_DONTWAIT)) {
6937 			m_freem(m);
6938 			return ENOBUFS;
6939 		}
6940 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6941 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6942 		if (err) {
6943 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
6944 			    err);
6945 			m_freem(m);
6946 			return err;
6947 		}
6948 	}
6949 	data->m = m;
6950 	data->in = in;
6951 	data->txmcs = ni->ni_txmcs;
6952 	data->txrate = ni->ni_txrate;
6953 	data->ampdu_txmcs = ni->ni_txmcs; /* updated upon Tx interrupt */
6954 	data->ampdu_txnss = ni->ni_vht_ss; /* updated upon Tx interrupt */
6955 
6956 	/* Fill TX descriptor. */
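	/*
	 * TB0 covers the first 16 bytes of the TX command; TB1 covers
	 * the rest of the command plus the 802.11 header and any pad.
	 * The mbuf's DMA segments follow as additional TBs.
	 */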
6957 	desc->num_tbs = 2 + data->map->dm_nsegs;
6958 
6959 	desc->tbs[0].lo = htole32(data->cmd_paddr);
6960 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
6961 	    (TB0_SIZE << 4));
6962 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
6963 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
6964 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
6965 	      + hdrlen + pad - TB0_SIZE) << 4));
6966 
6967 	/* Other DMA segments are for data payload. */
6968 	seg = data->map->dm_segs;
6969 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
6970 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
6971 		desc->tbs[i+2].hi_n_len =
6972 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr)
6973 		    | ((seg->ds_len) << 4));
6974 	}
6975 
6976 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
6977 	    BUS_DMASYNC_PREWRITE);
6978 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6979 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6980 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
6981 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6982 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6983 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
6984 
6985 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, totlen);
6986 
6987 	/* Kick TX ring. */
6988 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
6989 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
6990 
6991 	/* Mark TX ring as full if we reach a certain threshold. */
6992 	if (++ring->queued > IWM_TX_RING_HIMARK) {
6993 		sc->qfullmsk |= 1 << ring->qid;
6994 	}
6995 
6996 	if (ic->ic_if.if_flags & IFF_UP)
6997 		sc->sc_tx_timer[ring->qid] = 15;
6998 
6999 	return 0;
7000 }
7001 
7002 int
7003 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_queue_msk)
7004 {
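	/* Ask firmware to flush frames queued for our station, on all TIDs. */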
7005 	struct iwm_tx_path_flush_cmd flush_cmd = {
7006 		.sta_id = htole32(IWM_STATION_ID),
7007 		.tid_mask = htole16(0xffff),
7008 	};
7009 	int err;
7010 
7011 	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, 0,
7012 	    sizeof(flush_cmd), &flush_cmd);
7013 	if (err)
7014 		printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
7015 	return err;
7016 }
7017 
7018 #define IWM_FLUSH_WAIT_MS	2000
7019 
7020 int
7021 iwm_wait_tx_queues_empty(struct iwm_softc *sc)
7022 {
7023 	int i, err;
7024 
7025 	for (i = 0; i < IWM_MAX_QUEUES; i++) {
7026 		struct iwm_tx_ring *ring = &sc->txq[i];
7027 
7028 		if (i == sc->cmdqid)
7029 			continue;
7030 
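		/*
		 * Sleep until Tx completions drain the ring; the Tx
		 * interrupt path is expected to wake us up.
		 */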
7031 		while (ring->queued > 0) {
7032 			err = tsleep_nsec(ring, 0, "iwmflush",
7033 			    MSEC_TO_NSEC(IWM_FLUSH_WAIT_MS));
7034 			if (err)
7035 				return err;
7036 		}
7037 	}
7038 
7039 	return 0;
7040 }
7041 
7042 void
7043 iwm_led_enable(struct iwm_softc *sc)
7044 {
7045 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
7046 }
7047 
7048 void
7049 iwm_led_disable(struct iwm_softc *sc)
7050 {
7051 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
7052 }
7053 
7054 int
7055 iwm_led_is_enabled(struct iwm_softc *sc)
7056 {
7057 	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
7058 }
7059 
7060 #define IWM_LED_BLINK_TIMEOUT_MSEC    200
7061 
7062 void
7063 iwm_led_blink_timeout(void *arg)
7064 {
7065 	struct iwm_softc *sc = arg;
7066 
7067 	if (iwm_led_is_enabled(sc))
7068 		iwm_led_disable(sc);
7069 	else
7070 		iwm_led_enable(sc);
7071 
7072 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
7073 }
7074 
7075 void
7076 iwm_led_blink_start(struct iwm_softc *sc)
7077 {
7078 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
7079 	iwm_led_enable(sc);
7080 }
7081 
7082 void
7083 iwm_led_blink_stop(struct iwm_softc *sc)
7084 {
7085 	timeout_del(&sc->sc_led_blink_to);
7086 	iwm_led_disable(sc);
7087 }
7088 
7089 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
7090 
7091 int
7092 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
7093     struct iwm_beacon_filter_cmd *cmd)
7094 {
7095 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
7096 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
7097 }
7098 
7099 void
7100 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
7101     struct iwm_beacon_filter_cmd *cmd)
7102 {
7103 	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
7104 }
7105 
7106 int
7107 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
7108 {
7109 	struct iwm_beacon_filter_cmd cmd = {
7110 		IWM_BF_CMD_CONFIG_DEFAULTS,
7111 		.bf_enable_beacon_filter = htole32(1),
7112 		.ba_enable_beacon_abort = htole32(enable),
7113 	};
7114 
7115 	if (!sc->sc_bf.bf_enabled)
7116 		return 0;
7117 
7118 	sc->sc_bf.ba_enabled = enable;
7119 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
7120 	return iwm_beacon_filter_send_cmd(sc, &cmd);
7121 }
7122 
7123 void
7124 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
7125     struct iwm_mac_power_cmd *cmd)
7126 {
7127 	struct ieee80211com *ic = &sc->sc_ic;
7128 	struct ieee80211_node *ni = &in->in_ni;
7129 	int dtim_period, dtim_msec, keep_alive;
7130 
7131 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
7132 	    in->in_color));
7133 	if (ni->ni_dtimperiod)
7134 		dtim_period = ni->ni_dtimperiod;
7135 	else
7136 		dtim_period = 1;
7137 
7138 	/*
7139 	 * Regardless of power management state the driver must set
7140 	 * keep alive period. FW will use it for sending keep alive NDPs
7141 	 * immediately after association. Check that keep alive period
7142 	 * is at least 3 * DTIM.
7143 	 */
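	/* ni_intval is in TU (1.024ms); close enough to msec here. */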
7144 	dtim_msec = dtim_period * ni->ni_intval;
7145 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
7146 	keep_alive = roundup(keep_alive, 1000) / 1000;
7147 	cmd->keep_alive_seconds = htole16(keep_alive);
7148 
7149 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
7150 		cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
7151 }
7152 
7153 int
7154 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
7155 {
7156 	int err;
7157 	int ba_enable;
7158 	struct iwm_mac_power_cmd cmd;
7159 
7160 	memset(&cmd, 0, sizeof(cmd));
7161 
7162 	iwm_power_build_cmd(sc, in, &cmd);
7163 
7164 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
7165 	    sizeof(cmd), &cmd);
7166 	if (err != 0)
7167 		return err;
7168 
7169 	ba_enable = !!(cmd.flags &
7170 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
7171 	return iwm_update_beacon_abort(sc, in, ba_enable);
7172 }
7173 
7174 int
7175 iwm_power_update_device(struct iwm_softc *sc)
7176 {
7177 	struct iwm_device_power_cmd cmd = { };
7178 	struct ieee80211com *ic = &sc->sc_ic;
7179 
7180 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
7181 		cmd.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
7182 
7183 	return iwm_send_cmd_pdu(sc,
7184 	    IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
7185 }
7186 
7187 int
7188 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
7189 {
7190 	struct iwm_beacon_filter_cmd cmd = {
7191 		IWM_BF_CMD_CONFIG_DEFAULTS,
7192 		.bf_enable_beacon_filter = htole32(1),
7193 	};
7194 	int err;
7195 
7196 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
7197 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
7198 
7199 	if (err == 0)
7200 		sc->sc_bf.bf_enabled = 1;
7201 
7202 	return err;
7203 }
7204 
7205 int
7206 iwm_disable_beacon_filter(struct iwm_softc *sc)
7207 {
7208 	struct iwm_beacon_filter_cmd cmd;
7209 	int err;
7210 
7211 	memset(&cmd, 0, sizeof(cmd));
7212 
7213 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
7214 	if (err == 0)
7215 		sc->sc_bf.bf_enabled = 0;
7216 
7217 	return err;
7218 }
7219 
7220 int
7221 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
7222 {
7223 	struct iwm_add_sta_cmd add_sta_cmd;
7224 	int err;
7225 	uint32_t status, aggsize;
7226 	const uint32_t max_aggsize = (IWM_STA_FLG_MAX_AGG_SIZE_64K >>
7227 		    IWM_STA_FLG_MAX_AGG_SIZE_SHIFT);
7228 	size_t cmdsize;
7229 	struct ieee80211com *ic = &sc->sc_ic;
7230 
7231 	if (!update && (sc->sc_flags & IWM_FLAG_STA_ACTIVE))
7232 		panic("STA already added");
7233 
7234 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
7235 
7236 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7237 		add_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
7238 	else
7239 		add_sta_cmd.sta_id = IWM_STATION_ID;
7240 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)) {
7241 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
7242 			add_sta_cmd.station_type = IWM_STA_GENERAL_PURPOSE;
7243 		else
7244 			add_sta_cmd.station_type = IWM_STA_LINK;
7245 	}
7246 	add_sta_cmd.mac_id_n_color
7247 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
7248 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7249 		int qid;
7250 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, etheranyaddr);
7251 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7252 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
7253 		else
7254 			qid = IWM_AUX_QUEUE;
7255 		in->tfd_queue_msk |= (1 << qid);
7256 	} else {
7257 		int ac;
7258 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
7259 			int qid = ac;
7260 			if (isset(sc->sc_enabled_capa,
7261 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7262 				qid += IWM_DQA_MIN_MGMT_QUEUE;
7263 			in->tfd_queue_msk |= (1 << qid);
7264 		}
7265 	}
7266 	if (!update) {
7267 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
7268 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
7269 			    etherbroadcastaddr);
7270 		else
7271 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
7272 			    in->in_macaddr);
7273 	}
7274 	add_sta_cmd.add_modify = update ? 1 : 0;
7275 	add_sta_cmd.station_flags_msk
7276 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
7277 	if (update) {
7278 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_QUEUES |
7279 		    IWM_STA_MODIFY_TID_DISABLE_TX);
7280 	}
7281 	add_sta_cmd.tid_disable_tx = htole16(in->tid_disable_ampdu);
7282 	add_sta_cmd.tfd_queue_msk = htole32(in->tfd_queue_msk);
7283 
7284 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
7285 		add_sta_cmd.station_flags_msk
7286 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
7287 		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);
7288 
7289 		if (iwm_mimo_enabled(sc)) {
7290 			if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
7291 				uint16_t rx_mcs = (in->in_ni.ni_vht_rxmcs &
7292 				    IEEE80211_VHT_MCS_FOR_SS_MASK(2)) >>
7293 				    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2);
7294 				if (rx_mcs != IEEE80211_VHT_MCS_SS_NOT_SUPP) {
7295 					add_sta_cmd.station_flags |=
7296 					    htole32(IWM_STA_FLG_MIMO_EN_MIMO2);
7297 				}
7298 			} else {
7299 				if (in->in_ni.ni_rxmcs[1] != 0) {
7300 					add_sta_cmd.station_flags |=
7301 					    htole32(IWM_STA_FLG_MIMO_EN_MIMO2);
7302 				}
7303 				if (in->in_ni.ni_rxmcs[2] != 0) {
7304 					add_sta_cmd.station_flags |=
7305 					    htole32(IWM_STA_FLG_MIMO_EN_MIMO3);
7306 				}
7307 			}
7308 		}
7309 
7310 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
7311 		    ieee80211_node_supports_ht_chan40(&in->in_ni)) {
7312 			add_sta_cmd.station_flags |= htole32(
7313 			    IWM_STA_FLG_FAT_EN_40MHZ);
7314 		}
7315 
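		/*
		 * Limit the A-MPDU size advertised by the peer (VHT cap
		 * field, or the HT A-MPDU parameter set) to what the
		 * firmware supports, at most 64KB.
		 */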
7316 		if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
7317 			if (IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
7318 			    ieee80211_node_supports_vht_chan80(&in->in_ni)) {
7319 				add_sta_cmd.station_flags |= htole32(
7320 				    IWM_STA_FLG_FAT_EN_80MHZ);
7321 			}
7322 			aggsize = (in->in_ni.ni_vhtcaps &
7323 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_MASK) >>
7324 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT;
7325 		} else {
7326 			aggsize = (in->in_ni.ni_ampdu_param &
7327 			    IEEE80211_AMPDU_PARAM_LE);
7328 		}
7329 		if (aggsize > max_aggsize)
7330 			aggsize = max_aggsize;
7331 		add_sta_cmd.station_flags |= htole32((aggsize <<
7332 		    IWM_STA_FLG_MAX_AGG_SIZE_SHIFT) &
7333 		    IWM_STA_FLG_MAX_AGG_SIZE_MSK);
7334 
7335 		switch (in->in_ni.ni_ampdu_param & IEEE80211_AMPDU_PARAM_SS) {
7336 		case IEEE80211_AMPDU_PARAM_SS_2:
7337 			add_sta_cmd.station_flags
7338 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
7339 			break;
7340 		case IEEE80211_AMPDU_PARAM_SS_4:
7341 			add_sta_cmd.station_flags
7342 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
7343 			break;
7344 		case IEEE80211_AMPDU_PARAM_SS_8:
7345 			add_sta_cmd.station_flags
7346 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
7347 			break;
7348 		case IEEE80211_AMPDU_PARAM_SS_16:
7349 			add_sta_cmd.station_flags
7350 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
7351 			break;
7352 		default:
7353 			break;
7354 		}
7355 	}
7356 
7357 	status = IWM_ADD_STA_SUCCESS;
7358 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7359 		cmdsize = sizeof(add_sta_cmd);
7360 	else
7361 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
7362 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize,
7363 	    &add_sta_cmd, &status);
7364 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
7365 		err = EIO;
7366 
7367 	return err;
7368 }
7369 
7370 int
7371 iwm_add_aux_sta(struct iwm_softc *sc)
7372 {
7373 	struct iwm_add_sta_cmd cmd;
7374 	int err, qid;
7375 	uint32_t status;
7376 	size_t cmdsize;
7377 
7378 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
7379 		qid = IWM_DQA_AUX_QUEUE;
7380 		err = iwm_enable_txq(sc, IWM_AUX_STA_ID, qid,
7381 		    IWM_TX_FIFO_MCAST, 0, IWM_MAX_TID_COUNT, 0);
7382 	} else {
7383 		qid = IWM_AUX_QUEUE;
7384 		err = iwm_enable_ac_txq(sc, qid, IWM_TX_FIFO_MCAST);
7385 	}
7386 	if (err)
7387 		return err;
7388 
7389 	memset(&cmd, 0, sizeof(cmd));
7390 	cmd.sta_id = IWM_AUX_STA_ID;
7391 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7392 		cmd.station_type = IWM_STA_AUX_ACTIVITY;
7393 	cmd.mac_id_n_color =
7394 	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
7395 	cmd.tfd_queue_msk = htole32(1 << qid);
7396 	cmd.tid_disable_tx = htole16(0xffff);
7397 
7398 	status = IWM_ADD_STA_SUCCESS;
7399 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7400 		cmdsize = sizeof(cmd);
7401 	else
7402 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
7403 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
7404 	    &status);
7405 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
7406 		err = EIO;
7407 
7408 	return err;
7409 }
7410 
7411 int
7412 iwm_drain_sta(struct iwm_softc *sc, struct iwm_node* in, int drain)
7413 {
7414 	struct iwm_add_sta_cmd cmd;
7415 	int err;
7416 	uint32_t status;
7417 	size_t cmdsize;
7418 
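	/*
	 * The drain flag tells the firmware to stop serving this
	 * station so its queued frames can be flushed safely.
	 */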
7419 	memset(&cmd, 0, sizeof(cmd));
7420 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
7421 	    in->in_color));
7422 	cmd.sta_id = IWM_STATION_ID;
7423 	cmd.add_modify = IWM_STA_MODE_MODIFY;
7424 	cmd.station_flags = drain ? htole32(IWM_STA_FLG_DRAIN_FLOW) : 0;
7425 	cmd.station_flags_msk = htole32(IWM_STA_FLG_DRAIN_FLOW);
7426 
7427 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7428 		cmdsize = sizeof(cmd);
7429 	else
7430 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
7431 
7432 	status = IWM_ADD_STA_SUCCESS;
7433 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA,
7434 	    cmdsize, &cmd, &status);
7435 	if (err) {
7436 		printf("%s: could not update sta (error %d)\n",
7437 		    DEVNAME(sc), err);
7438 		return err;
7439 	}
7440 
7441 	switch (status & IWM_ADD_STA_STATUS_MASK) {
7442 	case IWM_ADD_STA_SUCCESS:
7443 		break;
7444 	default:
7445 		err = EIO;
7446 		printf("%s: Couldn't %s draining for station\n",
7447 		    DEVNAME(sc), drain ? "enable" : "disable");
7448 		break;
7449 	}
7450 
7451 	return err;
7452 }
7453 
7454 int
7455 iwm_flush_sta(struct iwm_softc *sc, struct iwm_node *in)
7456 {
7457 	int err;
7458 
7459 	sc->sc_flags |= IWM_FLAG_TXFLUSH;
7460 
7461 	err = iwm_drain_sta(sc, in, 1);
7462 	if (err)
7463 		goto done;
7464 
7465 	err = iwm_flush_tx_path(sc, in->tfd_queue_msk);
7466 	if (err) {
7467 		printf("%s: could not flush Tx path (error %d)\n",
7468 		    DEVNAME(sc), err);
7469 		goto done;
7470 	}
7471 
7472 	/*
7473 	 * Flushing Tx rings may fail if the AP has disappeared.
7474 	 * We can rely on iwm_newstate_task() to reset everything and begin
7475 	 * scanning again if we are left with outstanding frames on queues.
7476 	 */
7477 	err = iwm_wait_tx_queues_empty(sc);
7478 	if (err)
7479 		goto done;
7480 
7481 	err = iwm_drain_sta(sc, in, 0);
7482 done:
7483 	sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
7484 	return err;
7485 }
7486 
7487 int
7488 iwm_rm_sta_cmd(struct iwm_softc *sc, struct iwm_node *in)
7489 {
7490 	struct ieee80211com *ic = &sc->sc_ic;
7491 	struct iwm_rm_sta_cmd rm_sta_cmd;
7492 	int err;
7493 
7494 	if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
7495 		panic("sta already removed");
7496 
7497 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
7498 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7499 		rm_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
7500 	else
7501 		rm_sta_cmd.sta_id = IWM_STATION_ID;
7502 
7503 	err = iwm_send_cmd_pdu(sc, IWM_REMOVE_STA, 0, sizeof(rm_sta_cmd),
7504 	    &rm_sta_cmd);
7505 
7506 	return err;
7507 }
7508 
7509 uint16_t
7510 iwm_scan_rx_chain(struct iwm_softc *sc)
7511 {
7512 	uint16_t rx_chain;
7513 	uint8_t rx_ant;
7514 
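	/* Use all valid RX chains while scanning. */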
7515 	rx_ant = iwm_fw_valid_rx_ant(sc);
7516 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
7517 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
7518 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
7519 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
7520 	return htole16(rx_chain);
7521 }
7522 
7523 uint32_t
7524 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
7525 {
7526 	uint32_t tx_ant;
7527 	int i, ind;
7528 
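	/*
	 * Round-robin across valid TX antennas so that successive
	 * scans do not always probe on the same chain.
	 */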
7529 	for (i = 0, ind = sc->sc_scan_last_antenna;
7530 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
7531 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
7532 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
7533 			sc->sc_scan_last_antenna = ind;
7534 			break;
7535 		}
7536 	}
7537 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
7538 
7539 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
7540 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
7541 				   tx_ant);
7542 	else
7543 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
7544 }
7545 
7546 uint8_t
7547 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
7548     struct iwm_scan_channel_cfg_lmac *chan, int n_ssids, int bgscan)
7549 {
7550 	struct ieee80211com *ic = &sc->sc_ic;
7551 	struct ieee80211_channel *c;
7552 	uint8_t nchan;
7553 
7554 	for (nchan = 0, c = &ic->ic_channels[1];
7555 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7556 	    nchan < sc->sc_capa_n_scan_channels;
7557 	    c++) {
7558 		if (c->ic_flags == 0)
7559 			continue;
7560 
7561 		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
7562 		chan->iter_count = htole16(1);
7563 		chan->iter_interval = 0;
7564 		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
7565 		if (n_ssids != 0 && !bgscan)
7566 			chan->flags |= htole32(1 << 1); /* select SSID 0 */
7567 		chan++;
7568 		nchan++;
7569 	}
7570 
7571 	return nchan;
7572 }
7573 
7574 uint8_t
7575 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
7576     struct iwm_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
7577 {
7578 	struct ieee80211com *ic = &sc->sc_ic;
7579 	struct ieee80211_channel *c;
7580 	uint8_t nchan;
7581 
7582 	for (nchan = 0, c = &ic->ic_channels[1];
7583 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7584 	    nchan < sc->sc_capa_n_scan_channels;
7585 	    c++) {
7586 		if (c->ic_flags == 0)
7587 			continue;
7588 
7589 		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
7590 		chan->iter_count = 1;
7591 		chan->iter_interval = htole16(0);
7592 		if (n_ssids != 0 && !bgscan)
7593 			chan->flags = htole32(1 << 0); /* select SSID 0 */
7594 		chan++;
7595 		nchan++;
7596 	}
7597 
7598 	return nchan;
7599 }
7600 
7601 int
7602 iwm_fill_probe_req_v1(struct iwm_softc *sc, struct iwm_scan_probe_req_v1 *preq1)
7603 {
7604 	struct iwm_scan_probe_req preq2;
7605 	int err, i;
7606 
7607 	err = iwm_fill_probe_req(sc, &preq2);
7608 	if (err)
7609 		return err;
7610 
7611 	preq1->mac_header = preq2.mac_header;
7612 	for (i = 0; i < nitems(preq1->band_data); i++)
7613 		preq1->band_data[i] = preq2.band_data[i];
7614 	preq1->common_data = preq2.common_data;
7615 	memcpy(preq1->buf, preq2.buf, sizeof(preq1->buf));
7616 	return 0;
7617 }
7618 
7619 int
7620 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
7621 {
7622 	struct ieee80211com *ic = &sc->sc_ic;
7623 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
7624 	struct ieee80211_rateset *rs;
7625 	size_t remain = sizeof(preq->buf);
7626 	uint8_t *frm, *pos;
7627 
7628 	memset(preq, 0, sizeof(*preq));
7629 
7630 	if (remain < sizeof(*wh) + 2)
7631 		return ENOBUFS;
7632 
7633 	/*
7634 	 * Build a probe request frame.  Most of the following code is a
7635 	 * copy & paste of what is done in net80211.
7636 	 */
7637 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
7638 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
7639 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
7640 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
7641 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
7642 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
7643 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
7644 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
7645 
7646 	frm = (uint8_t *)(wh + 1);
7647 
7648 	*frm++ = IEEE80211_ELEMID_SSID;
7649 	*frm++ = 0;
7650 	/* hardware inserts SSID */
7651 
7652 	/* Tell firmware where the MAC header and SSID IE are. */
7653 	preq->mac_header.offset = 0;
7654 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
7655 	remain -= frm - (uint8_t *)wh;
7656 
7657 	/* Fill in 2GHz IEs and tell firmware where they are. */
7658 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
7659 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
7660 		if (remain < 4 + rs->rs_nrates)
7661 			return ENOBUFS;
7662 	} else if (remain < 2 + rs->rs_nrates)
7663 		return ENOBUFS;
7664 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
7665 	pos = frm;
7666 	frm = ieee80211_add_rates(frm, rs);
7667 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
7668 		frm = ieee80211_add_xrates(frm, rs);
7669 	remain -= frm - pos;
7670 
7671 	if (isset(sc->sc_enabled_capa,
7672 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
7673 		if (remain < 3)
7674 			return ENOBUFS;
7675 		*frm++ = IEEE80211_ELEMID_DSPARMS;
7676 		*frm++ = 1;
7677 		*frm++ = 0;
7678 		remain -= 3;
7679 	}
7680 	preq->band_data[0].len = htole16(frm - pos);
7681 
7682 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
7683 		/* Fill in 5GHz IEs. */
7684 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
7685 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
7686 			if (remain < 4 + rs->rs_nrates)
7687 				return ENOBUFS;
7688 		} else if (remain < 2 + rs->rs_nrates)
7689 			return ENOBUFS;
7690 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
7691 		pos = frm;
7692 		frm = ieee80211_add_rates(frm, rs);
7693 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
7694 			frm = ieee80211_add_xrates(frm, rs);
7695 		preq->band_data[1].len = htole16(frm - pos);
7696 		remain -= frm - pos;
7697 		if (ic->ic_flags & IEEE80211_F_VHTON) {
7698 			if (remain < 14)
7699 				return ENOBUFS;
7700 			frm = ieee80211_add_vhtcaps(frm, ic);
7701 			remain -= frm - pos;
7702 			preq->band_data[1].len = htole16(frm - pos);
7703 		}
7704 	}
7705 
7706 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
7707 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
7708 	pos = frm;
7709 	if (ic->ic_flags & IEEE80211_F_HTON) {
7710 		if (remain < 28)
7711 			return ENOBUFS;
7712 		frm = ieee80211_add_htcaps(frm, ic);
7713 		/* XXX add WME info? */
7714 		remain -= frm - pos;
7715 	}
7716 
7717 	preq->common_data.len = htole16(frm - pos);
7718 
7719 	return 0;
7720 }
7721 
7722 int
7723 iwm_lmac_scan(struct iwm_softc *sc, int bgscan)
7724 {
7725 	struct ieee80211com *ic = &sc->sc_ic;
7726 	struct iwm_host_cmd hcmd = {
7727 		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
7728 		.len = { 0, },
7729 		.data = { NULL, },
7730 		.flags = 0,
7731 	};
7732 	struct iwm_scan_req_lmac *req;
7733 	struct iwm_scan_probe_req_v1 *preq;
7734 	size_t req_len;
7735 	int err, async = bgscan;
7736 
7737 	req_len = sizeof(struct iwm_scan_req_lmac) +
7738 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
7739 	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req_v1);
7740 	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
7741 		return ENOMEM;
7742 	req = malloc(req_len, M_DEVBUF,
7743 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
7744 	if (req == NULL)
7745 		return ENOMEM;
7746 
7747 	hcmd.len[0] = (uint16_t)req_len;
7748 	hcmd.data[0] = (void *)req;
7749 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
7750 
7751 	/* These timings correspond to iwlwifi's UNASSOC scan. */
7752 	req->active_dwell = 10;
7753 	req->passive_dwell = 110;
7754 	req->fragmented_dwell = 44;
7755 	req->extended_dwell = 90;
7756 	if (bgscan) {
7757 		req->max_out_time = htole32(120);
7758 		req->suspend_time = htole32(120);
7759 	} else {
7760 		req->max_out_time = htole32(0);
7761 		req->suspend_time = htole32(0);
7762 	}
7763 	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
7764 	req->rx_chain_select = iwm_scan_rx_chain(sc);
7765 	req->iter_num = htole32(1);
7766 	req->delay = 0;
7767 
7768 	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
7769 	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
7770 	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
7771 	if (ic->ic_des_esslen == 0)
7772 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
7773 	else
7774 		req->scan_flags |=
7775 		    htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
7776 	if (isset(sc->sc_enabled_capa,
7777 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) &&
7778 	    isset(sc->sc_enabled_capa,
7779 	    IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
7780 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
7781 
7782 	req->flags = htole32(IWM_PHY_BAND_24);
7783 	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
7784 		req->flags |= htole32(IWM_PHY_BAND_5);
7785 	req->filter_flags =
7786 	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
7787 
7788 	/* Tx flags 2 GHz. */
7789 	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
7790 	    IWM_TX_CMD_FLG_BT_DIS);
7791 	req->tx_cmd[0].rate_n_flags =
7792 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
7793 	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
7794 
7795 	/* Tx flags 5 GHz. */
7796 	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
7797 	    IWM_TX_CMD_FLG_BT_DIS);
7798 	req->tx_cmd[1].rate_n_flags =
7799 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
7800 	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
7801 
7802 	/* Check if we're doing an active directed scan. */
7803 	if (ic->ic_des_esslen != 0) {
7804 		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
7805 		req->direct_scan[0].len = ic->ic_des_esslen;
7806 		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
7807 		    ic->ic_des_esslen);
7808 	}
7809 
7810 	req->n_channels = iwm_lmac_scan_fill_channels(sc,
7811 	    (struct iwm_scan_channel_cfg_lmac *)req->data,
7812 	    ic->ic_des_esslen != 0, bgscan);
7813 
7814 	preq = (struct iwm_scan_probe_req_v1 *)(req->data +
7815 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
7816 	    sc->sc_capa_n_scan_channels));
7817 	err = iwm_fill_probe_req_v1(sc, preq);
7818 	if (err) {
7819 		free(req, M_DEVBUF, req_len);
7820 		return err;
7821 	}
7822 
7823 	/* Specify the scan plan: We'll do one iteration. */
7824 	req->schedule[0].iterations = 1;
7825 	req->schedule[0].full_scan_mul = 1;
7826 
7827 	/* Disable EBS. */
7828 	req->channel_opt[0].non_ebs_ratio = 1;
7829 	req->channel_opt[1].non_ebs_ratio = 1;
7830 
7831 	err = iwm_send_cmd(sc, &hcmd);
7832 	free(req, M_DEVBUF, req_len);
7833 	return err;
7834 }
7835 
7836 int
7837 iwm_config_umac_scan(struct iwm_softc *sc)
7838 {
7839 	struct ieee80211com *ic = &sc->sc_ic;
7840 	struct iwm_scan_config *scan_config;
7841 	int err, nchan;
7842 	size_t cmd_size;
7843 	struct ieee80211_channel *c;
7844 	struct iwm_host_cmd hcmd = {
7845 		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_LONG_GROUP, 0),
7846 		.flags = 0,
7847 	};
7848 	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
7849 	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
7850 	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
7851 	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
7852 	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
7853 	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
7854 	    IWM_SCAN_CONFIG_RATE_54M);
7855 
7856 	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
7857 
7858 	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
7859 	if (scan_config == NULL)
7860 		return ENOMEM;
7861 
7862 	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
7863 	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
7864 	scan_config->legacy_rates = htole32(rates |
7865 	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
7866 
7867 	/* These timings correspond to iwlwifi's UNASSOC scan. */
7868 	scan_config->dwell_active = 10;
7869 	scan_config->dwell_passive = 110;
7870 	scan_config->dwell_fragmented = 44;
7871 	scan_config->dwell_extended = 90;
7872 	scan_config->out_of_channel_time = htole32(0);
7873 	scan_config->suspend_time = htole32(0);
7874 
7875 	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
7876 
7877 	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
7878 	scan_config->channel_flags = 0;
7879 
7880 	for (c = &ic->ic_channels[1], nchan = 0;
7881 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7882 	    nchan < sc->sc_capa_n_scan_channels; c++) {
7883 		if (c->ic_flags == 0)
7884 			continue;
7885 		scan_config->channel_array[nchan++] =
7886 		    ieee80211_mhz2ieee(c->ic_freq, 0);
7887 	}
7888 
7889 	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
7890 	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
7891 	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
7892 	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
7893 	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
7894 	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
7895 	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
7896 	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
7897 	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
7898 	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
7899 	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
7900 
7901 	hcmd.data[0] = scan_config;
7902 	hcmd.len[0] = cmd_size;
7903 
7904 	err = iwm_send_cmd(sc, &hcmd);
7905 	free(scan_config, M_DEVBUF, cmd_size);
7906 	return err;
7907 }
7908 
7909 int
7910 iwm_umac_scan_size(struct iwm_softc *sc)
7911 {
7912 	int base_size = IWM_SCAN_REQ_UMAC_SIZE_V1;
7913 	int tail_size;
7914 
7915 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7916 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V8;
7917 	else if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7918 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V7;
7919 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
7920 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v2);
7921 	else
7922 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v1);
7923 
7924 	return base_size + sizeof(struct iwm_scan_channel_cfg_umac) *
7925 	    sc->sc_capa_n_scan_channels + tail_size;
7926 }
7927 
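/*
 * The UMAC scan request layout varies with the firmware's adaptive
 * dwell API version. These helpers locate the channel parameters
 * and the per-channel config array within the versioned structure.
 */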
7928 struct iwm_scan_umac_chan_param *
7929 iwm_get_scan_req_umac_chan_param(struct iwm_softc *sc,
7930     struct iwm_scan_req_umac *req)
7931 {
7932 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7933 		return &req->v8.channel;
7934 
7935 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7936 		return &req->v7.channel;
7937 
7938 	return &req->v1.channel;
7939 }
7940 
7941 void *
7942 iwm_get_scan_req_umac_data(struct iwm_softc *sc, struct iwm_scan_req_umac *req)
7943 {
7944 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7945 		return (void *)&req->v8.data;
7946 
7947 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7948 		return (void *)&req->v7.data;
7949 
7950 	return (void *)&req->v1.data;
7951 
7952 }
7953 
7954 /* adaptive dwell max budget time [TU] for full scan */
7955 #define IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
7956 /* adaptive dwell max budget time [TU] for directed scan */
7957 #define IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
7958 /* adaptive dwell default high band APs number */
7959 #define IWM_SCAN_ADWELL_DEFAULT_HB_N_APS 8
7960 /* adaptive dwell default low band APs number */
7961 #define IWM_SCAN_ADWELL_DEFAULT_LB_N_APS 2
7962 /* adaptive dwell default APs number in social channels (1, 6, 11) */
7963 #define IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
7964 
7965 int
7966 iwm_umac_scan(struct iwm_softc *sc, int bgscan)
7967 {
7968 	struct ieee80211com *ic = &sc->sc_ic;
7969 	struct iwm_host_cmd hcmd = {
7970 		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_LONG_GROUP, 0),
7971 		.len = { 0, },
7972 		.data = { NULL, },
7973 		.flags = 0,
7974 	};
7975 	struct iwm_scan_req_umac *req;
7976 	void *cmd_data, *tail_data;
7977 	struct iwm_scan_req_umac_tail_v2 *tail;
7978 	struct iwm_scan_req_umac_tail_v1 *tailv1;
7979 	struct iwm_scan_umac_chan_param *chanparam;
7980 	size_t req_len;
7981 	int err, async = bgscan;
7982 
7983 	req_len = iwm_umac_scan_size(sc);
7984 	if ((req_len < IWM_SCAN_REQ_UMAC_SIZE_V1 +
7985 	    sizeof(struct iwm_scan_req_umac_tail_v1)) ||
7986 	    req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
7987 		return ERANGE;
7988 	req = malloc(req_len, M_DEVBUF,
7989 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
7990 	if (req == NULL)
7991 		return ENOMEM;
7992 
7993 	hcmd.len[0] = (uint16_t)req_len;
7994 	hcmd.data[0] = (void *)req;
7995 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
7996 
7997 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
7998 		req->v7.adwell_default_n_aps_social =
7999 			IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
8000 		req->v7.adwell_default_n_aps =
8001 			IWM_SCAN_ADWELL_DEFAULT_LB_N_APS;
8002 
8003 		if (ic->ic_des_esslen != 0)
8004 			req->v7.adwell_max_budget =
8005 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
8006 		else
8007 			req->v7.adwell_max_budget =
8008 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
8009 
8010 		req->v7.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
8011 		req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = 0;
8012 		req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = 0;
8013 
8014 		if (isset(sc->sc_ucode_api,
8015 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
8016 			req->v8.active_dwell[IWM_SCAN_LB_LMAC_IDX] = 10;
8017 			req->v8.passive_dwell[IWM_SCAN_LB_LMAC_IDX] = 110;
8018 		} else {
8019 			req->v7.active_dwell = 10;
8020 			req->v7.passive_dwell = 110;
8021 			req->v7.fragmented_dwell = 44;
8022 		}
8023 	} else {
8024 		/* These timings correspond to iwlwifi's UNASSOC scan. */
8025 		req->v1.active_dwell = 10;
8026 		req->v1.passive_dwell = 110;
8027 		req->v1.fragmented_dwell = 44;
8028 		req->v1.extended_dwell = 90;
8029 
8030 		req->v1.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
8031 	}
8032 
8033 	if (bgscan) {
8034 		const uint32_t timeout = htole32(120);
8035 		if (isset(sc->sc_ucode_api,
8036 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
8037 			req->v8.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8038 			req->v8.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8039 		} else if (isset(sc->sc_ucode_api,
8040 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
8041 			req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8042 			req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8043 		} else {
8044 			req->v1.max_out_time = timeout;
8045 			req->v1.suspend_time = timeout;
8046 		}
8047 	}
8048 
8049 	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
8050 
8051 	cmd_data = iwm_get_scan_req_umac_data(sc, req);
8052 	chanparam = iwm_get_scan_req_umac_chan_param(sc, req);
8053 	chanparam->count = iwm_umac_scan_fill_channels(sc,
8054 	    (struct iwm_scan_channel_cfg_umac *)cmd_data,
8055 	    ic->ic_des_esslen != 0, bgscan);
8056 	chanparam->flags = 0;
8057 
8058 	tail_data = cmd_data + sizeof(struct iwm_scan_channel_cfg_umac) *
8059 	    sc->sc_capa_n_scan_channels;
8060 	tail = tail_data;
8061 	/* tail v1 layout differs in preq and direct_scan member fields. */
8062 	tailv1 = tail_data;
8063 
8064 	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
8065 	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);
8066 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
8067 		req->v8.general_flags2 =
8068 			IWM_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
8069 	}
8070 
8071 	if (ic->ic_des_esslen != 0) {
8072 		if (isset(sc->sc_ucode_api,
8073 		    IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
8074 			tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
8075 			tail->direct_scan[0].len = ic->ic_des_esslen;
8076 			memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
8077 			    ic->ic_des_esslen);
8078 		} else {
8079 			tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
8080 			tailv1->direct_scan[0].len = ic->ic_des_esslen;
8081 			memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
8082 			    ic->ic_des_esslen);
8083 		}
8084 		req->general_flags |=
8085 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
8086 	} else
8087 		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
8088 
8089 	if (isset(sc->sc_enabled_capa,
8090 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) &&
8091 	    isset(sc->sc_enabled_capa,
8092 	    IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
8093 		req->general_flags |=
8094 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
8095 
8096 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
8097 		req->general_flags |=
8098 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL);
8099 	} else {
8100 		req->general_flags |=
8101 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
8102 	}
8103 
8104 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
8105 		err = iwm_fill_probe_req(sc, &tail->preq);
8106 	else
8107 		err = iwm_fill_probe_req_v1(sc, &tailv1->preq);
8108 	if (err) {
8109 		free(req, M_DEVBUF, req_len);
8110 		return err;
8111 	}
8112 
8113 	/* Specify the scan plan: We'll do one iteration. */
8114 	tail->schedule[0].interval = 0;
8115 	tail->schedule[0].iter_count = 1;
8116 
8117 	err = iwm_send_cmd(sc, &hcmd);
8118 	free(req, M_DEVBUF, req_len);
8119 	return err;
8120 }
8121 
8122 void
8123 iwm_mcc_update(struct iwm_softc *sc, struct iwm_mcc_chub_notif *notif)
8124 {
8125 	struct ieee80211com *ic = &sc->sc_ic;
8126 	struct ifnet *ifp = IC2IFP(ic);
8127 	char alpha2[3];
8128 
8129 	snprintf(alpha2, sizeof(alpha2), "%c%c",
8130 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
8131 
8132 	if (ifp->if_flags & IFF_DEBUG) {
8133 		printf("%s: firmware has detected regulatory domain '%s' "
8134 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
8135 	}
8136 
8137 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
8138 }
8139 
8140 uint8_t
8141 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
8142 {
8143 	int i;
8144 	uint8_t rval;
8145 
8146 	for (i = 0; i < rs->rs_nrates; i++) {
8147 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
8148 		if (rval == iwm_rates[ridx].rate)
8149 			return rs->rs_rates[i];
8150 	}
8151 
8152 	return 0;
8153 }
8154 
8155 int
8156 iwm_rval2ridx(int rval)
8157 {
8158 	int ridx;
8159 
8160 	for (ridx = 0; ridx < nitems(iwm_rates); ridx++) {
8161 		if (iwm_rates[ridx].plcp == IWM_RATE_INVM_PLCP)
8162 			continue;
8163 		if (rval == iwm_rates[ridx].rate)
8164 			break;
8165 	}
8166 
8167 	return ridx;
8168 }
8169 
8170 void
8171 iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
8172     int *ofdm_rates)
8173 {
8174 	struct ieee80211_node *ni = &in->in_ni;
8175 	struct ieee80211_rateset *rs = &ni->ni_rates;
8176 	int lowest_present_ofdm = -1;
8177 	int lowest_present_cck = -1;
8178 	uint8_t cck = 0;
8179 	uint8_t ofdm = 0;
8180 	int i;
8181 
8182 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
8183 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
8184 		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
8185 			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
8186 				continue;
8187 			cck |= (1 << i);
8188 			if (lowest_present_cck == -1 || lowest_present_cck > i)
8189 				lowest_present_cck = i;
8190 		}
8191 	}
8192 	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
8193 		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
8194 			continue;
8195 		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
8196 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
8197 			lowest_present_ofdm = i;
8198 	}
8199 
8200 	/*
8201 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
8202 	 * variables. This isn't sufficient though, as there might not
8203 	 * be all the right rates in the bitmap. E.g. if the only basic
8204 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
8205 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
8206 	 *
8207 	 *    [...] a STA responding to a received frame shall transmit
8208 	 *    its Control Response frame [...] at the highest rate in the
8209 	 *    BSSBasicRateSet parameter that is less than or equal to the
8210 	 *    rate of the immediately previous frame in the frame exchange
8211 	 *    sequence ([...]) and that is of the same modulation class
8212 	 *    ([...]) as the received frame. If no rate contained in the
8213 	 *    BSSBasicRateSet parameter meets these conditions, then the
8214 	 *    control frame sent in response to a received frame shall be
8215 	 *    transmitted at the highest mandatory rate of the PHY that is
8216 	 *    less than or equal to the rate of the received frame, and
8217 	 *    that is of the same modulation class as the received frame.
8218 	 *
8219 	 * As a consequence, we need to add all mandatory rates that are
8220 	 * lower than all of the basic rates to these bitmaps.
8221 	 */
8222 
8223 	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
8224 		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
8225 	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
8226 		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
8227 	/* 6M already there or needed so always add */
8228 	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
8229 
8230 	/*
8231 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
8232 	 * Note, however:
8233 	 *  - if no CCK rates are basic, it must be ERP since there must
8234 	 *    be some basic rates at all, so they're OFDM => ERP PHY
8235 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
8236 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
8237 	 *  - if 5.5M is basic, 1M and 2M are mandatory
8238 	 *  - if 2M is basic, 1M is mandatory
8239 	 *  - if 1M is basic, that's the only valid ACK rate.
8240 	 * As a consequence, it's not as complicated as it sounds, just add
8241 	 * any lower rates to the ACK rate bitmap.
8242 	 */
8243 	if (IWM_RATE_11M_INDEX < lowest_present_cck)
8244 		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
8245 	if (IWM_RATE_5M_INDEX < lowest_present_cck)
8246 		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
8247 	if (IWM_RATE_2M_INDEX < lowest_present_cck)
8248 		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
8249 	/* 1M already there or needed so always add */
8250 	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
8251 
8252 	*cck_rates = cck;
8253 	*ofdm_rates = ofdm;
8254 }
8255 
8256 void
8257 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
8258     struct iwm_mac_ctx_cmd *cmd, uint32_t action)
8259 {
8260 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
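	/* For example, ECWmin = 4 yields CWmin = 2^4 - 1 = 15 slots. */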
8261 	struct ieee80211com *ic = &sc->sc_ic;
8262 	struct ieee80211_node *ni = ic->ic_bss;
8263 	int cck_ack_rates, ofdm_ack_rates;
8264 	int i;
8265 
8266 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
8267 	    in->in_color));
8268 	cmd->action = htole32(action);
8269 
8270 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
8271 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_LISTENER);
8272 	else if (ic->ic_opmode == IEEE80211_M_STA)
8273 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
8274 	else
8275 		panic("unsupported operating mode %d", ic->ic_opmode);
8276 	cmd->tsf_id = htole32(IWM_TSF_ID_A);
8277 
8278 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
8279 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8280 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
8281 		return;
8282 	}
8283 
8284 	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
8285 	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
8286 	cmd->cck_rates = htole32(cck_ack_rates);
8287 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
8288 
8289 	cmd->cck_short_preamble
8290 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
8291 	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
8292 	cmd->short_slot
8293 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
8294 	      ? IWM_MAC_FLG_SHORT_SLOT : 0);
8295 
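	/*
	 * Translate the EDCA parameters for the firmware. Note that the
	 * EDCA TXOP limit is specified in units of 32 microseconds,
	 * hence the multiplication by 32 below.
	 */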
8296 	for (i = 0; i < EDCA_NUM_AC; i++) {
8297 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
8298 		int txf = iwm_ac_to_tx_fifo[i];
8299 
8300 		cmd->ac[txf].cw_min = htole16(IWM_EXP2(ac->ac_ecwmin));
8301 		cmd->ac[txf].cw_max = htole16(IWM_EXP2(ac->ac_ecwmax));
8302 		cmd->ac[txf].aifsn = ac->ac_aifsn;
8303 		cmd->ac[txf].fifos_mask = (1 << txf);
8304 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
8305 	}
8306 	if (ni->ni_flags & IEEE80211_NODE_QOS)
8307 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
8308 
8309 	if (ni->ni_flags & IEEE80211_NODE_HT) {
8310 		enum ieee80211_htprot htprot =
8311 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
8312 		switch (htprot) {
8313 		case IEEE80211_HTPROT_NONE:
8314 			break;
8315 		case IEEE80211_HTPROT_NONMEMBER:
8316 		case IEEE80211_HTPROT_NONHT_MIXED:
8317 			cmd->protection_flags |=
8318 			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
8319 			    IWM_MAC_PROT_FLG_FAT_PROT);
8320 			break;
8321 		case IEEE80211_HTPROT_20MHZ:
8322 			if (in->in_phyctxt &&
8323 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
8324 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
8325 				cmd->protection_flags |=
8326 				    htole32(IWM_MAC_PROT_FLG_HT_PROT |
8327 				    IWM_MAC_PROT_FLG_FAT_PROT);
8328 			}
8329 			break;
8330 		default:
8331 			break;
8332 		}
8333 
8334 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
8335 	}
8336 	if (ic->ic_flags & IEEE80211_F_USEPROT)
8337 		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
8338 
8339 	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
8340 #undef IWM_EXP2
8341 }
8342 
8343 void
8344 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
8345     struct iwm_mac_data_sta *sta, int assoc)
8346 {
8347 	struct ieee80211_node *ni = &in->in_ni;
8348 	uint32_t dtim_off;
8349 	uint64_t tsf;
8350 
8351 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
8352 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
8353 	tsf = letoh64(tsf);
8354 
8355 	sta->is_assoc = htole32(assoc);
8356 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
8357 	sta->dtim_tsf = htole64(tsf + dtim_off);
8358 	sta->bi = htole32(ni->ni_intval);
8359 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
8360 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
8361 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
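	/*
	 * The *_reciprocal fields above let the firmware substitute a
	 * multiplication for a division; iwm_reciprocal() returns
	 * roughly 2^32 / v.
	 */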
8362 	sta->listen_interval = htole32(10);
8363 	sta->assoc_id = htole32(ni->ni_associd);
8364 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
8365 }
8366 
8367 int
8368 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
8369     int assoc)
8370 {
8371 	struct ieee80211com *ic = &sc->sc_ic;
8372 	struct ieee80211_node *ni = &in->in_ni;
8373 	struct iwm_mac_ctx_cmd cmd;
8374 	int active = (sc->sc_flags & IWM_FLAG_MAC_ACTIVE);
8375 
8376 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
8377 		panic("MAC already added");
8378 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
8379 		panic("MAC already removed");
8380 
8381 	memset(&cmd, 0, sizeof(cmd));
8382 
8383 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action);
8384 
8385 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8386 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_PROMISC |
8387 		    IWM_MAC_FILTER_IN_CONTROL_AND_MGMT |
8388 		    IWM_MAC_FILTER_ACCEPT_GRP |
8389 		    IWM_MAC_FILTER_IN_BEACON |
8390 		    IWM_MAC_FILTER_IN_PROBE_REQUEST |
8391 		    IWM_MAC_FILTER_IN_CRC32);
8392 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
8393 		/*
8394 		 * Allow beacons to pass through as long as we are not
8395 		 * associated or we do not have DTIM period information.
8396 		 */
8397 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
8398 	else
8399 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
8400 
8401 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
8402 }
8403 
8404 int
8405 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in, int running)
8406 {
8407 	struct iwm_time_quota_cmd_v1 cmd;
8408 	int i, idx, num_active_macs, quota, quota_rem;
8409 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
8410 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
8411 	uint16_t id;
8412 
8413 	memset(&cmd, 0, sizeof(cmd));
8414 
8415 	/* currently, PHY ID == binding ID */
8416 	if (in && in->in_phyctxt) {
8417 		id = in->in_phyctxt->id;
8418 		KASSERT(id < IWM_MAX_BINDINGS);
8419 		colors[id] = in->in_phyctxt->color;
8420 		if (running)
8421 			n_ifs[id] = 1;
8422 	}
8423 
8424 	/*
8425 	 * The FW's scheduling session consists of
8426 	 * IWM_MAX_QUOTA fragments. Divide these fragments
8427 	 * equally between all the bindings that require quota.
8428 	 */
8429 	num_active_macs = 0;
8430 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
8431 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
8432 		num_active_macs += n_ifs[i];
8433 	}
8434 
8435 	quota = 0;
8436 	quota_rem = 0;
8437 	if (num_active_macs) {
8438 		quota = IWM_MAX_QUOTA / num_active_macs;
8439 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
8440 	}
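	/*
	 * For example, a single active MAC receives all IWM_MAX_QUOTA
	 * fragments with quota_rem 0; with three, each gets a third
	 * and the remainder goes to the first binding below.
	 */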
8441 
8442 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
8443 		if (colors[i] < 0)
8444 			continue;
8445 
8446 		cmd.quotas[idx].id_and_color =
8447 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
8448 
8449 		if (n_ifs[i] <= 0) {
8450 			cmd.quotas[idx].quota = htole32(0);
8451 			cmd.quotas[idx].max_duration = htole32(0);
8452 		} else {
8453 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
8454 			cmd.quotas[idx].max_duration = htole32(0);
8455 		}
8456 		idx++;
8457 	}
8458 
8459 	/* Give the remainder of the session to the first binding */
8460 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
8461 
8462 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_QUOTA_LOW_LATENCY)) {
8463 		struct iwm_time_quota_cmd cmd_v2;
8464 
8465 		memset(&cmd_v2, 0, sizeof(cmd_v2));
8466 		for (i = 0; i < IWM_MAX_BINDINGS; i++) {
8467 			cmd_v2.quotas[i].id_and_color =
8468 			    cmd.quotas[i].id_and_color;
8469 			cmd_v2.quotas[i].quota = cmd.quotas[i].quota;
8470 			cmd_v2.quotas[i].max_duration =
8471 			    cmd.quotas[i].max_duration;
8472 		}
8473 		return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
8474 		    sizeof(cmd_v2), &cmd_v2);
8475 	}
8476 
8477 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
8478 }
8479 
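/*
 * Tasks scheduled via iwm_add_task() hold a reference in sc->task_refs
 * so that iwm_stop() can wait for all pending tasks to drain. If
 * task_add() reports the task as already queued, the extra reference
 * is dropped again right away.
 */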
8480 void
8481 iwm_add_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
8482 {
8483 	int s = splnet();
8484 
8485 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
8486 		splx(s);
8487 		return;
8488 	}
8489 
8490 	refcnt_take(&sc->task_refs);
8491 	if (!task_add(taskq, task))
8492 		refcnt_rele_wake(&sc->task_refs);
8493 	splx(s);
8494 }
8495 
8496 void
8497 iwm_del_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
8498 {
8499 	if (task_del(taskq, task))
8500 		refcnt_rele(&sc->task_refs);
8501 }
8502 
8503 int
8504 iwm_scan(struct iwm_softc *sc)
8505 {
8506 	struct ieee80211com *ic = &sc->sc_ic;
8507 	struct ifnet *ifp = IC2IFP(ic);
8508 	int err;
8509 
8510 	if (sc->sc_flags & IWM_FLAG_BGSCAN) {
8511 		err = iwm_scan_abort(sc);
8512 		if (err) {
8513 			printf("%s: could not abort background scan\n",
8514 			    DEVNAME(sc));
8515 			return err;
8516 		}
8517 	}
8518 
8519 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8520 		err = iwm_umac_scan(sc, 0);
8521 	else
8522 		err = iwm_lmac_scan(sc, 0);
8523 	if (err) {
8524 		printf("%s: could not initiate scan\n", DEVNAME(sc));
8525 		return err;
8526 	}
8527 
8528 	/*
8529 	 * The current mode might have been fixed during association.
8530 	 * Ensure all channels get scanned.
8531 	 */
8532 	if (IFM_SUBTYPE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
8533 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
8534 
8535 	sc->sc_flags |= IWM_FLAG_SCANNING;
8536 	if (ifp->if_flags & IFF_DEBUG)
8537 		printf("%s: %s -> %s\n", ifp->if_xname,
8538 		    ieee80211_state_name[ic->ic_state],
8539 		    ieee80211_state_name[IEEE80211_S_SCAN]);
8540 	if ((sc->sc_flags & IWM_FLAG_BGSCAN) == 0) {
8541 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
8542 		ieee80211_node_cleanup(ic, ic->ic_bss);
8543 	}
8544 	ic->ic_state = IEEE80211_S_SCAN;
8545 	iwm_led_blink_start(sc);
8546 	wakeup(&ic->ic_state); /* wake iwm_init() */
8547 
8548 	return 0;
8549 }
8550 
8551 int
8552 iwm_bgscan(struct ieee80211com *ic)
8553 {
8554 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
8555 	int err;
8556 
8557 	if (sc->sc_flags & IWM_FLAG_SCANNING)
8558 		return 0;
8559 
8560 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8561 		err = iwm_umac_scan(sc, 1);
8562 	else
8563 		err = iwm_lmac_scan(sc, 1);
8564 	if (err) {
8565 		printf("%s: could not initiate scan\n", DEVNAME(sc));
8566 		return err;
8567 	}
8568 
8569 	sc->sc_flags |= IWM_FLAG_BGSCAN;
8570 	return 0;
8571 }
8572 
8573 void
8574 iwm_bgscan_done(struct ieee80211com *ic,
8575     struct ieee80211_node_switch_bss_arg *arg, size_t arg_size)
8576 {
8577 	struct iwm_softc *sc = ic->ic_softc;
8578 
8579 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
8580 	sc->bgscan_unref_arg = arg;
8581 	sc->bgscan_unref_arg_size = arg_size;
8582 	iwm_add_task(sc, systq, &sc->bgscan_done_task);
8583 }
8584 
8585 void
8586 iwm_bgscan_done_task(void *arg)
8587 {
8588 	struct iwm_softc *sc = arg;
8589 	struct ieee80211com *ic = &sc->sc_ic;
8590 	struct iwm_node *in = (void *)ic->ic_bss;
8591 	struct ieee80211_node *ni = &in->in_ni;
8592 	int tid, err = 0, s = splnet();
8593 
8594 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
8595 	    (ic->ic_flags & IEEE80211_F_BGSCAN) == 0 ||
8596 	    ic->ic_state != IEEE80211_S_RUN) {
8597 		err = ENXIO;
8598 		goto done;
8599 	}
8600 
8601 	for (tid = 0; tid < IWM_MAX_TID_COUNT; tid++) {
8602 		int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
8603 
8604 		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
8605 			continue;
8606 
8607 		err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
8608 		if (err)
8609 			goto done;
8610 		err = iwm_disable_txq(sc, IWM_STATION_ID, qid, tid);
8611 		if (err)
8612 			goto done;
8613 		in->tfd_queue_msk &= ~(1 << qid);
8614 #if 0 /* disabled for now; we are going to DEAUTH soon anyway */
8615 		IEEE80211_SEND_ACTION(ic, ni, IEEE80211_CATEG_BA,
8616 		    IEEE80211_ACTION_DELBA,
8617 		    IEEE80211_REASON_AUTH_LEAVE << 16 |
8618 		    IEEE80211_FC1_DIR_TODS << 8 | tid);
8619 #endif
8620 		ieee80211_node_tx_ba_clear(ni, tid);
8621 	}
8622 
8623 	err = iwm_flush_sta(sc, in);
8624 	if (err)
8625 		goto done;
8626 
8627 	/*
8628 	 * Tx queues have been flushed and Tx agg has been stopped.
8629 	 * Allow roaming to proceed.
8630 	 */
8631 	ni->ni_unref_arg = sc->bgscan_unref_arg;
8632 	ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
8633 	sc->bgscan_unref_arg = NULL;
8634 	sc->bgscan_unref_arg_size = 0;
8635 	ieee80211_node_tx_stopped(ic, &in->in_ni);
8636 done:
8637 	if (err) {
8638 		free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
8639 		sc->bgscan_unref_arg = NULL;
8640 		sc->bgscan_unref_arg_size = 0;
8641 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
8642 			task_add(systq, &sc->init_task);
8643 	}
8644 	refcnt_rele_wake(&sc->task_refs);
8645 	splx(s);
8646 }
8647 
8648 int
8649 iwm_umac_scan_abort(struct iwm_softc *sc)
8650 {
8651 	struct iwm_umac_scan_abort cmd = { 0 };
8652 
8653 	return iwm_send_cmd_pdu(sc,
8654 	    IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC),
8655 	    0, sizeof(cmd), &cmd);
8656 }
8657 
8658 int
8659 iwm_lmac_scan_abort(struct iwm_softc *sc)
8660 {
8661 	struct iwm_host_cmd cmd = {
8662 		.id = IWM_SCAN_OFFLOAD_ABORT_CMD,
8663 	};
8664 	int err, status;
8665 
8666 	err = iwm_send_cmd_status(sc, &cmd, &status);
8667 	if (err)
8668 		return err;
8669 
8670 	if (status != IWM_CAN_ABORT_STATUS) {
8671 		/*
8672 		 * The scan abort will return 1 for success or
8673 		 * 2 for "failure".  A failure condition can be
8674 		 * due to simply not being in an active scan, which
8675 		 * can occur if we send the scan abort before the
8676 		 * microcode has notified us that a scan has completed.
8677 		 */
8678 		return EBUSY;
8679 	}
8680 
8681 	return 0;
8682 }
8683 
8684 int
8685 iwm_scan_abort(struct iwm_softc *sc)
8686 {
8687 	int err;
8688 
8689 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8690 		err = iwm_umac_scan_abort(sc);
8691 	else
8692 		err = iwm_lmac_scan_abort(sc);
8693 
8694 	if (err == 0)
8695 		sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
8696 	return err;
8697 }
8698 
8699 int
8700 iwm_phy_ctxt_update(struct iwm_softc *sc, struct iwm_phy_ctxt *phyctxt,
8701     struct ieee80211_channel *chan, uint8_t chains_static,
8702     uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
8703     uint8_t vht_chan_width)
8704 {
8705 	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
8706 	int err;
8707 
8708 	if (isset(sc->sc_enabled_capa,
8709 	    IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
8710 	    (phyctxt->channel->ic_flags & band_flags) !=
8711 	    (chan->ic_flags & band_flags)) {
8712 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8713 		    chains_dynamic, IWM_FW_CTXT_ACTION_REMOVE, apply_time, sco,
8714 		    vht_chan_width);
8715 		if (err) {
8716 			printf("%s: could not remove PHY context "
8717 			    "(error %d)\n", DEVNAME(sc), err);
8718 			return err;
8719 		}
8720 		phyctxt->channel = chan;
8721 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8722 		    chains_dynamic, IWM_FW_CTXT_ACTION_ADD, apply_time, sco,
8723 		    vht_chan_width);
8724 		if (err) {
8725 			printf("%s: could not add PHY context "
8726 			    "(error %d)\n", DEVNAME(sc), err);
8727 			return err;
8728 		}
8729 	} else {
8730 		phyctxt->channel = chan;
8731 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8732 		    chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, apply_time, sco,
8733 		    vht_chan_width);
8734 		if (err) {
8735 			printf("%s: could not update PHY context (error %d)\n",
8736 			    DEVNAME(sc), err);
8737 			return err;
8738 		}
8739 	}
8740 
8741 	phyctxt->sco = sco;
8742 	phyctxt->vht_chan_width = vht_chan_width;
8743 	return 0;
8744 }
8745 
8746 int
8747 iwm_auth(struct iwm_softc *sc)
8748 {
8749 	struct ieee80211com *ic = &sc->sc_ic;
8750 	struct iwm_node *in = (void *)ic->ic_bss;
8751 	uint32_t duration;
8752 	int generation = sc->sc_generation, err;
8753 
8754 	splassert(IPL_NET);
8755 
8756 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8757 		err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8758 		    ic->ic_ibss_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8759 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8760 		if (err)
8761 			return err;
8762 	} else {
8763 		err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8764 		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8765 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8766 		if (err)
8767 			return err;
8768 	}
8769 	in->in_phyctxt = &sc->sc_phyctxt[0];
8770 	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
8771 	iwm_setrates(in, 0);
8772 
8773 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
8774 	if (err) {
8775 		printf("%s: could not add MAC context (error %d)\n",
8776 		    DEVNAME(sc), err);
8777 		return err;
8778 	}
8779 	sc->sc_flags |= IWM_FLAG_MAC_ACTIVE;
8780 
8781 	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
8782 	if (err) {
8783 		printf("%s: could not add binding (error %d)\n",
8784 		    DEVNAME(sc), err);
8785 		goto rm_mac_ctxt;
8786 	}
8787 	sc->sc_flags |= IWM_FLAG_BINDING_ACTIVE;
8788 
8789 	in->tid_disable_ampdu = 0xffff;
8790 	err = iwm_add_sta_cmd(sc, in, 0);
8791 	if (err) {
8792 		printf("%s: could not add sta (error %d)\n",
8793 		    DEVNAME(sc), err);
8794 		goto rm_binding;
8795 	}
8796 	sc->sc_flags |= IWM_FLAG_STA_ACTIVE;
8797 
8798 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
8799 		return 0;
8800 
8801 	/*
8802 	 * Prevent the FW from wandering off channel during association
8803 	 * by "protecting" the session with a time event.
8804 	 */
8805 	if (in->in_ni.ni_intval)
8806 		duration = in->in_ni.ni_intval * 2;
8807 	else
8808 		duration = IEEE80211_DUR_TU;
8809 	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
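	/*
	 * The duration is measured in TU (1024 usec each); two beacon
	 * intervals should comfortably cover the auth/assoc exchange.
	 */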
8810 
8811 	return 0;
8812 
8813 rm_binding:
8814 	if (generation == sc->sc_generation) {
8815 		iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
8816 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8817 	}
8818 rm_mac_ctxt:
8819 	if (generation == sc->sc_generation) {
8820 		iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
8821 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8822 	}
8823 	return err;
8824 }
8825 
8826 int
8827 iwm_deauth(struct iwm_softc *sc)
8828 {
8829 	struct ieee80211com *ic = &sc->sc_ic;
8830 	struct iwm_node *in = (void *)ic->ic_bss;
8831 	int err;
8832 
8833 	splassert(IPL_NET);
8834 
8835 	iwm_unprotect_session(sc, in);
8836 
8837 	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
8838 		err = iwm_flush_sta(sc, in);
8839 		if (err)
8840 			return err;
8841 		err = iwm_rm_sta_cmd(sc, in);
8842 		if (err) {
8843 			printf("%s: could not remove STA (error %d)\n",
8844 			    DEVNAME(sc), err);
8845 			return err;
8846 		}
8847 		in->tid_disable_ampdu = 0xffff;
8848 		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
8849 		sc->sc_rx_ba_sessions = 0;
8850 		sc->ba_rx.start_tidmask = 0;
8851 		sc->ba_rx.stop_tidmask = 0;
8852 		sc->tx_ba_queue_mask = 0;
8853 		sc->ba_tx.start_tidmask = 0;
8854 		sc->ba_tx.stop_tidmask = 0;
8855 	}
8856 
8857 	if (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE) {
8858 		err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
8859 		if (err) {
8860 			printf("%s: could not remove binding (error %d)\n",
8861 			    DEVNAME(sc), err);
8862 			return err;
8863 		}
8864 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8865 	}
8866 
8867 	if (sc->sc_flags & IWM_FLAG_MAC_ACTIVE) {
8868 		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
8869 		if (err) {
8870 			printf("%s: could not remove MAC context (error %d)\n",
8871 			    DEVNAME(sc), err);
8872 			return err;
8873 		}
8874 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8875 	}
8876 
8877 	/* Move unused PHY context to a default channel. */
8878 	err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8879 	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8880 	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8881 	if (err)
8882 		return err;
8883 
8884 	return 0;
8885 }
8886 
8887 int
8888 iwm_run(struct iwm_softc *sc)
8889 {
8890 	struct ieee80211com *ic = &sc->sc_ic;
8891 	struct iwm_node *in = (void *)ic->ic_bss;
8892 	struct ieee80211_node *ni = &in->in_ni;
8893 	int err;
8894 
8895 	splassert(IPL_NET);
8896 
8897 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8898 		/* Add a MAC context and a sniffing STA. */
8899 		err = iwm_auth(sc);
8900 		if (err)
8901 			return err;
8902 	}
8903 
8904 	/* Configure Rx chains for MIMO and configure 40 MHz channel. */
8905 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8906 		uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
8907 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8908 		    in->in_phyctxt->channel, chains, chains,
8909 		    0, IEEE80211_HTOP0_SCO_SCN,
8910 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8911 		if (err) {
8912 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8913 			return err;
8914 		}
8915 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
8916 		uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
8917 		uint8_t sco, vht_chan_width;
8918 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
8919 		    ieee80211_node_supports_ht_chan40(ni))
8920 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
8921 		else
8922 			sco = IEEE80211_HTOP0_SCO_SCN;
8923 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
8924 		    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
8925 		    ieee80211_node_supports_vht_chan80(ni))
8926 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
8927 		else
8928 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
8929 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8930 		    in->in_phyctxt->channel, chains, chains,
8931 		    0, sco, vht_chan_width);
8932 		if (err) {
8933 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8934 			return err;
8935 		}
8936 	}
8937 
8938 	/* Update STA again to apply HT and VHT settings. */
8939 	err = iwm_add_sta_cmd(sc, in, 1);
8940 	if (err) {
8941 		printf("%s: could not update STA (error %d)\n",
8942 		    DEVNAME(sc), err);
8943 		return err;
8944 	}
8945 
8946 	/* We have now been assigned an associd by the AP. */
8947 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
8948 	if (err) {
8949 		printf("%s: failed to update MAC\n", DEVNAME(sc));
8950 		return err;
8951 	}
8952 
8953 	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
8954 	if (err) {
8955 		printf("%s: could not set sf full on (error %d)\n",
8956 		    DEVNAME(sc), err);
8957 		return err;
8958 	}
8959 
8960 	err = iwm_allow_mcast(sc);
8961 	if (err) {
8962 		printf("%s: could not allow mcast (error %d)\n",
8963 		    DEVNAME(sc), err);
8964 		return err;
8965 	}
8966 
8967 	err = iwm_power_update_device(sc);
8968 	if (err) {
8969 		printf("%s: could not send power command (error %d)\n",
8970 		    DEVNAME(sc), err);
8971 		return err;
8972 	}
8973 #ifdef notyet
8974 	/*
8975 	 * Disabled for now. Default beacon filter settings
8976 	 * prevent net80211 from getting ERP and HT protection
8977 	 * updates from beacons.
8978 	 */
8979 	err = iwm_enable_beacon_filter(sc, in);
8980 	if (err) {
8981 		printf("%s: could not enable beacon filter\n",
8982 		    DEVNAME(sc));
8983 		return err;
8984 	}
8985 #endif
8986 	err = iwm_power_mac_update_mode(sc, in);
8987 	if (err) {
8988 		printf("%s: could not update MAC power (error %d)\n",
8989 		    DEVNAME(sc), err);
8990 		return err;
8991 	}
8992 
8993 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
8994 		err = iwm_update_quotas(sc, in, 1);
8995 		if (err) {
8996 			printf("%s: could not update quotas (error %d)\n",
8997 			    DEVNAME(sc), err);
8998 			return err;
8999 		}
9000 	}
9001 
9002 	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
9003 	ieee80211_ra_node_init(&in->in_rn);
9004 	ieee80211_ra_vht_node_init(&in->in_rn_vht);
9005 
9006 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
9007 		iwm_led_blink_start(sc);
9008 		return 0;
9009 	}
9010 
9011 	/* Start at the lowest available bit-rate; AMRR will raise it. */
9012 	in->in_ni.ni_txrate = 0;
9013 	in->in_ni.ni_txmcs = 0;
9014 	in->in_ni.ni_vht_ss = 1;
9015 	iwm_setrates(in, 0);
9016 
9017 	timeout_add_msec(&sc->sc_calib_to, 500);
9018 	iwm_led_enable(sc);
9019 
9020 	return 0;
9021 }
9022 
9023 int
9024 iwm_run_stop(struct iwm_softc *sc)
9025 {
9026 	struct ieee80211com *ic = &sc->sc_ic;
9027 	struct iwm_node *in = (void *)ic->ic_bss;
9028 	struct ieee80211_node *ni = &in->in_ni;
9029 	int err, i, tid;
9030 
9031 	splassert(IPL_NET);
9032 
9033 	/*
9034 	 * Stop Tx/Rx BA sessions now. We cannot rely on the BA task
9035 	 * for this when moving out of RUN state since it runs in a
9036 	 * separate thread.
9037 	 * Note that in->in_ni (struct ieee80211_node) already represents
9038 	 * our new access point in case we are roaming between APs.
9039 	 * This means we cannot rely on struct ieee80211_node to tell
9040 	 * us which BA sessions exist.
9041 	 */
9042 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
9043 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
9044 		if (rxba->baid == IWM_RX_REORDER_DATA_INVALID_BAID)
9045 			continue;
9046 		err = iwm_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
9047 		if (err)
9048 			return err;
9049 		iwm_clear_reorder_buffer(sc, rxba);
9050 		if (sc->sc_rx_ba_sessions > 0)
9051 			sc->sc_rx_ba_sessions--;
9052 	}
9053 	for (tid = 0; tid < IWM_MAX_TID_COUNT; tid++) {
9054 		int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
9055 		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
9056 			continue;
9057 		err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
9058 		if (err)
9059 			return err;
9060 		err = iwm_disable_txq(sc, IWM_STATION_ID, qid, tid);
9061 		if (err)
9062 			return err;
9063 		in->tfd_queue_msk &= ~(1 << qid);
9064 	}
9065 	ieee80211_ba_del(ni);
9066 
9067 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
9068 		iwm_led_blink_stop(sc);
9069 
9070 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
9071 	if (err)
9072 		return err;
9073 
9074 	iwm_disable_beacon_filter(sc);
9075 
9076 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
9077 		err = iwm_update_quotas(sc, in, 0);
9078 		if (err) {
9079 			printf("%s: could not update quotas (error %d)\n",
9080 			    DEVNAME(sc), err);
9081 			return err;
9082 		}
9083 	}
9084 
9085 	/* Mark station as disassociated. */
9086 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
9087 	if (err) {
9088 		printf("%s: failed to update MAC\n", DEVNAME(sc));
9089 		return err;
9090 	}
9091 
9092 	/* Reset Tx chains in case MIMO or 40 MHz channels were enabled. */
9093 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
9094 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
9095 		    in->in_phyctxt->channel, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
9096 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
9097 		if (err) {
9098 			printf("%s: failed to update PHY\n", DEVNAME(sc));
9099 			return err;
9100 		}
9101 	}
9102 
9103 	return 0;
9104 }
9105 
9106 struct ieee80211_node *
9107 iwm_node_alloc(struct ieee80211com *ic)
9108 {
9109 	return malloc(sizeof (struct iwm_node), M_DEVBUF, M_NOWAIT | M_ZERO);
9110 }
9111 
9112 int
9113 iwm_set_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
9114     struct ieee80211_key *k)
9115 {
9116 	struct iwm_softc *sc = ic->ic_softc;
9117 	struct iwm_add_sta_key_cmd_v1 cmd;
9118 
9119 	memset(&cmd, 0, sizeof(cmd));
9120 
9121 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
9122 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
9123 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9124 	    IWM_STA_KEY_FLG_KEYID_MSK));
9125 	if (k->k_flags & IEEE80211_KEY_GROUP)
9126 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
9127 
9128 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9129 	cmd.common.key_offset = 0;
9130 	cmd.common.sta_id = IWM_STATION_ID;
9131 
9132 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
9133 	    sizeof(cmd), &cmd);
9134 }
9135 
9136 int
9137 iwm_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
9138     struct ieee80211_key *k)
9139 {
9140 	struct iwm_softc *sc = ic->ic_softc;
9141 	struct iwm_add_sta_key_cmd cmd;
9142 
9143 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
9144 	    k->k_cipher != IEEE80211_CIPHER_CCMP)  {
9145 		/* Fallback to software crypto for other ciphers. */
9146 		return (ieee80211_set_key(ic, ni, k));
9147 	}
9148 
9149 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
9150 		return iwm_set_key_v1(ic, ni, k);
9151 
9152 	memset(&cmd, 0, sizeof(cmd));
9153 
9154 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
9155 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
9156 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9157 	    IWM_STA_KEY_FLG_KEYID_MSK));
9158 	if (k->k_flags & IEEE80211_KEY_GROUP)
9159 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
9160 
9161 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9162 	cmd.common.key_offset = 0;
9163 	cmd.common.sta_id = IWM_STATION_ID;
9164 
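	/*
	 * This version of the command also carries the key's transmit
	 * sequence counter (the CCMP packet number) along with the
	 * key material.
	 */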
9165 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
9166 
9167 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
9168 	    sizeof(cmd), &cmd);
9169 }
9170 
9171 void
9172 iwm_delete_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
9173     struct ieee80211_key *k)
9174 {
9175 	struct iwm_softc *sc = ic->ic_softc;
9176 	struct iwm_add_sta_key_cmd_v1 cmd;
9177 
9178 	memset(&cmd, 0, sizeof(cmd));
9179 
9180 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
9181 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
9182 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9183 	    IWM_STA_KEY_FLG_KEYID_MSK));
9184 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9185 	cmd.common.key_offset = 0;
9186 	cmd.common.sta_id = IWM_STATION_ID;
9187 
9188 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
9189 }
9190 
9191 void
9192 iwm_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
9193     struct ieee80211_key *k)
9194 {
9195 	struct iwm_softc *sc = ic->ic_softc;
9196 	struct iwm_add_sta_key_cmd cmd;
9197 
9198 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
9199 	    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
9200 		/* Fallback to software crypto for other ciphers. */
9201 		ieee80211_delete_key(ic, ni, k);
9202 		return;
9203 	}
9204 
9205 	if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
9206 		return;
9207 
9208 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
9209 		return iwm_delete_key_v1(ic, ni, k);
9210 
9211 	memset(&cmd, 0, sizeof(cmd));
9212 
9213 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
9214 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
9215 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9216 	    IWM_STA_KEY_FLG_KEYID_MSK));
9217 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9218 	cmd.common.key_offset = 0;
9219 	cmd.common.sta_id = IWM_STATION_ID;
9220 
9221 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
9222 }
9223 
9224 void
9225 iwm_calib_timeout(void *arg)
9226 {
9227 	struct iwm_softc *sc = arg;
9228 	struct ieee80211com *ic = &sc->sc_ic;
9229 	struct iwm_node *in = (void *)ic->ic_bss;
9230 	struct ieee80211_node *ni = &in->in_ni;
9231 	int s;
9232 
9233 	s = splnet();
9234 	if ((ic->ic_fixed_rate == -1 || ic->ic_fixed_mcs == -1) &&
9235 	    (ni->ni_flags & IEEE80211_NODE_HT) == 0 &&
9236 	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
9237 		int old_txrate = ni->ni_txrate;
9238 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
9239 		/*
9240 		 * If AMRR has chosen a new TX rate we must update
9241 		 * the firmware's LQ rate table.
9242 		 * ni_txrate may change again before the task runs so
9243 		 * cache the chosen rate in the iwm_node structure.
9244 		 */
9245 		if (ni->ni_txrate != old_txrate)
9246 			iwm_setrates(in, 1);
9247 	}
9248 
9249 	splx(s);
9250 
9251 	timeout_add_msec(&sc->sc_calib_to, 500);
9252 }
9253 
9254 void
9255 iwm_set_rate_table_vht(struct iwm_node *in, struct iwm_lq_cmd *lqcmd)
9256 {
9257 	struct ieee80211_node *ni = &in->in_ni;
9258 	struct ieee80211com *ic = ni->ni_ic;
9259 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9260 	int ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
9261 	int i, tab, txmcs;
9262 
9263 	/*
9264 	 * Fill the LQ rate selection table with VHT rates in descending
9265 	 * order, i.e. with the node's current TX rate first. Keep reducing
9266 	 * channel width during later Tx attempts, and eventually fall back
9267 	 * to legacy OFDM. Do not mix SISO and MIMO rates.
9268 	 */
9269 	lqcmd->mimo_delim = 0;
9270 	txmcs = ni->ni_txmcs;
9271 	for (i = 0; i < nitems(lqcmd->rs_table); i++) {
9272 		if (txmcs >= 0) {
9273 			tab = IWM_RATE_MCS_VHT_MSK;
9274 			tab |= txmcs & IWM_RATE_VHT_MCS_RATE_CODE_MSK;
9275 			tab |= ((ni->ni_vht_ss - 1) <<
9276 			    IWM_RATE_VHT_MCS_NSS_POS) &
9277 			    IWM_RATE_VHT_MCS_NSS_MSK;
9278 			if (ni->ni_vht_ss > 1)
9279 				tab |= IWM_RATE_MCS_ANT_AB_MSK;
9280 			else
9281 				tab |= iwm_valid_siso_ant_rate_mask(sc);
9282 
9283 			/*
9284 			 * First two Tx attempts may use 80MHz/40MHz/SGI.
9285 			 * Next two Tx attempts may use 40MHz/SGI.
9286 			 * Beyond that use 20 MHz and decrease the rate.
9287 			 * As a special case, MCS 9 is invalid on 20 MHz.
9288 			 */
9289 			if (txmcs == 9) {
9290 				if (i < 2 && in->in_phyctxt->vht_chan_width >=
9291 				    IEEE80211_VHTOP0_CHAN_WIDTH_80)
9292 					tab |= IWM_RATE_MCS_CHAN_WIDTH_80;
9293 				else if (in->in_phyctxt->sco ==
9294 				    IEEE80211_HTOP0_SCO_SCA ||
9295 				    in->in_phyctxt->sco ==
9296 				    IEEE80211_HTOP0_SCO_SCB)
9297 					tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
9298 				else {
9299 					/* no 40 MHz, fall back on MCS 8 */
9300 					tab &= ~IWM_RATE_VHT_MCS_RATE_CODE_MSK;
9301 					tab |= 8;
9302 				}
9303 
9304 				tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9305 				if (i < 4) {
9306 					if (ieee80211_ra_vht_use_sgi(ni))
9307 						tab |= IWM_RATE_MCS_SGI_MSK;
9308 				} else
9309 					txmcs--;
9310 			} else if (i < 2 && in->in_phyctxt->vht_chan_width >=
9311 			    IEEE80211_VHTOP0_CHAN_WIDTH_80) {
9312 				tab |= IWM_RATE_MCS_CHAN_WIDTH_80;
9313 				tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9314 				if (ieee80211_ra_vht_use_sgi(ni))
9315 					tab |= IWM_RATE_MCS_SGI_MSK;
9316 			} else if (i < 4 &&
9317 			    in->in_phyctxt->vht_chan_width >=
9318 			    IEEE80211_VHTOP0_CHAN_WIDTH_HT &&
9319 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
9320 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
9321 				tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
9322 				tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9323 				if (ieee80211_ra_vht_use_sgi(ni))
9324 					tab |= IWM_RATE_MCS_SGI_MSK;
9325 			} else if (txmcs >= 0)
9326 				txmcs--;
9327 		} else {
9328 			/* Fill the rest with the lowest possible rate. */
9329 			tab = iwm_rates[ridx_min].plcp;
9330 			tab |= iwm_valid_siso_ant_rate_mask(sc);
9331 			if (ni->ni_vht_ss > 1 && lqcmd->mimo_delim == 0)
9332 				lqcmd->mimo_delim = i;
9333 		}
9334 
9335 		lqcmd->rs_table[i] = htole32(tab);
9336 	}
9337 }
9338 
9339 void
9340 iwm_set_rate_table(struct iwm_node *in, struct iwm_lq_cmd *lqcmd)
9341 {
9342 	struct ieee80211_node *ni = &in->in_ni;
9343 	struct ieee80211com *ic = ni->ni_ic;
9344 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9345 	struct ieee80211_rateset *rs = &ni->ni_rates;
9346 	int i, ridx, ridx_min, ridx_max, j, mimo, tab = 0;
9347 
9348 	/*
9349 	 * Fill the LQ rate selection table with legacy and/or HT rates
9350 	 * in descending order, i.e. with the node's current TX rate first.
9351 	 * In cases where throughput of an HT rate corresponds to a legacy
9352 	 * rate it makes no sense to add both. We rely on the fact that
9353 	 * iwm_rates is laid out such that equivalent HT/legacy rates share
9354 	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
9355 	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
9356 	 */
9357 	j = 0;
9358 	ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
9359 	mimo = iwm_is_mimo_ht_mcs(ni->ni_txmcs);
9360 	ridx_max = (mimo ? IWM_RIDX_MAX : IWM_LAST_HT_SISO_RATE);
9361 	for (ridx = ridx_max; ridx >= ridx_min; ridx--) {
9362 		uint8_t plcp = iwm_rates[ridx].plcp;
9363 		uint8_t ht_plcp = iwm_rates[ridx].ht_plcp;
9364 
9365 		if (j >= nitems(lqcmd->rs_table))
9366 			break;
9367 		tab = 0;
9368 		if (ni->ni_flags & IEEE80211_NODE_HT) {
9369 			if (ht_plcp == IWM_RATE_HT_SISO_MCS_INV_PLCP)
9370 				continue;
9371 			/* Do not mix SISO and MIMO HT rates. */
9372 			if ((mimo && !iwm_is_mimo_ht_plcp(ht_plcp)) ||
9373 			    (!mimo && iwm_is_mimo_ht_plcp(ht_plcp)))
9374 				continue;
9375 			for (i = ni->ni_txmcs; i >= 0; i--) {
9376 				if (isclr(ni->ni_rxmcs, i))
9377 					continue;
9378 				if (ridx != iwm_ht_mcs2ridx[i])
9379 					continue;
9380 				tab = ht_plcp;
9381 				tab |= IWM_RATE_MCS_HT_MSK;
9382 				/* First two Tx attempts may use 40MHz/SGI. */
9383 				if (j > 1)
9384 					break;
9385 				if (in->in_phyctxt->sco ==
9386 				    IEEE80211_HTOP0_SCO_SCA ||
9387 				    in->in_phyctxt->sco ==
9388 				    IEEE80211_HTOP0_SCO_SCB) {
9389 					tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
9390 					tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9391 				}
9392 				if (ieee80211_ra_use_ht_sgi(ni))
9393 					tab |= IWM_RATE_MCS_SGI_MSK;
9394 				break;
9395 			}
9396 		} else if (plcp != IWM_RATE_INVM_PLCP) {
9397 			for (i = ni->ni_txrate; i >= 0; i--) {
9398 				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
9399 				    IEEE80211_RATE_VAL)) {
9400 					tab = plcp;
9401 					break;
9402 				}
9403 			}
9404 		}
9405 
9406 		if (tab == 0)
9407 			continue;
9408 
9409 		if (iwm_is_mimo_ht_plcp(ht_plcp))
9410 			tab |= IWM_RATE_MCS_ANT_AB_MSK;
9411 		else
9412 			tab |= iwm_valid_siso_ant_rate_mask(sc);
9413 
9414 		if (IWM_RIDX_IS_CCK(ridx))
9415 			tab |= IWM_RATE_MCS_CCK_MSK;
9416 		lqcmd->rs_table[j++] = htole32(tab);
9417 	}
9418 
9419 	lqcmd->mimo_delim = (mimo ? j : 0);
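	/*
	 * mimo_delim marks the first non-MIMO entry in rs_table, so
	 * the firmware knows where the MIMO portion of the table ends.
	 */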
9420 
9421 	/* Fill the rest with the lowest possible rate. */
9422 	while (j < nitems(lqcmd->rs_table)) {
9423 		tab = iwm_rates[ridx_min].plcp;
9424 		if (IWM_RIDX_IS_CCK(ridx_min))
9425 			tab |= IWM_RATE_MCS_CCK_MSK;
9426 		tab |= iwm_valid_siso_ant_rate_mask(sc);
9427 		lqcmd->rs_table[j++] = htole32(tab);
9428 	}
9429 }
9430 
9431 void
9432 iwm_setrates(struct iwm_node *in, int async)
9433 {
9434 	struct ieee80211_node *ni = &in->in_ni;
9435 	struct ieee80211com *ic = ni->ni_ic;
9436 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9437 	struct iwm_lq_cmd lqcmd;
9438 	struct iwm_host_cmd cmd = {
9439 		.id = IWM_LQ_CMD,
9440 		.len = { sizeof(lqcmd), },
9441 	};
9442 
9443 	cmd.flags = async ? IWM_CMD_ASYNC : 0;
9444 
9445 	memset(&lqcmd, 0, sizeof(lqcmd));
9446 	lqcmd.sta_id = IWM_STATION_ID;
9447 
9448 	if (ic->ic_flags & IEEE80211_F_USEPROT)
9449 		lqcmd.flags |= IWM_LQ_FLAG_USE_RTS_MSK;
9450 
9451 	if (ni->ni_flags & IEEE80211_NODE_VHT)
9452 		iwm_set_rate_table_vht(in, &lqcmd);
9453 	else
9454 		iwm_set_rate_table(in, &lqcmd);
9455 
9456 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000 &&
9457 	    (iwm_fw_valid_tx_ant(sc) & IWM_ANT_B))
9458 		lqcmd.single_stream_ant_msk = IWM_ANT_B;
9459 	else
9460 		lqcmd.single_stream_ant_msk = IWM_ANT_A;
9461 	lqcmd.dual_stream_ant_msk = IWM_ANT_AB;
9462 
9463 	lqcmd.agg_time_limit = htole16(4000);	/* 4ms */
9464 	lqcmd.agg_disable_start_th = 3;
9465 	lqcmd.agg_frame_cnt_limit = 0x3f;
9466 
9467 	cmd.data[0] = &lqcmd;
9468 	iwm_send_cmd(sc, &cmd);
9469 }
9470 
9471 int
9472 iwm_media_change(struct ifnet *ifp)
9473 {
9474 	struct iwm_softc *sc = ifp->if_softc;
9475 	struct ieee80211com *ic = &sc->sc_ic;
9476 	uint8_t rate, ridx;
9477 	int err;
9478 
9479 	err = ieee80211_media_change(ifp);
9480 	if (err != ENETRESET)
9481 		return err;
9482 
9483 	if (ic->ic_fixed_mcs != -1)
9484 		sc->sc_fixed_ridx = iwm_ht_mcs2ridx[ic->ic_fixed_mcs];
9485 	else if (ic->ic_fixed_rate != -1) {
9486 		rate = ic->ic_sup_rates[ic->ic_curmode].
9487 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
9488 		/* Map 802.11 rate to HW rate index. */
9489 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
9490 			if (iwm_rates[ridx].rate == rate)
9491 				break;
9492 		sc->sc_fixed_ridx = ridx;
9493 	}
9494 
9495 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
9496 	    (IFF_UP | IFF_RUNNING)) {
9497 		iwm_stop(ifp);
9498 		err = iwm_init(ifp);
9499 	}
9500 	return err;
9501 }
9502 
9503 void
9504 iwm_newstate_task(void *psc)
9505 {
9506 	struct iwm_softc *sc = (struct iwm_softc *)psc;
9507 	struct ieee80211com *ic = &sc->sc_ic;
9508 	enum ieee80211_state nstate = sc->ns_nstate;
9509 	enum ieee80211_state ostate = ic->ic_state;
9510 	int arg = sc->ns_arg;
9511 	int err = 0, s = splnet();
9512 
9513 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
9514 		/* iwm_stop() is waiting for us. */
9515 		refcnt_rele_wake(&sc->task_refs);
9516 		splx(s);
9517 		return;
9518 	}
9519 
9520 	if (ostate == IEEE80211_S_SCAN) {
9521 		if (nstate == ostate) {
9522 			if (sc->sc_flags & IWM_FLAG_SCANNING) {
9523 				refcnt_rele_wake(&sc->task_refs);
9524 				splx(s);
9525 				return;
9526 			}
9527 			/* Firmware is no longer scanning. Do another scan. */
9528 			goto next_scan;
9529 		} else
9530 			iwm_led_blink_stop(sc);
9531 	}
9532 
9533 	if (nstate <= ostate) {
9534 		switch (ostate) {
9535 		case IEEE80211_S_RUN:
9536 			err = iwm_run_stop(sc);
9537 			if (err)
9538 				goto out;
9539 			/* FALLTHROUGH */
9540 		case IEEE80211_S_ASSOC:
9541 		case IEEE80211_S_AUTH:
9542 			if (nstate <= IEEE80211_S_AUTH) {
9543 				err = iwm_deauth(sc);
9544 				if (err)
9545 					goto out;
9546 			}
9547 			/* FALLTHROUGH */
9548 		case IEEE80211_S_SCAN:
9549 		case IEEE80211_S_INIT:
9550 			break;
9551 		}
9552 
9553 		/* Die now if iwm_stop() was called while we were sleeping. */
9554 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
9555 			refcnt_rele_wake(&sc->task_refs);
9556 			splx(s);
9557 			return;
9558 		}
9559 	}
9560 
9561 	switch (nstate) {
9562 	case IEEE80211_S_INIT:
9563 		break;
9564 
9565 	case IEEE80211_S_SCAN:
9566 next_scan:
9567 		err = iwm_scan(sc);
9568 		if (err)
9569 			break;
9570 		refcnt_rele_wake(&sc->task_refs);
9571 		splx(s);
9572 		return;
9573 
9574 	case IEEE80211_S_AUTH:
9575 		err = iwm_auth(sc);
9576 		break;
9577 
9578 	case IEEE80211_S_ASSOC:
9579 		break;
9580 
9581 	case IEEE80211_S_RUN:
9582 		err = iwm_run(sc);
9583 		break;
9584 	}
9585 
9586 out:
9587 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
9588 		if (err)
9589 			task_add(systq, &sc->init_task);
9590 		else
9591 			sc->sc_newstate(ic, nstate, arg);
9592 	}
9593 	refcnt_rele_wake(&sc->task_refs);
9594 	splx(s);
9595 }
9596 
9597 int
9598 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
9599 {
9600 	struct ifnet *ifp = IC2IFP(ic);
9601 	struct iwm_softc *sc = ifp->if_softc;
9602 
9603 	/*
9604 	 * Prevent attempts to transition towards the same state, unless
9605 	 * we are scanning, in which case a SCAN -> SCAN transition
9606 	 * triggers another scan iteration; AUTH -> AUTH is needed
9607 	 * to support band-steering.
9608 	 */
9609 	if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
9610 	    nstate != IEEE80211_S_AUTH)
9611 		return 0;
9612 
9613 	if (ic->ic_state == IEEE80211_S_RUN) {
9614 		timeout_del(&sc->sc_calib_to);
9615 		iwm_del_task(sc, systq, &sc->ba_task);
9616 		iwm_del_task(sc, systq, &sc->mac_ctxt_task);
9617 		iwm_del_task(sc, systq, &sc->phy_ctxt_task);
9618 		iwm_del_task(sc, systq, &sc->bgscan_done_task);
9619 	}
9620 
9621 	sc->ns_nstate = nstate;
9622 	sc->ns_arg = arg;
9623 
9624 	iwm_add_task(sc, sc->sc_nswq, &sc->newstate_task);
9625 
9626 	return 0;
9627 }
9628 
9629 void
9630 iwm_endscan(struct iwm_softc *sc)
9631 {
9632 	struct ieee80211com *ic = &sc->sc_ic;
9633 
9634 	if ((sc->sc_flags & (IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN)) == 0)
9635 		return;
9636 
9637 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
9638 	ieee80211_end_scan(&ic->ic_if);
9639 }
9640 
9641 /*
9642  * Aging and idle timeouts for the different possible scenarios
9643  * in the default configuration.
9644  */
9645 static const uint32_t
9646 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
9647 	{
9648 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
9649 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
9650 	},
9651 	{
9652 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
9653 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
9654 	},
9655 	{
9656 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
9657 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
9658 	},
9659 	{
9660 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
9661 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
9662 	},
9663 	{
9664 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
9665 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
9666 	},
9667 };
9668 
9669 /*
9670  * Aging and idle timeouts for the different possible scenarios
9671  * in single BSS MAC configuration.
9672  */
9673 static const uint32_t
9674 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
9675 	{
9676 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
9677 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
9678 	},
9679 	{
9680 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
9681 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
9682 	},
9683 	{
9684 		htole32(IWM_SF_MCAST_AGING_TIMER),
9685 		htole32(IWM_SF_MCAST_IDLE_TIMER)
9686 	},
9687 	{
9688 		htole32(IWM_SF_BA_AGING_TIMER),
9689 		htole32(IWM_SF_BA_IDLE_TIMER)
9690 	},
9691 	{
9692 		htole32(IWM_SF_TX_RE_AGING_TIMER),
9693 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
9694 	},
9695 };
9696 
9697 void
9698 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
9699     struct ieee80211_node *ni)
9700 {
9701 	int i, j, watermark;
9702 
9703 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
9704 
9705 	/*
9706 	 * If we are in the association flow, check the antenna configuration
9707 	 * capabilities of the AP station and choose the watermark accordingly.
9708 	 */
9709 	if (ni) {
9710 		if (ni->ni_flags & IEEE80211_NODE_HT) {
9711 			if (ni->ni_rxmcs[1] != 0)
9712 				watermark = IWM_SF_W_MARK_MIMO2;
9713 			else
9714 				watermark = IWM_SF_W_MARK_SISO;
9715 		} else {
9716 			watermark = IWM_SF_W_MARK_LEGACY;
9717 		}
9718 	/* default watermark value for unassociated mode. */
9719 	} else {
9720 		watermark = IWM_SF_W_MARK_MIMO2;
9721 	}
9722 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
9723 
9724 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
9725 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
9726 			sf_cmd->long_delay_timeouts[i][j] =
9727 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
9728 		}
9729 	}
9730 
9731 	if (ni) {
9732 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
9733 		       sizeof(iwm_sf_full_timeout));
9734 	} else {
9735 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
9736 		       sizeof(iwm_sf_full_timeout_def));
9737 	}
9738 
9739 }
9740 
9741 int
9742 iwm_sf_config(struct iwm_softc *sc, int new_state)
9743 {
9744 	struct ieee80211com *ic = &sc->sc_ic;
9745 	struct iwm_sf_cfg_cmd sf_cmd = {
9746 		.state = htole32(new_state),
9747 	};
9748 	int err = 0;
9749 
9750 #if 0	/* only used for models with sdio interface, in iwlwifi */
9751 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
9752 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
9753 #endif
9754 
9755 	switch (new_state) {
9756 	case IWM_SF_UNINIT:
9757 	case IWM_SF_INIT_OFF:
9758 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
9759 		break;
9760 	case IWM_SF_FULL_ON:
9761 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
9762 		break;
9763 	default:
9764 		return EINVAL;
9765 	}
9766 
9767 	err = iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
9768 				   sizeof(sf_cmd), &sf_cmd);
9769 	return err;
9770 }
9771 
9772 int
9773 iwm_send_bt_init_conf(struct iwm_softc *sc)
9774 {
9775 	struct iwm_bt_coex_cmd bt_cmd;
9776 
9777 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
9778 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
9779 
9780 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
9781 	    &bt_cmd);
9782 }
9783 
9784 int
9785 iwm_send_soc_conf(struct iwm_softc *sc)
9786 {
9787 	struct iwm_soc_configuration_cmd cmd;
9788 	int err;
9789 	uint32_t cmd_id, flags = 0;
9790 
9791 	memset(&cmd, 0, sizeof(cmd));
9792 
9793 	/*
9794 	 * In VER_1 of this command, the discrete value is considered
9795 	 * an integer; in VER_2, it's a bitmask.  Since we have only 2
9796 	 * values in VER_1, this is backwards-compatible with VER_2,
9797 	 * as long as we don't set any other flag bits.
9798 	 */
9799 	if (!sc->sc_integrated) { /* VER_1 */
9800 		flags = IWM_SOC_CONFIG_CMD_FLAGS_DISCRETE;
9801 	} else { /* VER_2 */
9802 		uint8_t scan_cmd_ver;
9803 		if (sc->sc_ltr_delay != IWM_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
9804 			flags |= (sc->sc_ltr_delay &
9805 			    IWM_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
9806 		scan_cmd_ver = iwm_lookup_cmd_ver(sc, IWM_LONG_GROUP,
9807 		    IWM_SCAN_REQ_UMAC);
9808 		if (scan_cmd_ver != IWM_FW_CMD_VER_UNKNOWN &&
9809 		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
9810 			flags |= IWM_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
9811 	}
9812 	cmd.flags = htole32(flags);
9813 
9814 	cmd.latency = htole32(sc->sc_xtal_latency);
9815 
9816 	cmd_id = iwm_cmd_id(IWM_SOC_CONFIGURATION_CMD, IWM_SYSTEM_GROUP, 0);
9817 	err = iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
9818 	if (err)
9819 		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
9820 	return err;
9821 }
9822 
9823 int
9824 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
9825 {
9826 	struct iwm_mcc_update_cmd mcc_cmd;
9827 	struct iwm_host_cmd hcmd = {
9828 		.id = IWM_MCC_UPDATE_CMD,
9829 		.flags = IWM_CMD_WANT_RESP,
9830 		.resp_pkt_len = IWM_CMD_RESP_MAX,
9831 		.data = { &mcc_cmd },
9832 	};
9833 	struct iwm_rx_packet *pkt;
9834 	size_t resp_len;
9835 	int err;
9836 	int resp_v3 = isset(sc->sc_enabled_capa,
9837 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V3);
9838 
9839 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000 &&
9840 	    !sc->sc_nvm.lar_enabled) {
9841 		return 0;
9842 	}
9843 
9844 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
9845 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
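	/* e.g. "US" packs into 0x5553; iwm_mcc_update() does the reverse. */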
9846 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
9847 	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
9848 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
9849 	else
9850 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
9851 
9852 	if (resp_v3) { /* same size as resp_v2 */
9853 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
9854 	} else {
9855 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
9856 	}
9857 
9858 	err = iwm_send_cmd(sc, &hcmd);
9859 	if (err)
9860 		return err;
9861 
9862 	pkt = hcmd.resp_pkt;
9863 	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
9864 		err = EIO;
9865 		goto out;
9866 	}
9867 
9868 	if (resp_v3) {
9869 		struct iwm_mcc_update_resp_v3 *resp;
9870 		resp_len = iwm_rx_packet_payload_len(pkt);
9871 		if (resp_len < sizeof(*resp)) {
9872 			err = EIO;
9873 			goto out;
9874 		}
9875 
9876 		resp = (void *)pkt->data;
9877 		if (resp_len != sizeof(*resp) +
9878 		    resp->n_channels * sizeof(resp->channels[0])) {
9879 			err = EIO;
9880 			goto out;
9881 		}
9882 	} else {
9883 		struct iwm_mcc_update_resp_v1 *resp_v1;
9884 		resp_len = iwm_rx_packet_payload_len(pkt);
9885 		if (resp_len < sizeof(*resp_v1)) {
9886 			err = EIO;
9887 			goto out;
9888 		}
9889 
9890 		resp_v1 = (void *)pkt->data;
9891 		if (resp_len != sizeof(*resp_v1) +
9892 		    resp_v1->n_channels * sizeof(resp_v1->channels[0])) {
9893 			err = EIO;
9894 			goto out;
9895 		}
9896 	}
9897 out:
9898 	iwm_free_resp(sc, &hcmd);
9899 	return err;
9900 }
9901 
9902 int
9903 iwm_send_temp_report_ths_cmd(struct iwm_softc *sc)
9904 {
9905 	struct iwm_temp_report_ths_cmd cmd;
9906 	int err;
9907 
9908 	/*
9909 	 * In order to give responsibility for critical-temperature-kill
9910 	 * and TX backoff to FW we need to send an empty temperature
9911 	 * reporting command at init time.
9912 	 */
9913 	memset(&cmd, 0, sizeof(cmd));
9914 
9915 	err = iwm_send_cmd_pdu(sc,
9916 	    IWM_WIDE_ID(IWM_PHY_OPS_GROUP, IWM_TEMP_REPORTING_THRESHOLDS_CMD),
9917 	    0, sizeof(cmd), &cmd);
9918 	if (err)
9919 		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
9920 		    DEVNAME(sc), err);
9921 
9922 	return err;
9923 }
9924 
9925 void
9926 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
9927 {
9928 	struct iwm_host_cmd cmd = {
9929 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
9930 		.len = { sizeof(uint32_t), },
9931 		.data = { &backoff, },
9932 	};
9933 
9934 	iwm_send_cmd(sc, &cmd);
9935 }
9936 
9937 void
9938 iwm_free_fw_paging(struct iwm_softc *sc)
9939 {
9940 	int i;
9941 
9942 	if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
9943 		return;
9944 
9945 	for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
9946 		iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
9947 	}
9948 
9949 	memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
9950 }
9951 
9952 int
9953 iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
9954 {
9955 	int sec_idx, idx;
9956 	uint32_t offset = 0;
9957 
9958 	/*
9959 	 * Find the start point of the paging image:
9960 	 * if CPU2 exists and uses the paging format, the image looks like:
9961 	 * CPU1 sections (2 or more)
9962 	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
9963 	 * CPU2 sections (not paged)
9964 	 * PAGING_SEPARATOR_SECTION delimiter - separates non-paged CPU2
9965 	 * sections from the CPU2 paging section
9966 	 * CPU2 paging CSS
9967 	 * CPU2 paging image (including instructions and data)
9968 	 */
9969 	for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
9970 		if (image->fw_sect[sec_idx].fws_devoff ==
9971 		    IWM_PAGING_SEPARATOR_SECTION) {
9972 			sec_idx++;
9973 			break;
9974 		}
9975 	}
9976 
9977 	/*
9978 	 * If paging is enabled there should be at least 2 more sections left
9979 	 * (one for CSS and one for Paging data)
9980 	 */
9981 	if (sec_idx >= nitems(image->fw_sect) - 1) {
9982 		printf("%s: Paging: Missing CSS and/or paging sections\n",
9983 		    DEVNAME(sc));
9984 		iwm_free_fw_paging(sc);
9985 		return EINVAL;
9986 	}
9987 
9988 	/* copy the CSS block to the dram */
9989 	DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n",
9990 	    DEVNAME(sc), sec_idx));
9991 
9992 	memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
9993 	    image->fw_sect[sec_idx].fws_data,
9994 	    sc->fw_paging_db[0].fw_paging_size);
9995 
9996 	DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
9997 	    DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
9998 
9999 	sec_idx++;
10000 
10001 	/*
10002 	 * Copy the paging blocks to DRAM.
10003 	 * The loop index starts at 1 since the CSS block (index 0) has
10004 	 * already been copied above. The loop stops before
10005 	 * num_of_paging_blk since the last block may not be full.
10006 	 */
10007 	for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
10008 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
10009 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
10010 		    sc->fw_paging_db[idx].fw_paging_size);
10011 
10012 		DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
10013 		    DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
10014 
10015 		offset += sc->fw_paging_db[idx].fw_paging_size;
10016 	}
10017 
10018 	/* copy the last paging block */
10019 	if (sc->num_of_pages_in_last_blk > 0) {
10020 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
10021 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
10022 		    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
10023 
10024 		DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
10025 		    DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
10026 	}
10027 
10028 	return 0;
10029 }
10030 
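/*
 * Allocate DRAM for firmware paging: one 4KB block for the paging
 * CSS plus up to IWM_NUM_OF_FW_PAGING_BLOCKS blocks of 32KB (8
 * pages of 4KB each). If the device was merely reset, the existing
 * allocation is kept and only re-synced for CPU access.
 */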
10031 int
10032 iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
10033 {
10034 	int blk_idx = 0;
10035 	int error, num_of_pages;
10036 
10037 	if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
10038 		int i;
10039 		/* Device was reset, and we set up firmware paging again. */
10040 		bus_dmamap_sync(sc->sc_dmat,
10041 		    sc->fw_paging_db[0].fw_paging_block.map,
10042 		    0, IWM_FW_PAGING_SIZE,
10043 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
10044 		for (i = 1; i < sc->num_of_paging_blk + 1; i++) {
10045 			bus_dmamap_sync(sc->sc_dmat,
10046 			    sc->fw_paging_db[i].fw_paging_block.map,
10047 			    0, IWM_PAGING_BLOCK_SIZE,
10048 			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
10049 		}
10050 		return 0;
10051 	}
10052 
10053 	/* Ensure IWM_BLOCK_2_EXP_SIZE is the base-2 log of IWM_PAGING_BLOCK_SIZE. */
10054 #if (1 << IWM_BLOCK_2_EXP_SIZE) != IWM_PAGING_BLOCK_SIZE
10055 #error IWM_BLOCK_2_EXP_SIZE must be the base-2 log of IWM_PAGING_BLOCK_SIZE
10056 #endif
10057 
10058 	num_of_pages = image->paging_mem_size / IWM_FW_PAGING_SIZE;
10059 	sc->num_of_paging_blk =
10060 	    ((num_of_pages - 1) / IWM_NUM_OF_PAGE_PER_GROUP) + 1;
10061 
10062 	sc->num_of_pages_in_last_blk =
10063 		num_of_pages -
10064 		IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
10065 
10066 	DPRINTF(("%s: Paging: allocating mem for %d paging blocks, each block"
10067 	    " holds 8 pages, last block holds %d pages\n", DEVNAME(sc),
10068 	    sc->num_of_paging_blk,
10069 	    sc->num_of_pages_in_last_blk));
10070 
10071 	/* allocate block of 4Kbytes for paging CSS */
10072 	error = iwm_dma_contig_alloc(sc->sc_dmat,
10073 	    &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
10074 	    4096);
10075 	if (error) {
10076 		/* free all the previous pages since we failed */
10077 		iwm_free_fw_paging(sc);
10078 		return ENOMEM;
10079 	}
10080 
10081 	sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
10082 
10083 	DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
10084 	    DEVNAME(sc)));
10085 
10086 	/*
10087 	 * Allocate paging blocks in DRAM.
10088 	 * Since the CSS is in fw_paging_db[0], the loop starts at index 1.
10089 	 */
10090 	for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
10091 		/* allocate block of IWM_PAGING_BLOCK_SIZE (32K) */
10092 		/* XXX Use iwm_dma_contig_alloc for allocating */
10093 		error = iwm_dma_contig_alloc(sc->sc_dmat,
10094 		     &sc->fw_paging_db[blk_idx].fw_paging_block,
10095 		    IWM_PAGING_BLOCK_SIZE, 4096);
10096 		if (error) {
10097 			/* free all the previous pages since we failed */
10098 			iwm_free_fw_paging(sc);
10099 			return ENOMEM;
10100 		}
10101 
10102 		sc->fw_paging_db[blk_idx].fw_paging_size =
10103 		    IWM_PAGING_BLOCK_SIZE;
10104 
10105 		DPRINTF((
10106 		    "%s: Paging: allocated 32K bytes for firmware paging.\n",
10107 		    DEVNAME(sc)));
10108 	}
10109 
10110 	return 0;
10111 }
10112 
10113 int
10114 iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
10115 {
10116 	int ret;
10117 
10118 	ret = iwm_alloc_fw_paging_mem(sc, fw);
10119 	if (ret)
10120 		return ret;
10121 
10122 	return iwm_fill_paging_mem(sc, fw);
10123 }
10124 
10125 /* send paging cmd to FW in case CPU2 has paging image */
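/*
 * The command carries the block count, the block size as a base-2
 * exponent, and each block's physical address shifted down to a
 * 4KB page frame number (paddr >> IWM_PAGE_2_EXP_SIZE).
 */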
10126 int
10127 iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
10128 {
10129 	int blk_idx;
10130 	uint32_t dev_phy_addr;
10131 	struct iwm_fw_paging_cmd fw_paging_cmd = {
10132 		.flags =
10133 			htole32(IWM_PAGING_CMD_IS_SECURED |
10134 				IWM_PAGING_CMD_IS_ENABLED |
10135 				(sc->num_of_pages_in_last_blk <<
10136 				IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
10137 		.block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
10138 		.block_num = htole32(sc->num_of_paging_blk),
10139 	};
10140 
10141 	/* loop for all paging blocks + CSS block */
10142 	for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
10143 		dev_phy_addr = htole32(
10144 		    sc->fw_paging_db[blk_idx].fw_paging_block.paddr >>
10145 		    IWM_PAGE_2_EXP_SIZE);
10146 		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
10147 		bus_dmamap_sync(sc->sc_dmat,
10148 		    sc->fw_paging_db[blk_idx].fw_paging_block.map, 0,
10149 		    blk_idx == 0 ? IWM_FW_PAGING_SIZE : IWM_PAGING_BLOCK_SIZE,
10150 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
10151 	}
10152 
10153 	return iwm_send_cmd_pdu(sc, iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD,
10154 					       IWM_LONG_GROUP, 0),
10155 	    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
10156 }
10157 
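/*
 * Firmware bring-up sequence: run the INIT ucode, restart the
 * hardware with the regular image, then configure Tx antennas,
 * PHY DB, PHY config, BT coex, SOC latency, stations, PHY contexts,
 * thermal and power settings, LAR, scanning, and the Tx queues.
 */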
10158 int
10159 iwm_init_hw(struct iwm_softc *sc)
10160 {
10161 	struct ieee80211com *ic = &sc->sc_ic;
10162 	int err, i, ac, qid, s;
10163 
10164 	err = iwm_run_init_mvm_ucode(sc, 0);
10165 	if (err)
10166 		return err;
10167 
10168 	/* Should stop and start HW since INIT image just loaded. */
10169 	iwm_stop_device(sc);
10170 	err = iwm_start_hw(sc);
10171 	if (err) {
10172 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10173 		return err;
10174 	}
10175 
10176 	/* Restart, this time with the regular firmware */
10177 	s = splnet();
10178 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
10179 	if (err) {
10180 		printf("%s: could not load firmware\n", DEVNAME(sc));
10181 		splx(s);
10182 		return err;
10183 	}
10184 
10185 	if (!iwm_nic_lock(sc)) {
10186 		splx(s);
10187 		return EBUSY;
10188 	}
10189 
10190 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
10191 	if (err) {
10192 		printf("%s: could not init tx ant config (error %d)\n",
10193 		    DEVNAME(sc), err);
10194 		goto err;
10195 	}
10196 
10197 	err = iwm_send_phy_db_data(sc);
10198 	if (err) {
10199 		printf("%s: could not init phy db (error %d)\n",
10200 		    DEVNAME(sc), err);
10201 		goto err;
10202 	}
10203 
10204 	err = iwm_send_phy_cfg_cmd(sc);
10205 	if (err) {
10206 		printf("%s: could not send phy config (error %d)\n",
10207 		    DEVNAME(sc), err);
10208 		goto err;
10209 	}
10210 
10211 	err = iwm_send_bt_init_conf(sc);
10212 	if (err) {
10213 		printf("%s: could not init bt coex (error %d)\n",
10214 		    DEVNAME(sc), err);
10215 		goto err;
10216 	}
10217 
10218 	if (isset(sc->sc_enabled_capa,
10219 	    IWM_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
10220 		err = iwm_send_soc_conf(sc);
10221 		if (err)
10222 			goto err;
10223 	}
10224 
10225 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
10226 		err = iwm_send_dqa_cmd(sc);
10227 		if (err)
10228 			goto err;
10229 	}
10230 
10231 	/* Add auxiliary station for scanning */
10232 	err = iwm_add_aux_sta(sc);
10233 	if (err) {
10234 		printf("%s: could not add aux station (error %d)\n",
10235 		    DEVNAME(sc), err);
10236 		goto err;
10237 	}
10238 
10239 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
10240 		/*
10241 		 * The channel used here isn't relevant as it's
10242 		 * going to be overwritten in the other flows.
10243 		 * For now use the first channel we have.
10244 		 */
10245 		sc->sc_phyctxt[i].id = i;
10246 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
10247 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
10248 		    IWM_FW_CTXT_ACTION_ADD, 0, IEEE80211_HTOP0_SCO_SCN,
10249 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
10250 		if (err) {
10251 			printf("%s: could not add phy context %d (error %d)\n",
10252 			    DEVNAME(sc), i, err);
10253 			goto err;
10254 		}
10255 	}
10256 
10257 	/* Initialize tx backoffs to the minimum. */
10258 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
10259 		iwm_tt_tx_backoff(sc, 0);
10260 
10261 
10262 	err = iwm_config_ltr(sc);
10263 	if (err) {
10264 		printf("%s: PCIe LTR configuration failed (error %d)\n",
10265 		    DEVNAME(sc), err);
10266 	}
10267 
10268 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
10269 		err = iwm_send_temp_report_ths_cmd(sc);
10270 		if (err)
10271 			goto err;
10272 	}
10273 
10274 	err = iwm_power_update_device(sc);
10275 	if (err) {
10276 		printf("%s: could not send power command (error %d)\n",
10277 		    DEVNAME(sc), err);
10278 		goto err;
10279 	}
10280 
10281 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
10282 		err = iwm_send_update_mcc_cmd(sc, "ZZ");
10283 		if (err) {
10284 			printf("%s: could not init LAR (error %d)\n",
10285 			    DEVNAME(sc), err);
10286 			goto err;
10287 		}
10288 	}
10289 
10290 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
10291 		err = iwm_config_umac_scan(sc);
10292 		if (err) {
10293 			printf("%s: could not configure scan (error %d)\n",
10294 			    DEVNAME(sc), err);
10295 			goto err;
10296 		}
10297 	}
10298 
10299 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
10300 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
10301 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
10302 		else
10303 			qid = IWM_AUX_QUEUE;
10304 		err = iwm_enable_txq(sc, IWM_MONITOR_STA_ID, qid,
10305 		    iwm_ac_to_tx_fifo[EDCA_AC_BE], 0, IWM_MAX_TID_COUNT, 0);
10306 		if (err) {
10307 			printf("%s: could not enable monitor inject Tx queue "
10308 			    "(error %d)\n", DEVNAME(sc), err);
10309 			goto err;
10310 		}
10311 	} else {
10312 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
10313 			if (isset(sc->sc_enabled_capa,
10314 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
10315 				qid = ac + IWM_DQA_MIN_MGMT_QUEUE;
10316 			else
10317 				qid = ac;
10318 			err = iwm_enable_txq(sc, IWM_STATION_ID, qid,
10319 			    iwm_ac_to_tx_fifo[ac], 0, IWM_TID_NON_QOS, 0);
10320 			if (err) {
10321 				printf("%s: could not enable Tx queue %d "
10322 				    "(error %d)\n", DEVNAME(sc), ac, err);
10323 				goto err;
10324 			}
10325 		}
10326 	}
10327 
10328 	err = iwm_disable_beacon_filter(sc);
10329 	if (err) {
10330 		printf("%s: could not disable beacon filter (error %d)\n",
10331 		    DEVNAME(sc), err);
10332 		goto err;
10333 	}
10334 
10335 err:
10336 	iwm_nic_unlock(sc);
10337 	splx(s);
10338 	return err;
10339 }
10340 
10341 /* Allow multicast from our BSSID. */
10342 int
10343 iwm_allow_mcast(struct iwm_softc *sc)
10344 {
10345 	struct ieee80211com *ic = &sc->sc_ic;
10346 	struct iwm_node *in = (void *)ic->ic_bss;
10347 	struct iwm_mcast_filter_cmd *cmd;
10348 	size_t size;
10349 	int err;
10350 
10351 	size = roundup(sizeof(*cmd), 4);
10352 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
10353 	if (cmd == NULL)
10354 		return ENOMEM;
10355 	cmd->filter_own = 1;
10356 	cmd->port_id = 0;
10357 	cmd->count = 0;
10358 	cmd->pass_all = 1;
10359 	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
10360 
10361 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
10362 	    0, size, cmd);
10363 	free(cmd, M_DEVBUF, size);
10364 	return err;
10365 }
10366 
10367 int
10368 iwm_init(struct ifnet *ifp)
10369 {
10370 	struct iwm_softc *sc = ifp->if_softc;
10371 	struct ieee80211com *ic = &sc->sc_ic;
10372 	int err, generation;
10373 
10374 	rw_assert_wrlock(&sc->ioctl_rwl);
10375 
10376 	generation = ++sc->sc_generation;
10377 
10378 	err = iwm_preinit(sc);
10379 	if (err)
10380 		return err;
10381 
10382 	err = iwm_start_hw(sc);
10383 	if (err) {
10384 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10385 		return err;
10386 	}
10387 
10388 	err = iwm_init_hw(sc);
10389 	if (err) {
10390 		if (generation == sc->sc_generation)
10391 			iwm_stop_device(sc);
10392 		return err;
10393 	}
10394 
10395 	if (sc->sc_nvm.sku_cap_11n_enable)
10396 		iwm_setup_ht_rates(sc);
10397 	if (sc->sc_nvm.sku_cap_11ac_enable)
10398 		iwm_setup_vht_rates(sc);
10399 
10400 	KASSERT(sc->task_refs.r_refs == 0);
10401 	refcnt_init(&sc->task_refs);
10402 	ifq_clr_oactive(&ifp->if_snd);
10403 	ifp->if_flags |= IFF_RUNNING;
10404 
10405 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
10406 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
10407 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
10408 		return 0;
10409 	}
10410 
10411 	ieee80211_begin_scan(ifp);
10412 
10413 	/*
10414 	 * ieee80211_begin_scan() ends up scheduling iwm_newstate_task().
10415 	 * Wait until the transition to SCAN state has completed.
10416 	 */
10417 	do {
10418 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwminit",
10419 		    SEC_TO_NSEC(1));
10420 		if (generation != sc->sc_generation)
10421 			return ENXIO;
10422 		if (err) {
10423 			iwm_stop(ifp);
10424 			return err;
10425 		}
10426 	} while (ic->ic_state != IEEE80211_S_SCAN);
10427 
10428 	return 0;
10429 }
10430 
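/*
 * Ifnet start routine. Management frames are dequeued first and may
 * be sent in any state; data frames are dequeued only in RUN state
 * and while no Tx flush is in progress.
 */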
10431 void
10432 iwm_start(struct ifnet *ifp)
10433 {
10434 	struct iwm_softc *sc = ifp->if_softc;
10435 	struct ieee80211com *ic = &sc->sc_ic;
10436 	struct ieee80211_node *ni;
10437 	struct ether_header *eh;
10438 	struct mbuf *m;
10439 	int ac = EDCA_AC_BE; /* XXX */
10440 
10441 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
10442 		return;
10443 
10444 	for (;;) {
10445 		/* why isn't this done per-queue? */
10446 		if (sc->qfullmsk != 0) {
10447 			ifq_set_oactive(&ifp->if_snd);
10448 			break;
10449 		}
10450 
10451 		/* Don't queue additional frames while flushing Tx queues. */
10452 		if (sc->sc_flags & IWM_FLAG_TXFLUSH)
10453 			break;
10454 
10455 		/* need to send management frames even if we're not RUNning */
10456 		m = mq_dequeue(&ic->ic_mgtq);
10457 		if (m) {
10458 			ni = m->m_pkthdr.ph_cookie;
10459 			goto sendit;
10460 		}
10461 
10462 		if (ic->ic_state != IEEE80211_S_RUN ||
10463 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
10464 			break;
10465 
10466 		m = ifq_dequeue(&ifp->if_snd);
10467 		if (!m)
10468 			break;
10469 		if (m->m_len < sizeof (*eh) &&
10470 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
10471 			ifp->if_oerrors++;
10472 			continue;
10473 		}
10474 #if NBPFILTER > 0
10475 		if (ifp->if_bpf != NULL)
10476 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
10477 #endif
10478 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
10479 			ifp->if_oerrors++;
10480 			continue;
10481 		}
10482 
10483  sendit:
10484 #if NBPFILTER > 0
10485 		if (ic->ic_rawbpf != NULL)
10486 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
10487 #endif
10488 		if (iwm_tx(sc, m, ni, ac) != 0) {
10489 			ieee80211_release_node(ic, ni);
10490 			ifp->if_oerrors++;
10491 			continue;
10492 		}
10493 
10494 		if (ifp->if_flags & IFF_UP)
10495 			ifp->if_timer = 1;
10496 	}
10497 
10498 	return;
10499 }
10500 
10501 void
10502 iwm_stop(struct ifnet *ifp)
10503 {
10504 	struct iwm_softc *sc = ifp->if_softc;
10505 	struct ieee80211com *ic = &sc->sc_ic;
10506 	struct iwm_node *in = (void *)ic->ic_bss;
10507 	int i, s = splnet();
10508 
10509 	rw_assert_wrlock(&sc->ioctl_rwl);
10510 
10511 	sc->sc_flags |= IWM_FLAG_SHUTDOWN; /* Disallow new tasks. */
10512 
10513 	/* Cancel scheduled tasks and let any stale tasks finish up. */
10514 	task_del(systq, &sc->init_task);
10515 	iwm_del_task(sc, sc->sc_nswq, &sc->newstate_task);
10516 	iwm_del_task(sc, systq, &sc->ba_task);
10517 	iwm_del_task(sc, systq, &sc->mac_ctxt_task);
10518 	iwm_del_task(sc, systq, &sc->phy_ctxt_task);
10519 	iwm_del_task(sc, systq, &sc->bgscan_done_task);
10520 	KASSERT(sc->task_refs.r_refs >= 1);
10521 	refcnt_finalize(&sc->task_refs, "iwmstop");
10522 
10523 	iwm_stop_device(sc);
10524 
10525 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
10526 	sc->bgscan_unref_arg = NULL;
10527 	sc->bgscan_unref_arg_size = 0;
10528 
10529 	/* Reset soft state. */
10530 
10531 	sc->sc_generation++;
10532 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
10533 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
10534 		sc->sc_cmd_resp_pkt[i] = NULL;
10535 		sc->sc_cmd_resp_len[i] = 0;
10536 	}
10537 	ifp->if_flags &= ~IFF_RUNNING;
10538 	ifq_clr_oactive(&ifp->if_snd);
10539 
10540 	in->in_phyctxt = NULL;
10541 	in->tid_disable_ampdu = 0xffff;
10542 	in->tfd_queue_msk = 0;
10543 	IEEE80211_ADDR_COPY(in->in_macaddr, etheranyaddr);
10544 
10545 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
10546 	sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
10547 	sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
10548 	sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
10549 	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
10550 	sc->sc_flags &= ~IWM_FLAG_HW_ERR;
10551 	sc->sc_flags &= ~IWM_FLAG_SHUTDOWN;
10552 	sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
10553 
10554 	sc->sc_rx_ba_sessions = 0;
10555 	sc->ba_rx.start_tidmask = 0;
10556 	sc->ba_rx.stop_tidmask = 0;
10557 	sc->tx_ba_queue_mask = 0;
10558 	sc->ba_tx.start_tidmask = 0;
10559 	sc->ba_tx.stop_tidmask = 0;
10560 
10561 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
10562 	sc->ns_nstate = IEEE80211_S_INIT;
10563 
10564 	timeout_del(&sc->sc_calib_to); /* XXX refcount? */
10565 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
10566 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
10567 		iwm_clear_reorder_buffer(sc, rxba);
10568 	}
10569 	iwm_led_blink_stop(sc);
10570 	memset(sc->sc_tx_timer, 0, sizeof(sc->sc_tx_timer));
10571 	ifp->if_timer = 0;
10572 
10573 	splx(s);
10574 }
10575 
10576 void
10577 iwm_watchdog(struct ifnet *ifp)
10578 {
10579 	struct iwm_softc *sc = ifp->if_softc;
10580 	int i;
10581 
10582 	ifp->if_timer = 0;
10583 
10584 	/*
10585 	 * We maintain a separate timer for each Tx queue because
10586 	 * Tx aggregation queues can get "stuck" while other queues
10587 	 * keep working. The Linux driver uses a similar workaround.
10588 	 */
10589 	for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
10590 		if (sc->sc_tx_timer[i] > 0) {
10591 			if (--sc->sc_tx_timer[i] == 0) {
10592 				printf("%s: device timeout\n", DEVNAME(sc));
10593 				if (ifp->if_flags & IFF_DEBUG) {
10594 					iwm_nic_error(sc);
10595 					iwm_dump_driver_status(sc);
10596 				}
10597 				if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
10598 					task_add(systq, &sc->init_task);
10599 				ifp->if_oerrors++;
10600 				return;
10601 			}
10602 			ifp->if_timer = 1;
10603 		}
10604 	}
10605 
10606 	ieee80211_watchdog(ifp);
10607 }
10608 
10609 int
10610 iwm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
10611 {
10612 	struct iwm_softc *sc = ifp->if_softc;
10613 	int s, err = 0, generation = sc->sc_generation;
10614 
10615 	/*
10616 	 * Prevent processes from entering this function while another
10617 	 * process is tsleep'ing in it.
10618 	 */
10619 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
10620 	if (err == 0 && generation != sc->sc_generation) {
10621 		rw_exit(&sc->ioctl_rwl);
10622 		return ENXIO;
10623 	}
10624 	if (err)
10625 		return err;
10626 	s = splnet();
10627 
10628 	switch (cmd) {
10629 	case SIOCSIFADDR:
10630 		ifp->if_flags |= IFF_UP;
10631 		/* FALLTHROUGH */
10632 	case SIOCSIFFLAGS:
10633 		if (ifp->if_flags & IFF_UP) {
10634 			if (!(ifp->if_flags & IFF_RUNNING)) {
10635 				/* Force reload of firmware image from disk. */
10636 				sc->sc_fw.fw_status = IWM_FW_STATUS_NONE;
10637 				err = iwm_init(ifp);
10638 			}
10639 		} else {
10640 			if (ifp->if_flags & IFF_RUNNING)
10641 				iwm_stop(ifp);
10642 		}
10643 		break;
10644 
10645 	default:
10646 		err = ieee80211_ioctl(ifp, cmd, data);
10647 	}
10648 
10649 	if (err == ENETRESET) {
10650 		err = 0;
10651 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
10652 		    (IFF_UP | IFF_RUNNING)) {
10653 			iwm_stop(ifp);
10654 			err = iwm_init(ifp);
10655 		}
10656 	}
10657 
10658 	splx(s);
10659 	rw_exit(&sc->ioctl_rwl);
10660 
10661 	return err;
10662 }
10663 
10664 /*
10665  * Note: This structure is read from the device with IO accesses,
10666  * and the reading already does the endian conversion. As it is
10667  * read with uint32_t-sized accesses, any members with a different size
10668  * need to be ordered correctly though!
10669  */
10670 struct iwm_error_event_table {
10671 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
10672 	uint32_t error_id;		/* type of error */
10673 	uint32_t trm_hw_status0;	/* TRM HW status */
10674 	uint32_t trm_hw_status1;	/* TRM HW status */
10675 	uint32_t blink2;		/* branch link */
10676 	uint32_t ilink1;		/* interrupt link */
10677 	uint32_t ilink2;		/* interrupt link */
10678 	uint32_t data1;		/* error-specific data */
10679 	uint32_t data2;		/* error-specific data */
10680 	uint32_t data3;		/* error-specific data */
10681 	uint32_t bcon_time;		/* beacon timer */
10682 	uint32_t tsf_low;		/* network timestamp function timer */
10683 	uint32_t tsf_hi;		/* network timestamp function timer */
10684 	uint32_t gp1;		/* GP1 timer register */
10685 	uint32_t gp2;		/* GP2 timer register */
10686 	uint32_t fw_rev_type;	/* firmware revision type */
10687 	uint32_t major;		/* uCode version major */
10688 	uint32_t minor;		/* uCode version minor */
10689 	uint32_t hw_ver;		/* HW Silicon version */
10690 	uint32_t brd_ver;		/* HW board version */
10691 	uint32_t log_pc;		/* log program counter */
10692 	uint32_t frame_ptr;		/* frame pointer */
10693 	uint32_t stack_ptr;		/* stack pointer */
10694 	uint32_t hcmd;		/* last host command header */
10695 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
10696 				 * rxtx_flag */
10697 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
10698 				 * host_flag */
10699 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
10700 				 * enc_flag */
10701 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
10702 				 * time_flag */
10703 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
10704 				 * wico interrupt */
10705 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
10706 	uint32_t wait_event;		/* wait event() caller address */
10707 	uint32_t l2p_control;	/* L2pControlField */
10708 	uint32_t l2p_duration;	/* L2pDurationField */
10709 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
10710 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
10711 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
10712 				 * (LMPM_PMG_SEL) */
10713 	uint32_t u_timestamp;	/* indicates the date and time of the
10714 				 * compilation */
10715 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
10716 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
10717 
10718 /*
10719  * UMAC error struct - relevant starting from family 8000 chip.
10720  * Note: This structure is read from the device with IO accesses,
10721  * and the reading already does the endian conversion. As it is
10722  * read with u32-sized accesses, any members with a different size
10723  * need to be ordered correctly though!
10724  */
10725 struct iwm_umac_error_event_table {
10726 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
10727 	uint32_t error_id;	/* type of error */
10728 	uint32_t blink1;	/* branch link */
10729 	uint32_t blink2;	/* branch link */
10730 	uint32_t ilink1;	/* interrupt link */
10731 	uint32_t ilink2;	/* interrupt link */
10732 	uint32_t data1;		/* error-specific data */
10733 	uint32_t data2;		/* error-specific data */
10734 	uint32_t data3;		/* error-specific data */
10735 	uint32_t umac_major;
10736 	uint32_t umac_minor;
10737 	uint32_t frame_pointer;	/* core register 27 */
10738 	uint32_t stack_pointer;	/* core register 28 */
10739 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
10740 	uint32_t nic_isr_pref;	/* ISR status register */
10741 } __packed;
10742 
10743 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
10744 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
10745 
10746 void
10747 iwm_nic_umac_error(struct iwm_softc *sc)
10748 {
10749 	struct iwm_umac_error_event_table table;
10750 	uint32_t base;
10751 
10752 	base = sc->sc_uc.uc_umac_error_event_table;
10753 
10754 	if (base < 0x800000) {
10755 		printf("%s: Invalid error log pointer 0x%08x\n",
10756 		    DEVNAME(sc), base);
10757 		return;
10758 	}
10759 
10760 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10761 		printf("%s: reading errlog failed\n", DEVNAME(sc));
10762 		return;
10763 	}
10764 
10765 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
10766 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
10767 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10768 			sc->sc_flags, table.valid);
10769 	}
10770 
10771 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
10772 		iwm_desc_lookup(table.error_id));
10773 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
10774 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
10775 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
10776 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
10777 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
10778 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
10779 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
10780 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
10781 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
10782 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
10783 	    table.frame_pointer);
10784 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
10785 	    table.stack_pointer);
10786 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
10787 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
10788 	    table.nic_isr_pref);
10789 }
10790 
10791 #define IWM_FW_SYSASSERT_CPU_MASK 0xf0000000
10792 static struct {
10793 	const char *name;
10794 	uint8_t num;
10795 } advanced_lookup[] = {
10796 	{ "NMI_INTERRUPT_WDG", 0x34 },
10797 	{ "SYSASSERT", 0x35 },
10798 	{ "UCODE_VERSION_MISMATCH", 0x37 },
10799 	{ "BAD_COMMAND", 0x38 },
10800 	{ "BAD_COMMAND", 0x39 },
10801 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
10802 	{ "FATAL_ERROR", 0x3D },
10803 	{ "NMI_TRM_HW_ERR", 0x46 },
10804 	{ "NMI_INTERRUPT_TRM", 0x4C },
10805 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
10806 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
10807 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
10808 	{ "NMI_INTERRUPT_HOST", 0x66 },
10809 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
10810 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
10811 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
10812 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
10813 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
10814 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
10815 	{ "ADVANCED_SYSASSERT", 0 },
10816 };
10817 
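/*
 * Map a firmware assert id to a symbolic name. The CPU bits of the
 * id are masked off; if no entry matches, the final catch-all entry
 * ("ADVANCED_SYSASSERT") is returned.
 */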
10818 const char *
10819 iwm_desc_lookup(uint32_t num)
10820 {
10821 	int i;
10822 
10823 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
10824 		if (advanced_lookup[i].num ==
10825 		    (num & ~IWM_FW_SYSASSERT_CPU_MASK))
10826 			return advanced_lookup[i].name;
10827 
10828 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
10829 	return advanced_lookup[i].name;
10830 }
10831 
10832 /*
10833  * Support for dumping the error log seemed like a good idea ...
10834  * but it's mostly hex junk and the only sensible thing is the
10835  * hw/ucode revision (which we know anyway).  Since it's here,
10836  * I'll just leave it in, just in case e.g. the Intel guys want to
10837  * help us decipher some "ADVANCED_SYSASSERT" later.
10838  */
10839 void
10840 iwm_nic_error(struct iwm_softc *sc)
10841 {
10842 	struct iwm_error_event_table table;
10843 	uint32_t base;
10844 
10845 	printf("%s: dumping device error log\n", DEVNAME(sc));
10846 	base = sc->sc_uc.uc_error_event_table;
10847 	if (base < 0x800000) {
10848 		printf("%s: Invalid error log pointer 0x%08x\n",
10849 		    DEVNAME(sc), base);
10850 		return;
10851 	}
10852 
10853 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10854 		printf("%s: reading errlog failed\n", DEVNAME(sc));
10855 		return;
10856 	}
10857 
10858 	if (!table.valid) {
10859 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
10860 		return;
10861 	}
10862 
10863 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
10864 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
10865 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10866 		    sc->sc_flags, table.valid);
10867 	}
10868 
10869 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
10870 	    iwm_desc_lookup(table.error_id));
10871 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
10872 	    table.trm_hw_status0);
10873 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
10874 	    table.trm_hw_status1);
10875 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
10876 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
10877 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
10878 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
10879 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
10880 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
10881 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
10882 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
10883 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
10884 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
10885 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
10886 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
10887 	    table.fw_rev_type);
10888 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
10889 	    table.major);
10890 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
10891 	    table.minor);
10892 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
10893 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
10894 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
10895 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
10896 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
10897 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
10898 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
10899 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
10900 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
10901 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
10902 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
10903 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
10904 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
10905 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
10906 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
10907 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
10908 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
10909 
10910 	if (sc->sc_uc.uc_umac_error_event_table)
10911 		iwm_nic_umac_error(sc);
10912 }
10913 
10914 void
10915 iwm_dump_driver_status(struct iwm_softc *sc)
10916 {
10917 	int i;
10918 
10919 	printf("driver status:\n");
10920 	for (i = 0; i < IWM_MAX_QUEUES; i++) {
10921 		struct iwm_tx_ring *ring = &sc->txq[i];
10922 		printf("  tx ring %2d: qid=%-2d cur=%-3d "
10923 		    "queued=%-3d\n",
10924 		    i, ring->qid, ring->cur, ring->queued);
10925 	}
10926 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
10927 	printf("  802.11 state %s\n",
10928 	    ieee80211_state_name[sc->sc_ic.ic_state]);
10929 }
10930 
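/*
 * SYNC_RESP_STRUCT syncs the response payload following the Rx
 * packet header and points _var_ at it; SYNC_RESP_PTR does the
 * same with an explicit length. ADVANCE_RXQ advances the Rx ring
 * index modulo the ring size; note that it relies on a 'count'
 * variable being in scope.
 */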
10931 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
10932 do {									\
10933 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
10934 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
10935 	_var_ = (void *)((_pkt_)+1);					\
10936 } while (/*CONSTCOND*/0)
10937 
10938 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
10939 do {									\
10940 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
10941 	    (_len_), BUS_DMASYNC_POSTREAD);				\
10942 	_ptr_ = (void *)((_pkt_)+1);					\
10943 } while (/*CONSTCOND*/0)
10944 
10945 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % count)
10946 
10947 int
10948 iwm_rx_pkt_valid(struct iwm_rx_packet *pkt)
10949 {
10950 	int qid, idx, code;
10951 
10952 	qid = pkt->hdr.qid & ~0x80;
10953 	idx = pkt->hdr.idx;
10954 	code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
10955 
10956 	return (!(qid == 0 && idx == 0 && code == 0) &&
10957 	    pkt->len_n_flags != htole32(IWM_FH_RSCSR_FRAME_INVALID));
10958 }
10959 
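/*
 * Process all packets within one Rx buffer. A single buffer may
 * contain several packets, each aligned to IWM_FH_RSCSR_FRAME_ALIGN.
 * The last MPDU in a buffer is passed up without copying; earlier
 * MPDUs are copied so that the original mbuf remains intact.
 */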
10960 void
10961 iwm_rx_pkt(struct iwm_softc *sc, struct iwm_rx_data *data, struct mbuf_list *ml)
10962 {
10963 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
10964 	struct iwm_rx_packet *pkt, *nextpkt;
10965 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
10966 	struct mbuf *m0, *m;
10967 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
10968 	int qid, idx, code, handled = 1;
10969 
10970 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
10971 	    BUS_DMASYNC_POSTREAD);
10972 
10973 	m0 = data->m;
10974 	while (m0 && offset + minsz < IWM_RBUF_SIZE) {
10975 		pkt = (struct iwm_rx_packet *)(m0->m_data + offset);
10976 		qid = pkt->hdr.qid;
10977 		idx = pkt->hdr.idx;
10978 
10979 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
10980 
10981 		if (!iwm_rx_pkt_valid(pkt))
10982 			break;
10983 
10984 		len = sizeof(pkt->len_n_flags) + iwm_rx_packet_len(pkt);
10985 		if (len < minsz || len > (IWM_RBUF_SIZE - offset))
10986 			break;
10987 
10988 		if (code == IWM_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
10989 			/* Take mbuf m0 off the RX ring. */
10990 			if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur)) {
10991 				ifp->if_ierrors++;
10992 				break;
10993 			}
10994 			KASSERT(data->m != m0);
10995 		}
10996 
10997 		switch (code) {
10998 		case IWM_REPLY_RX_PHY_CMD:
10999 			iwm_rx_rx_phy_cmd(sc, pkt, data);
11000 			break;
11001 
11002 		case IWM_REPLY_RX_MPDU_CMD: {
11003 			size_t maxlen = IWM_RBUF_SIZE - offset - minsz;
11004 			nextoff = offset +
11005 			    roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
11006 			nextpkt = (struct iwm_rx_packet *)
11007 			    (m0->m_data + nextoff);
11008 			if (nextoff + minsz >= IWM_RBUF_SIZE ||
11009 			    !iwm_rx_pkt_valid(nextpkt)) {
11010 				/* No need to copy last frame in buffer. */
11011 				if (offset > 0)
11012 					m_adj(m0, offset);
11013 				if (sc->sc_mqrx_supported)
11014 					iwm_rx_mpdu_mq(sc, m0, pkt->data,
11015 					    maxlen, ml);
11016 				else
11017 					iwm_rx_mpdu(sc, m0, pkt->data,
11018 					    maxlen, ml);
11019 				m0 = NULL; /* stack owns m0 now; abort loop */
11020 			} else {
11021 				/*
11022 				 * Create an mbuf which points to the current
11023 				 * packet. Always copy from offset zero to
11024 				 * preserve m_pkthdr.
11025 				 */
11026 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
11027 				if (m == NULL) {
11028 					ifp->if_ierrors++;
11029 					m_freem(m0);
11030 					m0 = NULL;
11031 					break;
11032 				}
11033 				m_adj(m, offset);
11034 				if (sc->sc_mqrx_supported)
11035 					iwm_rx_mpdu_mq(sc, m, pkt->data,
11036 					    maxlen, ml);
11037 				else
11038 					iwm_rx_mpdu(sc, m, pkt->data,
11039 					    maxlen, ml);
11040 			}
11041 			break;
11042 		}
11043 
11044 		case IWM_TX_CMD:
11045 			iwm_rx_tx_cmd(sc, pkt, data);
11046 			break;
11047 
11048 		case IWM_BA_NOTIF:
11049 			iwm_rx_compressed_ba(sc, pkt);
11050 			break;
11051 
11052 		case IWM_MISSED_BEACONS_NOTIFICATION:
11053 			iwm_rx_bmiss(sc, pkt, data);
11054 			break;
11055 
11056 		case IWM_MFUART_LOAD_NOTIFICATION:
11057 			break;
11058 
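		/*
		 * The three versions of the ALIVE response are told apart
		 * by payload length alone; each version carries the error
		 * and log event table pointers and the scheduler base.
		 */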
11059 		case IWM_ALIVE: {
11060 			struct iwm_alive_resp_v1 *resp1;
11061 			struct iwm_alive_resp_v2 *resp2;
11062 			struct iwm_alive_resp_v3 *resp3;
11063 
11064 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
11065 				SYNC_RESP_STRUCT(resp1, pkt);
11066 				sc->sc_uc.uc_error_event_table
11067 				    = le32toh(resp1->error_event_table_ptr);
11068 				sc->sc_uc.uc_log_event_table
11069 				    = le32toh(resp1->log_event_table_ptr);
11070 				sc->sched_base = le32toh(resp1->scd_base_ptr);
11071 				if (resp1->status == IWM_ALIVE_STATUS_OK)
11072 					sc->sc_uc.uc_ok = 1;
11073 				else
11074 					sc->sc_uc.uc_ok = 0;
11075 			}
11076 
11077 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
11078 				SYNC_RESP_STRUCT(resp2, pkt);
11079 				sc->sc_uc.uc_error_event_table
11080 				    = le32toh(resp2->error_event_table_ptr);
11081 				sc->sc_uc.uc_log_event_table
11082 				    = le32toh(resp2->log_event_table_ptr);
11083 				sc->sched_base = le32toh(resp2->scd_base_ptr);
11084 				sc->sc_uc.uc_umac_error_event_table
11085 				    = le32toh(resp2->error_info_addr);
11086 				if (resp2->status == IWM_ALIVE_STATUS_OK)
11087 					sc->sc_uc.uc_ok = 1;
11088 				else
11089 					sc->sc_uc.uc_ok = 0;
11090 			}
11091 
11092 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
11093 				SYNC_RESP_STRUCT(resp3, pkt);
11094 				sc->sc_uc.uc_error_event_table
11095 				    = le32toh(resp3->error_event_table_ptr);
11096 				sc->sc_uc.uc_log_event_table
11097 				    = le32toh(resp3->log_event_table_ptr);
11098 				sc->sched_base = le32toh(resp3->scd_base_ptr);
11099 				sc->sc_uc.uc_umac_error_event_table
11100 				    = le32toh(resp3->error_info_addr);
11101 				if (resp3->status == IWM_ALIVE_STATUS_OK)
11102 					sc->sc_uc.uc_ok = 1;
11103 				else
11104 					sc->sc_uc.uc_ok = 0;
11105 			}
11106 
11107 			sc->sc_uc.uc_intr = 1;
11108 			wakeup(&sc->sc_uc);
11109 			break;
11110 		}
11111 
11112 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
11113 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
11114 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
11115 			iwm_phy_db_set_section(sc, phy_db_notif);
11116 			sc->sc_init_complete |= IWM_CALIB_COMPLETE;
11117 			wakeup(&sc->sc_init_complete);
11118 			break;
11119 		}
11120 
11121 		case IWM_STATISTICS_NOTIFICATION: {
11122 			struct iwm_notif_statistics *stats;
11123 			SYNC_RESP_STRUCT(stats, pkt);
11124 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
11125 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
11126 			break;
11127 		}
11128 
11129 		case IWM_MCC_CHUB_UPDATE_CMD: {
11130 			struct iwm_mcc_chub_notif *notif;
11131 			SYNC_RESP_STRUCT(notif, pkt);
11132 			iwm_mcc_update(sc, notif);
11133 			break;
11134 		}
11135 
11136 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
11137 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
11138 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE):
11139 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
11140 				 IWM_TEMP_REPORTING_THRESHOLDS_CMD):
11141 			break;
11142 
11143 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
11144 		    IWM_CT_KILL_NOTIFICATION): {
11145 			struct iwm_ct_kill_notif *notif;
11146 			SYNC_RESP_STRUCT(notif, pkt);
11147 			printf("%s: device at critical temperature (%u degC), "
11148 			    "stopping device\n",
11149 			    DEVNAME(sc), le16toh(notif->temperature));
11150 			sc->sc_flags |= IWM_FLAG_HW_ERR;
11151 			task_add(systq, &sc->init_task);
11152 			break;
11153 		}
11154 
11155 		case IWM_ADD_STA_KEY:
11156 		case IWM_PHY_CONFIGURATION_CMD:
11157 		case IWM_TX_ANT_CONFIGURATION_CMD:
11158 		case IWM_ADD_STA:
11159 		case IWM_MAC_CONTEXT_CMD:
11160 		case IWM_REPLY_SF_CFG_CMD:
11161 		case IWM_POWER_TABLE_CMD:
11162 		case IWM_LTR_CONFIG:
11163 		case IWM_PHY_CONTEXT_CMD:
11164 		case IWM_BINDING_CONTEXT_CMD:
11165 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_CFG_CMD):
11166 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_REQ_UMAC):
11167 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
11168 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
11169 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
11170 		case IWM_REPLY_BEACON_FILTERING_CMD:
11171 		case IWM_MAC_PM_POWER_TABLE:
11172 		case IWM_TIME_QUOTA_CMD:
11173 		case IWM_REMOVE_STA:
11174 		case IWM_TXPATH_FLUSH:
11175 		case IWM_LQ_CMD:
11176 		case IWM_WIDE_ID(IWM_LONG_GROUP,
11177 				 IWM_FW_PAGING_BLOCK_CMD):
11178 		case IWM_BT_CONFIG:
11179 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
11180 		case IWM_NVM_ACCESS_CMD:
11181 		case IWM_MCC_UPDATE_CMD:
11182 		case IWM_TIME_EVENT_CMD: {
11183 			size_t pkt_len;
11184 
11185 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
11186 				break;
11187 
11188 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
11189 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
11190 
11191 			pkt_len = sizeof(pkt->len_n_flags) +
11192 			    iwm_rx_packet_len(pkt);
11193 
11194 			if ((pkt->hdr.flags & IWM_CMD_FAILED_MSK) ||
11195 			    pkt_len < sizeof(*pkt) ||
11196 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
11197 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
11198 				    sc->sc_cmd_resp_len[idx]);
11199 				sc->sc_cmd_resp_pkt[idx] = NULL;
11200 				break;
11201 			}
11202 
11203 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
11204 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
11205 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
11206 			break;
11207 		}
11208 
11209 		/* ignore */
11210 		case IWM_PHY_DB_CMD:
11211 			break;
11212 
11213 		case IWM_INIT_COMPLETE_NOTIF:
11214 			sc->sc_init_complete |= IWM_INIT_COMPLETE;
11215 			wakeup(&sc->sc_init_complete);
11216 			break;
11217 
11218 		case IWM_SCAN_OFFLOAD_COMPLETE: {
11219 			struct iwm_periodic_scan_complete *notif;
11220 			SYNC_RESP_STRUCT(notif, pkt);
11221 			break;
11222 		}
11223 
11224 		case IWM_SCAN_ITERATION_COMPLETE: {
11225 			struct iwm_lmac_scan_complete_notif *notif;
11226 			SYNC_RESP_STRUCT(notif, pkt);
11227 			iwm_endscan(sc);
11228 			break;
11229 		}
11230 
11231 		case IWM_SCAN_COMPLETE_UMAC: {
11232 			struct iwm_umac_scan_complete *notif;
11233 			SYNC_RESP_STRUCT(notif, pkt);
11234 			iwm_endscan(sc);
11235 			break;
11236 		}
11237 
11238 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
11239 			struct iwm_umac_scan_iter_complete_notif *notif;
11240 			SYNC_RESP_STRUCT(notif, pkt);
11241 			iwm_endscan(sc);
11242 			break;
11243 		}
11244 
11245 		case IWM_REPLY_ERROR: {
11246 			struct iwm_error_resp *resp;
11247 			SYNC_RESP_STRUCT(resp, pkt);
11248 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
11249 				DEVNAME(sc), le32toh(resp->error_type),
11250 				resp->cmd_id);
11251 			break;
11252 		}
11253 
11254 		case IWM_TIME_EVENT_NOTIFICATION: {
11255 			struct iwm_time_event_notif *notif;
11256 			uint32_t action;
11257 			SYNC_RESP_STRUCT(notif, pkt);
11258 
11259 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
11260 				break;
11261 			action = le32toh(notif->action);
11262 			if (action & IWM_TE_V2_NOTIF_HOST_EVENT_END)
11263 				sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
11264 			break;
11265 		}
11266 
11267 		case IWM_WIDE_ID(IWM_SYSTEM_GROUP,
11268 		    IWM_FSEQ_VER_MISMATCH_NOTIFICATION):
11269 			break;
11270 
11271 		/*
11272 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
11273 		 * messages. Just ignore them for now.
11274 		 */
11275 		case IWM_DEBUG_LOG_MSG:
11276 			break;
11277 
11278 		case IWM_MCAST_FILTER_CMD:
11279 			break;
11280 
11281 		case IWM_SCD_QUEUE_CFG: {
11282 			struct iwm_scd_txq_cfg_rsp *rsp;
11283 			SYNC_RESP_STRUCT(rsp, pkt);
11284 
11285 			break;
11286 		}
11287 
11288 		case IWM_WIDE_ID(IWM_DATA_PATH_GROUP, IWM_DQA_ENABLE_CMD):
11289 			break;
11290 
11291 		case IWM_WIDE_ID(IWM_SYSTEM_GROUP, IWM_SOC_CONFIGURATION_CMD):
11292 			break;
11293 
11294 		default:
11295 			handled = 0;
11296 			printf("%s: unhandled firmware response 0x%x/0x%x "
11297 			    "rx ring %d[%d]\n",
11298 			    DEVNAME(sc), code, pkt->len_n_flags,
11299 			    (qid & ~0x80), idx);
11300 			break;
11301 		}
11302 
11303 		/*
11304 		 * uCode sets bit 0x80 when it originates the notification,
11305 		 * i.e. when the notification is not a direct response to a
11306 		 * command sent by the driver.
11307 		 * For example, uCode issues IWM_REPLY_RX when it sends a
11308 		 * received frame to the driver.
11309 		 */
11310 		if (handled && !(qid & (1 << 7))) {
11311 			iwm_cmd_done(sc, qid, idx, code);
11312 		}
11313 
11314 		offset += roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
11315 	}
11316 
11317 	if (m0 && m0 != data->m)
11318 		m_freem(m0);
11319 }
11320 
11321 void
11322 iwm_notif_intr(struct iwm_softc *sc)
11323 {
11324 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
11325 	uint32_t wreg;
11326 	uint16_t hw;
11327 	int count;
11328 
11329 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
11330 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
11331 
11332 	if (sc->sc_mqrx_supported) {
11333 		count = IWM_RX_MQ_RING_COUNT;
11334 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
11335 	} else {
11336 		count = IWM_RX_RING_COUNT;
11337 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
11338 	}
11339 
11340 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
11341 	hw &= (count - 1);
11342 	while (sc->rxq.cur != hw) {
11343 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
11344 		iwm_rx_pkt(sc, data, &ml);
11345 		ADVANCE_RXQ(sc);
11346 	}
11347 	if_input(&sc->sc_ic.ic_if, &ml);
11348 
11349 	/*
11350 	 * Tell the firmware what we have processed.
11351 	 * Seems like the hardware gets upset unless we align the write by 8??
11352 	 */
11353 	hw = (hw == 0) ? count - 1 : hw - 1;
11354 	IWM_WRITE(sc, wreg, hw & ~7);
11355 }
11356 
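/*
 * Legacy (INTx/MSI) interrupt handler. With ICT enabled, interrupt
 * causes are read from a DMA table written by the device rather than
 * from the INT register; the table is drained and the causes merged
 * into a single register image before being dispatched below.
 */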
11357 int
11358 iwm_intr(void *arg)
11359 {
11360 	struct iwm_softc *sc = arg;
11361 	struct ieee80211com *ic = &sc->sc_ic;
11362 	struct ifnet *ifp = IC2IFP(ic);
11363 	int handled = 0;
11364 	int rv = 0;
11365 	uint32_t r1, r2;
11366 
11367 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
11368 
11369 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
11370 		uint32_t *ict = sc->ict_dma.vaddr;
11371 		int tmp;
11372 
11373 		tmp = htole32(ict[sc->ict_cur]);
11374 		if (!tmp)
11375 			goto out_ena;
11376 
11377 		/*
11378 		 * ok, there was something.  keep plowing until we have all.
11379 		 */
11380 		r1 = r2 = 0;
11381 		while (tmp) {
11382 			r1 |= tmp;
11383 			ict[sc->ict_cur] = 0;
11384 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
11385 			tmp = htole32(ict[sc->ict_cur]);
11386 		}
11387 
11388 		/* this is where the fun begins.  don't ask */
11389 		if (r1 == 0xffffffff)
11390 			r1 = 0;
11391 
11392 		/*
11393 		 * Workaround for hardware bug where bits are falsely cleared
11394 		 * when using interrupt coalescing.  Bit 15 should be set if
11395 		 * bits 18 and 19 are set.
11396 		 */
11397 		if (r1 & 0xc0000)
11398 			r1 |= 0x8000;
11399 
11400 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
11401 	} else {
11402 		r1 = IWM_READ(sc, IWM_CSR_INT);
11403 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
11404 	}
11405 	if (r1 == 0 && r2 == 0) {
11406 		goto out_ena;
11407 	}
11408 	if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
11409 		goto out;
11410 
11411 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
11412 
11413 	/* ignored */
11414 	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
11415 
11416 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
11417 		handled |= IWM_CSR_INT_BIT_RF_KILL;
11418 		iwm_check_rfkill(sc);
11419 		task_add(systq, &sc->init_task);
11420 		rv = 1;
11421 		goto out_ena;
11422 	}
11423 
11424 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
11425 		if (ifp->if_flags & IFF_DEBUG) {
11426 			iwm_nic_error(sc);
11427 			iwm_dump_driver_status(sc);
11428 		}
11429 		printf("%s: fatal firmware error\n", DEVNAME(sc));
11430 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
11431 			task_add(systq, &sc->init_task);
11432 		rv = 1;
11433 		goto out;
11434 
11435 	}
11436 
11437 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
11438 		handled |= IWM_CSR_INT_BIT_HW_ERR;
11439 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
11440 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
11441 			sc->sc_flags |= IWM_FLAG_HW_ERR;
11442 			task_add(systq, &sc->init_task);
11443 		}
11444 		rv = 1;
11445 		goto out;
11446 	}
11447 
11448 	/* firmware chunk loaded */
11449 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
11450 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
11451 		handled |= IWM_CSR_INT_BIT_FH_TX;
11452 
11453 		sc->sc_fw_chunk_done = 1;
11454 		wakeup(&sc->sc_fw);
11455 	}
11456 
11457 	if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX |
11458 	    IWM_CSR_INT_BIT_RX_PERIODIC)) {
11459 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) {
11460 			handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
11461 			IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
11462 		}
11463 		if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
11464 			handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
11465 			IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
11466 		}
11467 
11468 		/* Disable periodic interrupt; we use it as just a one-shot. */
11469 		IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
11470 
11471 		/*
11472 		 * Enable periodic interrupt in 8 msec only if we received
11473 		 * real RX interrupt (instead of just periodic int), to catch
11474 		 * any dangling Rx interrupt.  If it was just the periodic
11475 		 * interrupt, there was no dangling Rx activity, and no need
11476 		 * to extend the periodic interrupt; one-shot is enough.
11477 		 */
11478 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX))
11479 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
11480 			    IWM_CSR_INT_PERIODIC_ENA);
11481 
11482 		iwm_notif_intr(sc);
11483 	}
11484 
11485 	rv = 1;
11486 
11487  out_ena:
11488 	iwm_restore_interrupts(sc);
11489  out:
11490 	return rv;
11491 }
11492 
11493 int
11494 iwm_intr_msix(void *arg)
11495 {
11496 	struct iwm_softc *sc = arg;
11497 	struct ieee80211com *ic = &sc->sc_ic;
11498 	struct ifnet *ifp = IC2IFP(ic);
11499 	uint32_t inta_fh, inta_hw;
11500 	int vector = 0;
11501 
11502 	inta_fh = IWM_READ(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD);
11503 	inta_hw = IWM_READ(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD);
11504 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
11505 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
11506 	inta_fh &= sc->sc_fh_mask;
11507 	inta_hw &= sc->sc_hw_mask;
11508 
11509 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_Q0 ||
11510 	    inta_fh & IWM_MSIX_FH_INT_CAUSES_Q1) {
11511 		iwm_notif_intr(sc);
11512 	}
11513 
11514 	/* firmware chunk loaded */
11515 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
11516 		sc->sc_fw_chunk_done = 1;
11517 		wakeup(&sc->sc_fw);
11518 	}
11519 
11520 	if ((inta_fh & IWM_MSIX_FH_INT_CAUSES_FH_ERR) ||
11521 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
11522 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
11523 		if (ifp->if_flags & IFF_DEBUG) {
11524 			iwm_nic_error(sc);
11525 			iwm_dump_driver_status(sc);
11526 		}
11527 		printf("%s: fatal firmware error\n", DEVNAME(sc));
11528 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
11529 			task_add(systq, &sc->init_task);
11530 		return 1;
11531 	}
11532 
11533 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
11534 		iwm_check_rfkill(sc);
11535 		task_add(systq, &sc->init_task);
11536 	}
11537 
11538 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
11539 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
11540 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
11541 			sc->sc_flags |= IWM_FLAG_HW_ERR;
11542 			task_add(systq, &sc->init_task);
11543 		}
11544 		return 1;
11545 	}
11546 
11547 	/*
11548 	 * Before sending the interrupt the HW disables it to prevent
11549 	 * a nested interrupt. This is done by writing 1 to the corresponding
11550 	 * bit in the mask register. After handling the interrupt, it should be
11551 	 * re-enabled by clearing this bit. This register is defined as a
11552 	 * write-1-clear (W1C) register, meaning that the bit is cleared
11553 	 * by writing 1 to it.
11554 	 */
11555 	IWM_WRITE(sc, IWM_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
11556 	return 1;
11557 }
11558 
11559 typedef void *iwm_match_t;
11560 
11561 static const struct pci_matchid iwm_devices[] = {
11562 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_1 },
11563 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_2 },
11564 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_1 },
11565 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_2 },
11566 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3168_1 },
11567 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_1 },
11568 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_2 },
11569 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_1 },
11570 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_2 },
11571 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_1 },
11572 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_2 },
11573 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8265_1 },
11574 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9260_1 },
11575 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_1 },
11576 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_2 },
11577 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_3 },
11578 };
11579 
11580 int
11581 iwm_match(struct device *parent, iwm_match_t match __unused, void *aux)
11582 {
11583 	return pci_matchbyid((struct pci_attach_args *)aux, iwm_devices,
11584 	    nitems(iwm_devices));
11585 }
11586 
11587 int
11588 iwm_preinit(struct iwm_softc *sc)
11589 {
11590 	struct ieee80211com *ic = &sc->sc_ic;
11591 	struct ifnet *ifp = IC2IFP(ic);
11592 	int err;
11593 
11594 	err = iwm_prepare_card_hw(sc);
11595 	if (err) {
11596 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
11597 		return err;
11598 	}
11599 
11600 	if (sc->attached) {
11601 		/* Update MAC in case the upper layers changed it. */
11602 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
11603 		    ((struct arpcom *)ifp)->ac_enaddr);
11604 		return 0;
11605 	}
11606 
11607 	err = iwm_start_hw(sc);
11608 	if (err) {
11609 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
11610 		return err;
11611 	}
11612 
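	/*
	 * Run the init firmware image just long enough to read the NVM
	 * (the second argument selects NVM-only operation), then stop
	 * the device again.
	 */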
11613 	err = iwm_run_init_mvm_ucode(sc, 1);
11614 	iwm_stop_device(sc);
11615 	if (err)
11616 		return err;
11617 
11618 	/* Print version info and MAC address on first successful fw load. */
11619 	sc->attached = 1;
11620 	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
11621 	    DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
11622 	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
11623 
11624 	if (sc->sc_nvm.sku_cap_11n_enable)
11625 		iwm_setup_ht_rates(sc);
11626 
11627 	/* not all hardware can do 5GHz band */
11628 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
11629 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
11630 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
11631 
11632 	/* Configure channel information obtained from firmware. */
11633 	ieee80211_channel_init(ifp);
11634 
11635 	/* Configure MAC address. */
11636 	err = if_setlladdr(ifp, ic->ic_myaddr);
11637 	if (err)
11638 		printf("%s: could not set MAC address (error %d)\n",
11639 		    DEVNAME(sc), err);
11640 
11641 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
11642 
11643 	return 0;
11644 }
11645 
11646 void
11647 iwm_attach_hook(struct device *self)
11648 {
11649 	struct iwm_softc *sc = (void *)self;
11650 
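	/*
	 * Mountroot hooks run once the root file system is mounted, so
	 * loading firmware from disk is possible and sleeping is allowed.
	 */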
11651 	KASSERT(!cold);
11652 
11653 	iwm_preinit(sc);
11654 }
11655 
11656 void
11657 iwm_attach(struct device *parent, struct device *self, void *aux)
11658 {
11659 	struct iwm_softc *sc = (void *)self;
11660 	struct pci_attach_args *pa = aux;
11661 	pci_intr_handle_t ih;
11662 	pcireg_t reg, memtype;
11663 	struct ieee80211com *ic = &sc->sc_ic;
11664 	struct ifnet *ifp = &ic->ic_if;
11665 	const char *intrstr;
11666 	int err;
11667 	int txq_i, i, j;
11668 
11669 	sc->sc_pct = pa->pa_pc;
11670 	sc->sc_pcitag = pa->pa_tag;
11671 	sc->sc_dmat = pa->pa_dmat;
11672 
11673 	rw_init(&sc->ioctl_rwl, "iwmioctl");
11674 
11675 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
11676 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
11677 	if (err == 0) {
11678 		printf("%s: PCIe capability structure not found!\n",
11679 		    DEVNAME(sc));
11680 		return;
11681 	}
11682 
11683 	/*
11684 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
11685 	 * PCI Tx retries from interfering with C3 CPU state.
11686 	 */
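	/*
	 * (PCI config space is accessed as 32-bit dwords here: register
	 * 0x41 is byte 1 of the dword at offset 0x40, hence the ~0xff00
	 * mask below.)
	 */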
11687 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
11688 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
11689 
11690 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
11691 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
11692 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
11693 	if (err) {
11694 		printf("%s: can't map mem space\n", DEVNAME(sc));
11695 		return;
11696 	}
11697 
11698 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
11699 		sc->sc_msix = 1;
11700 	} else if (pci_intr_map_msi(pa, &ih)) {
11701 		if (pci_intr_map(pa, &ih)) {
11702 			printf("%s: can't map interrupt\n", DEVNAME(sc));
11703 			return;
11704 		}
11705 		/* Hardware bug workaround: make sure legacy INTx is enabled. */
11706 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
11707 		    PCI_COMMAND_STATUS_REG);
11708 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
11709 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
11710 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
11711 		    PCI_COMMAND_STATUS_REG, reg);
11712 	}
11713 
11714 	intrstr = pci_intr_string(sc->sc_pct, ih);
11715 	if (sc->sc_msix)
11716 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11717 		    iwm_intr_msix, sc, DEVNAME(sc));
11718 	else
11719 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11720 		    iwm_intr, sc, DEVNAME(sc));
11721 
11722 	if (sc->sc_ih == NULL) {
11723 		printf("\n");
11724 		printf("%s: can't establish interrupt", DEVNAME(sc));
11725 		if (intrstr != NULL)
11726 			printf(" at %s", intrstr);
11727 		printf("\n");
11728 		return;
11729 	}
11730 	printf(", %s\n", intrstr);
11731 
11732 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
11733 	switch (PCI_PRODUCT(pa->pa_id)) {
11734 	case PCI_PRODUCT_INTEL_WL_3160_1:
11735 	case PCI_PRODUCT_INTEL_WL_3160_2:
11736 		sc->sc_fwname = "iwm-3160-17";
11737 		sc->host_interrupt_operation_mode = 1;
11738 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11739 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11740 		sc->sc_nvm_max_section_size = 16384;
11741 		sc->nvm_type = IWM_NVM;
11742 		break;
11743 	case PCI_PRODUCT_INTEL_WL_3165_1:
11744 	case PCI_PRODUCT_INTEL_WL_3165_2:
11745 		sc->sc_fwname = "iwm-7265D-29";
11746 		sc->host_interrupt_operation_mode = 0;
11747 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11748 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11749 		sc->sc_nvm_max_section_size = 16384;
11750 		sc->nvm_type = IWM_NVM;
11751 		break;
11752 	case PCI_PRODUCT_INTEL_WL_3168_1:
11753 		sc->sc_fwname = "iwm-3168-29";
11754 		sc->host_interrupt_operation_mode = 0;
11755 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11756 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11757 		sc->sc_nvm_max_section_size = 16384;
11758 		sc->nvm_type = IWM_NVM_SDP;
11759 		break;
11760 	case PCI_PRODUCT_INTEL_WL_7260_1:
11761 	case PCI_PRODUCT_INTEL_WL_7260_2:
11762 		sc->sc_fwname = "iwm-7260-17";
11763 		sc->host_interrupt_operation_mode = 1;
11764 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11765 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11766 		sc->sc_nvm_max_section_size = 16384;
11767 		sc->nvm_type = IWM_NVM;
11768 		break;
11769 	case PCI_PRODUCT_INTEL_WL_7265_1:
11770 	case PCI_PRODUCT_INTEL_WL_7265_2:
11771 		sc->sc_fwname = "iwm-7265-17";
11772 		sc->host_interrupt_operation_mode = 0;
11773 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11774 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11775 		sc->sc_nvm_max_section_size = 16384;
11776 		sc->nvm_type = IWM_NVM;
11777 		break;
11778 	case PCI_PRODUCT_INTEL_WL_8260_1:
11779 	case PCI_PRODUCT_INTEL_WL_8260_2:
11780 		sc->sc_fwname = "iwm-8000C-36";
11781 		sc->host_interrupt_operation_mode = 0;
11782 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
11783 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11784 		sc->sc_nvm_max_section_size = 32768;
11785 		sc->nvm_type = IWM_NVM_EXT;
11786 		break;
11787 	case PCI_PRODUCT_INTEL_WL_8265_1:
11788 		sc->sc_fwname = "iwm-8265-36";
11789 		sc->host_interrupt_operation_mode = 0;
11790 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
11791 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11792 		sc->sc_nvm_max_section_size = 32768;
11793 		sc->nvm_type = IWM_NVM_EXT;
11794 		break;
11795 	case PCI_PRODUCT_INTEL_WL_9260_1:
11796 		sc->sc_fwname = "iwm-9260-46";
11797 		sc->host_interrupt_operation_mode = 0;
11798 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
11799 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11800 		sc->sc_nvm_max_section_size = 32768;
11801 		sc->sc_mqrx_supported = 1;
11802 		break;
11803 	case PCI_PRODUCT_INTEL_WL_9560_1:
11804 	case PCI_PRODUCT_INTEL_WL_9560_2:
11805 	case PCI_PRODUCT_INTEL_WL_9560_3:
11806 		sc->sc_fwname = "iwm-9000-46";
11807 		sc->host_interrupt_operation_mode = 0;
11808 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
11809 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11810 		sc->sc_nvm_max_section_size = 32768;
11811 		sc->sc_mqrx_supported = 1;
11812 		sc->sc_integrated = 1;
11813 		if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_WL_9560_3) {
11814 			sc->sc_xtal_latency = 670;
11815 			sc->sc_extra_phy_config = IWM_FW_PHY_CFG_SHARED_CLK;
11816 		} else
11817 			sc->sc_xtal_latency = 650;
11818 		break;
11819 	default:
11820 		printf("%s: unknown adapter type\n", DEVNAME(sc));
11821 		return;
11822 	}
11823 
11824 	/*
11825 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV
11826 	 * has changed: the revision step now also includes bits 0-1 (there
11827 	 * is no more "dash" value). To keep hw_rev backwards compatible we
11828 	 * store it in the old format.
11829 	 */
11830 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
11831 		uint32_t hw_step;
11832 
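		/*
		 * Worked example with a hypothetical raw value of 0x211
		 * (step bits 0-1 = 0x1), assuming IWM_CSR_HW_REV_STEP()
		 * extracts bits 2-3 of its argument:
		 *   (0x211 & 0xfff0) | ((((0x211 << 2) & 0xc) >> 2) << 2)
		 *     = 0x210 | (0x1 << 2) = 0x214
		 * i.e. the step ends up in bits 2-3 of the old format.
		 */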
11833 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
11834 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
11835 
11836 		if (iwm_prepare_card_hw(sc) != 0) {
11837 			printf("%s: could not initialize hardware\n",
11838 			    DEVNAME(sc));
11839 			return;
11840 		}
11841 
11842 		/*
11843 		 * To recognize a C step, the driver must read the chip
11844 		 * version ID, located at the AUX bus MISC address.
11845 		 */
11846 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
11847 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
11848 		DELAY(2);
11849 
11850 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
11851 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
11852 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
11853 				   25000);
11854 		if (!err) {
11855 			printf("%s: failed to wake up the NIC\n", DEVNAME(sc));
11856 			return;
11857 		}
11858 
11859 		if (iwm_nic_lock(sc)) {
11860 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
11861 			hw_step |= IWM_ENABLE_WFPM;
11862 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
11863 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
11864 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
11865 			if (hw_step == 0x3)
11866 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
11867 						(IWM_SILICON_C_STEP << 2);
11868 			iwm_nic_unlock(sc);
11869 		} else {
11870 			printf("%s: failed to lock the NIC\n", DEVNAME(sc));
11871 			return;
11872 		}
11873 	}
11874 
11875 	/*
11876 	 * Allocate DMA memory for firmware transfers.
11877 	 * Must be aligned on a 16-byte boundary.
11878 	 */
11879 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
11880 	    sc->sc_fwdmasegsz, 16);
11881 	if (err) {
11882 		printf("%s: could not allocate memory for firmware\n",
11883 		    DEVNAME(sc));
11884 		return;
11885 	}
11886 
11887 	/* Allocate "Keep Warm" page, used internally by the card. */
11888 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
11889 	if (err) {
11890 		printf("%s: could not allocate keep warm page\n", DEVNAME(sc));
11891 		goto fail1;
11892 	}
11893 
11894 	/* Allocate interrupt cause table (ICT). */
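	/*
	 * (The 1 << IWM_ICT_PADDR_SHIFT alignment is required because the
	 * device is handed only the upper bits of the table's physical
	 * address.)
	 */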
11895 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
11896 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
11897 	if (err) {
11898 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
11899 		goto fail2;
11900 	}
11901 
11902 	/* TX scheduler rings must be aligned on a 1KB boundary. */
11903 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
11904 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
11905 	if (err) {
11906 		printf("%s: could not allocate TX scheduler rings\n",
11907 		    DEVNAME(sc));
11908 		goto fail3;
11909 	}
11910 
11911 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
11912 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
11913 		if (err) {
11914 			printf("%s: could not allocate TX ring %d\n",
11915 			    DEVNAME(sc), txq_i);
11916 			goto fail4;
11917 		}
11918 	}
11919 
11920 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
11921 	if (err) {
11922 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
11923 		goto fail4;
11924 	}
11925 
11926 	sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET, 0);
11927 	if (sc->sc_nswq == NULL)
11928 		goto fail4;
11929 
11930 	/* Clear pending interrupts. */
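	/* (IWM_CSR_INT is write-1-to-clear; writing all ones acks every cause.) */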
11931 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
11932 
11933 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
11934 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
11935 	ic->ic_state = IEEE80211_S_INIT;
11936 
11937 	/* Set device capabilities. */
11938 	ic->ic_caps =
11939 	    IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
11940 	    IEEE80211_C_WEP |		/* WEP */
11941 	    IEEE80211_C_RSN |		/* WPA/RSN */
11942 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
11943 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
11944 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
11945 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
11946 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
11947 
11948 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20 | IEEE80211_HTCAP_SGI40;
11949 	ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40;
11950 	ic->ic_htcaps |=
11951 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
11952 	ic->ic_htxcaps = 0;
11953 	ic->ic_txbfcaps = 0;
11954 	ic->ic_aselcaps = 0;
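	/*
	 * 0x3 is the maximum A-MPDU length exponent: 802.11n defines the
	 * largest A-MPDU as 2^(13 + exponent) - 1 bytes, i.e. 65535 bytes
	 * here. IEEE80211_AMPDU_PARAM_SS_4 requests a minimum MPDU start
	 * spacing of 4 usec.
	 */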
11955 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
11956 
11957 	ic->ic_vhtcaps = IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895 |
11958 	    (IEEE80211_VHTCAP_MAX_AMPDU_LEN_64K <<
11959 	    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT) |
11960 	    (IEEE80211_VHTCAP_CHAN_WIDTH_80 <<
11961 	     IEEE80211_VHTCAP_CHAN_WIDTH_SHIFT) | IEEE80211_VHTCAP_SGI80 |
11962 	    IEEE80211_VHTCAP_RX_ANT_PATTERN | IEEE80211_VHTCAP_TX_ANT_PATTERN;
11963 
11964 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
11965 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
11966 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
11967 
11968 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
11969 		sc->sc_phyctxt[i].id = i;
11970 		sc->sc_phyctxt[i].sco = IEEE80211_HTOP0_SCO_SCN;
11971 		sc->sc_phyctxt[i].vht_chan_width =
11972 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT;
11973 	}
11974 
11975 	sc->sc_amrr.amrr_min_success_threshold =  1;
11976 	sc->sc_amrr.amrr_max_success_threshold = 15;
11977 
11978 	/* IBSS channel undefined for now. */
11979 	ic->ic_ibss_chan = &ic->ic_channels[1];
11980 
11981 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
11982 
11983 	ifp->if_softc = sc;
11984 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
11985 	ifp->if_ioctl = iwm_ioctl;
11986 	ifp->if_start = iwm_start;
11987 	ifp->if_watchdog = iwm_watchdog;
11988 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
11989 
11990 	if_attach(ifp);
11991 	ieee80211_ifattach(ifp);
11992 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
11993 
11994 #if NBPFILTER > 0
11995 	iwm_radiotap_attach(sc);
11996 #endif
11997 	timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
11998 	timeout_set(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
11999 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
12000 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
12001 		rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
12002 		rxba->sc = sc;
12003 		timeout_set(&rxba->session_timer, iwm_rx_ba_session_expired,
12004 		    rxba);
12005 		timeout_set(&rxba->reorder_buf.reorder_timer,
12006 		    iwm_reorder_timer_expired, &rxba->reorder_buf);
12007 		for (j = 0; j < nitems(rxba->entries); j++)
12008 			ml_init(&rxba->entries[j].frames);
12009 	}
12010 	task_set(&sc->init_task, iwm_init_task, sc);
12011 	task_set(&sc->newstate_task, iwm_newstate_task, sc);
12012 	task_set(&sc->ba_task, iwm_ba_task, sc);
12013 	task_set(&sc->mac_ctxt_task, iwm_mac_ctxt_task, sc);
12014 	task_set(&sc->phy_ctxt_task, iwm_phy_ctxt_task, sc);
12015 	task_set(&sc->bgscan_done_task, iwm_bgscan_done_task, sc);
12016 
12017 	ic->ic_node_alloc = iwm_node_alloc;
12018 	ic->ic_bgscan_start = iwm_bgscan;
12019 	ic->ic_bgscan_done = iwm_bgscan_done;
12020 	ic->ic_set_key = iwm_set_key;
12021 	ic->ic_delete_key = iwm_delete_key;
12022 
12023 	/* Override 802.11 state transition machine. */
12024 	sc->sc_newstate = ic->ic_newstate;
12025 	ic->ic_newstate = iwm_newstate;
12026 	ic->ic_updateprot = iwm_updateprot;
12027 	ic->ic_updateslot = iwm_updateslot;
12028 	ic->ic_updateedca = iwm_updateedca;
12029 	ic->ic_updatechan = iwm_updatechan;
12030 	ic->ic_updatedtim = iwm_updatedtim;
12031 	ic->ic_ampdu_rx_start = iwm_ampdu_rx_start;
12032 	ic->ic_ampdu_rx_stop = iwm_ampdu_rx_stop;
12033 	ic->ic_ampdu_tx_start = iwm_ampdu_tx_start;
12034 	ic->ic_ampdu_tx_stop = iwm_ampdu_tx_stop;
12035 	/*
12036 	 * We cannot read the MAC address without loading the
12037 	 * firmware from disk. Postpone until mountroot is done.
12038 	 */
12039 	config_mountroot(self, iwm_attach_hook);
12040 
12041 	return;
12042 
12043 fail4:	while (--txq_i >= 0)
12044 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
12045 	iwm_free_rx_ring(sc, &sc->rxq);
12046 	iwm_dma_contig_free(&sc->sched_dma);
12047 fail3:	if (sc->ict_dma.vaddr != NULL)
12048 		iwm_dma_contig_free(&sc->ict_dma);
12049 
12050 fail2:	iwm_dma_contig_free(&sc->kw_dma);
12051 fail1:	iwm_dma_contig_free(&sc->fw_dma);
12052 	return;
12053 }
12054 
12055 #if NBPFILTER > 0
12056 void
12057 iwm_radiotap_attach(struct iwm_softc *sc)
12058 {
12059 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
12060 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
12061 
12062 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
12063 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
12064 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
12065 
12066 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
12067 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
12068 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
12069 }
12070 #endif
12071 
12072 void
12073 iwm_init_task(void *arg1)
12074 {
12075 	struct iwm_softc *sc = arg1;
12076 	struct ifnet *ifp = &sc->sc_ic.ic_if;
12077 	int s = splnet();
12078 	int generation = sc->sc_generation;
12079 	int fatal = (sc->sc_flags & (IWM_FLAG_HW_ERR | IWM_FLAG_RFKILL));
12080 
12081 	rw_enter_write(&sc->ioctl_rwl);
12082 	if (generation != sc->sc_generation) {
12083 		rw_exit(&sc->ioctl_rwl);
12084 		splx(s);
12085 		return;
12086 	}
12087 
12088 	if (ifp->if_flags & IFF_RUNNING)
12089 		iwm_stop(ifp);
12090 	else
12091 		sc->sc_flags &= ~IWM_FLAG_HW_ERR;
12092 
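	/* IFF_UP set but IFF_RUNNING clear: the interface wants to come up. */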
12093 	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
12094 		iwm_init(ifp);
12095 
12096 	rw_exit(&sc->ioctl_rwl);
12097 	splx(s);
12098 }
12099 
12100 void
12101 iwm_resume(struct iwm_softc *sc)
12102 {
12103 	pcireg_t reg;
12104 
12105 	/*
12106 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
12107 	 * PCI Tx retries from interfering with C3 CPU state.
12108 	 */
12109 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
12110 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
12111 
12112 	if (!sc->sc_msix) {
12113 		/* Hardware bug workaround: make sure interrupt delivery is not disabled. */
12114 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
12115 		    PCI_COMMAND_STATUS_REG);
12116 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
12117 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
12118 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
12119 		    PCI_COMMAND_STATUS_REG, reg);
12120 	}
12121 
12122 	iwm_disable_interrupts(sc);
12123 }
12124 
12125 int
12126 iwm_wakeup(struct iwm_softc *sc)
12127 {
12128 	struct ieee80211com *ic = &sc->sc_ic;
12129 	struct ifnet *ifp = &sc->sc_ic.ic_if;
12130 	int err;
12131 
12132 	err = iwm_start_hw(sc);
12133 	if (err)
12134 		return err;
12135 
12136 	err = iwm_init_hw(sc);
12137 	if (err)
12138 		return err;
12139 
12140 	refcnt_init(&sc->task_refs);
12141 	ifq_clr_oactive(&ifp->if_snd);
12142 	ifp->if_flags |= IFF_RUNNING;
12143 
12144 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
12145 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
12146 	else
12147 		ieee80211_begin_scan(ifp);
12148 
12149 	return 0;
12150 }
12151 
12152 int
12153 iwm_activate(struct device *self, int act)
12154 {
12155 	struct iwm_softc *sc = (struct iwm_softc *)self;
12156 	struct ifnet *ifp = &sc->sc_ic.ic_if;
12157 	int err = 0;
12158 
12159 	switch (act) {
12160 	case DVACT_QUIESCE:
12161 		if (ifp->if_flags & IFF_RUNNING) {
12162 			rw_enter_write(&sc->ioctl_rwl);
12163 			iwm_stop(ifp);
12164 			rw_exit(&sc->ioctl_rwl);
12165 		}
12166 		break;
12167 	case DVACT_RESUME:
12168 		iwm_resume(sc);
12169 		break;
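	/*
	 * DVACT_WAKEUP runs after DVACT_RESUME, once the system is running
	 * normally again, so the full (possibly sleeping) reinitialization
	 * is done here rather than in iwm_resume().
	 */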
12170 	case DVACT_WAKEUP:
12171 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
12172 			err = iwm_wakeup(sc);
12173 			if (err)
12174 				printf("%s: could not initialize hardware\n",
12175 				    DEVNAME(sc));
12176 		}
12177 		break;
12178 	}
12179 
12180 	return 0;
12181 }
12182 
12183 struct cfdriver iwm_cd = {
12184 	NULL, "iwm", DV_IFNET
12185 };
12186 
12187 const struct cfattach iwm_ca = {
12188 	sizeof(struct iwm_softc), iwm_match, iwm_attach,
12189 	NULL, iwm_activate
12190 };
12191