1 /*	$OpenBSD: if_iwx.c,v 1.188 2024/11/08 09:12:46 kettenis Exp $	*/
2 
3 /*
4  * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5  *   Author: Stefan Sperling <stsp@openbsd.org>
6  * Copyright (c) 2014 Fixup Software Ltd.
7  * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*-
23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24  * which were used as the reference documentation for this implementation.
25  *
26  ******************************************************************************
27  *
28  * This file is provided under a dual BSD/GPLv2 license.  When using or
29  * redistributing this file, you may do so under either license.
30  *
31  * GPL LICENSE SUMMARY
32  *
33  * Copyright(c) 2017 Intel Deutschland GmbH
34  * Copyright(c) 2018 - 2019 Intel Corporation
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * BSD LICENSE
46  *
47  * Copyright(c) 2017 Intel Deutschland GmbH
48  * Copyright(c) 2018 - 2019 Intel Corporation
49  * All rights reserved.
50  *
51  * Redistribution and use in source and binary forms, with or without
52  * modification, are permitted provided that the following conditions
53  * are met:
54  *
55  *  * Redistributions of source code must retain the above copyright
56  *    notice, this list of conditions and the following disclaimer.
57  *  * Redistributions in binary form must reproduce the above copyright
58  *    notice, this list of conditions and the following disclaimer in
59  *    the documentation and/or other materials provided with the
60  *    distribution.
61  *  * Neither the name Intel Corporation nor the names of its
62  *    contributors may be used to endorse or promote products derived
63  *    from this software without specific prior written permission.
64  *
65  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
66  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
67  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
68  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
69  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
70  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
71  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
72  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
73  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
74  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
75  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
76  *
77  *****************************************************************************
78  */
79 
80 /*-
81  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
82  *
83  * Permission to use, copy, modify, and distribute this software for any
84  * purpose with or without fee is hereby granted, provided that the above
85  * copyright notice and this permission notice appear in all copies.
86  *
87  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
88  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
89  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
90  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
91  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
92  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
93  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
94  */
95 
96 #include "bpfilter.h"
97 
98 #include <sys/param.h>
99 #include <sys/malloc.h>
100 #include <sys/mbuf.h>
101 #include <sys/rwlock.h>
102 #include <sys/socket.h>
103 #include <sys/sockio.h>
104 #include <sys/systm.h>
105 #include <sys/endian.h>
106 
107 #include <sys/refcnt.h>
108 #include <sys/task.h>
109 #include <machine/bus.h>
110 #include <machine/intr.h>
111 
112 #include <dev/pci/pcireg.h>
113 #include <dev/pci/pcivar.h>
114 #include <dev/pci/pcidevs.h>
115 
116 #if NBPFILTER > 0
117 #include <net/bpf.h>
118 #endif
119 #include <net/if.h>
120 #include <net/if_media.h>
121 
122 #include <netinet/in.h>
123 #include <netinet/if_ether.h>
124 
125 #include <net80211/ieee80211_var.h>
126 #include <net80211/ieee80211_radiotap.h>
127 #include <net80211/ieee80211_priv.h> /* for SEQ_LT */
128 #undef DPRINTF /* defined in ieee80211_priv.h */
129 
130 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
131 
132 #define IC2IFP(_ic_) (&(_ic_)->ic_if)
133 
134 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
135 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
136 
137 #ifdef IWX_DEBUG
138 #define DPRINTF(x)	do { if (iwx_debug > 0) printf x; } while (0)
139 #define DPRINTFN(n, x)	do { if (iwx_debug >= (n)) printf x; } while (0)
140 int iwx_debug = 1;
141 #else
142 #define DPRINTF(x)	do { ; } while (0)
143 #define DPRINTFN(n, x)	do { ; } while (0)
144 #endif
145 
146 #include <dev/pci/if_iwxreg.h>
147 #include <dev/pci/if_iwxvar.h>
148 
149 const uint8_t iwx_nvm_channels_8000[] = {
150 	/* 2.4 GHz */
151 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
152 	/* 5 GHz */
153 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
154 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
155 	149, 153, 157, 161, 165, 169, 173, 177, 181
156 };
157 
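/*
 * Channel map for ultra-high-band (UHB) devices, which additionally
 * support the 6 GHz band. The 6 GHz entries below use the 6 GHz channel
 * numbering scheme, which starts over at 1 and thus overlaps numerically
 * with the 2 GHz channel numbers.
 */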
158 static const uint8_t iwx_nvm_channels_uhb[] = {
159 	/* 2.4 GHz */
160 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
161 	/* 5 GHz */
162 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
163 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
164 	149, 153, 157, 161, 165, 169, 173, 177, 181,
165 	/* 6-7 GHz */
166 	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
167 	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
168 	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
169 	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
170 };
171 
172 #define IWX_NUM_2GHZ_CHANNELS	14
173 #define IWX_NUM_5GHZ_CHANNELS	37
174 
175 const struct iwx_rate {
176 	uint16_t rate;
177 	uint8_t plcp;
178 	uint8_t ht_plcp;
179 } iwx_rates[] = {
180 		/* Legacy */		/* HT */
181 	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
182 	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
183 	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
184 	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
185 	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
186 	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
187 	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
188 	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
189 	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
190 	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
191 	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
192 	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
193 	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
194 	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
195 	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
196 	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
197 	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
198 	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
199 	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
200 	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
201 	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
202 };
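/*
 * The "rate" field above is given in units of 500 kbit/s, as in 802.11
 * supported-rates elements; e.g. { 2, ... } is the 1 Mbit/s CCK rate and
 * { 12, ... } is the 6 Mbit/s OFDM rate.
 */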
203 #define IWX_RIDX_CCK	0
204 #define IWX_RIDX_OFDM	4
205 #define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
206 #define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
207 #define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
208 #define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
209 
210 /* Convert an MCS index into an iwx_rates[] index. */
211 const int iwx_mcs2ridx[] = {
212 	IWX_RATE_MCS_0_INDEX,
213 	IWX_RATE_MCS_1_INDEX,
214 	IWX_RATE_MCS_2_INDEX,
215 	IWX_RATE_MCS_3_INDEX,
216 	IWX_RATE_MCS_4_INDEX,
217 	IWX_RATE_MCS_5_INDEX,
218 	IWX_RATE_MCS_6_INDEX,
219 	IWX_RATE_MCS_7_INDEX,
220 	IWX_RATE_MCS_8_INDEX,
221 	IWX_RATE_MCS_9_INDEX,
222 	IWX_RATE_MCS_10_INDEX,
223 	IWX_RATE_MCS_11_INDEX,
224 	IWX_RATE_MCS_12_INDEX,
225 	IWX_RATE_MCS_13_INDEX,
226 	IWX_RATE_MCS_14_INDEX,
227 	IWX_RATE_MCS_15_INDEX,
228 };
229 
230 uint8_t	iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
231 uint8_t	iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
232 int	iwx_is_mimo_ht_plcp(uint8_t);
233 int	iwx_store_cscheme(struct iwx_softc *, uint8_t *, size_t);
234 int	iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
235 int	iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
236 int	iwx_apply_debug_destination(struct iwx_softc *);
237 void	iwx_set_ltr(struct iwx_softc *);
238 int	iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
239 int	iwx_ctxt_info_gen3_init(struct iwx_softc *,
240 	    const struct iwx_fw_sects *);
241 void	iwx_ctxt_info_free_fw_img(struct iwx_softc *);
242 void	iwx_ctxt_info_free_paging(struct iwx_softc *);
243 int	iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
244 	    struct iwx_context_info_dram *);
245 void	iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
246 int	iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
247 	    uint8_t *, size_t);
248 int	iwx_set_default_calib(struct iwx_softc *, const void *);
249 void	iwx_fw_info_free(struct iwx_fw_info *);
250 int	iwx_read_firmware(struct iwx_softc *);
251 uint32_t iwx_prph_addr_mask(struct iwx_softc *);
252 uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
253 uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
254 void	iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
255 void	iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
256 uint32_t iwx_read_umac_prph_unlocked(struct iwx_softc *, uint32_t);
257 uint32_t iwx_read_umac_prph(struct iwx_softc *, uint32_t);
258 void	iwx_write_umac_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
259 void	iwx_write_umac_prph(struct iwx_softc *, uint32_t, uint32_t);
260 int	iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
261 int	iwx_write_mem(struct iwx_softc *, uint32_t, const void *, int);
262 int	iwx_write_mem32(struct iwx_softc *, uint32_t, uint32_t);
263 int	iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
264 int	iwx_nic_lock(struct iwx_softc *);
265 void	iwx_nic_assert_locked(struct iwx_softc *);
266 void	iwx_nic_unlock(struct iwx_softc *);
267 int	iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
268 	    uint32_t);
269 int	iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
270 int	iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
271 int	iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *, bus_size_t,
272 	    bus_size_t);
273 void	iwx_dma_contig_free(struct iwx_dma_info *);
274 int	iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
275 void	iwx_disable_rx_dma(struct iwx_softc *);
276 void	iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
277 void	iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
278 int	iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
279 void	iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
280 void	iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
281 void	iwx_enable_rfkill_int(struct iwx_softc *);
282 int	iwx_check_rfkill(struct iwx_softc *);
283 void	iwx_enable_interrupts(struct iwx_softc *);
284 void	iwx_enable_fwload_interrupt(struct iwx_softc *);
285 void	iwx_restore_interrupts(struct iwx_softc *);
286 void	iwx_disable_interrupts(struct iwx_softc *);
287 void	iwx_ict_reset(struct iwx_softc *);
288 int	iwx_set_hw_ready(struct iwx_softc *);
289 int	iwx_prepare_card_hw(struct iwx_softc *);
290 int	iwx_force_power_gating(struct iwx_softc *);
291 void	iwx_apm_config(struct iwx_softc *);
292 int	iwx_apm_init(struct iwx_softc *);
293 void	iwx_apm_stop(struct iwx_softc *);
294 int	iwx_allow_mcast(struct iwx_softc *);
295 void	iwx_init_msix_hw(struct iwx_softc *);
296 void	iwx_conf_msix_hw(struct iwx_softc *, int);
297 int	iwx_clear_persistence_bit(struct iwx_softc *);
298 int	iwx_start_hw(struct iwx_softc *);
299 void	iwx_stop_device(struct iwx_softc *);
300 void	iwx_nic_config(struct iwx_softc *);
301 int	iwx_nic_rx_init(struct iwx_softc *);
302 int	iwx_nic_init(struct iwx_softc *);
303 int	iwx_enable_txq(struct iwx_softc *, int, int, int, int);
304 int	iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
305 void	iwx_post_alive(struct iwx_softc *);
306 int	iwx_schedule_session_protection(struct iwx_softc *, struct iwx_node *,
307 	    uint32_t);
308 void	iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
309 void	iwx_init_channel_map(struct iwx_softc *, uint16_t *, uint32_t *, int);
310 void	iwx_setup_ht_rates(struct iwx_softc *);
311 void	iwx_setup_vht_rates(struct iwx_softc *);
312 int	iwx_mimo_enabled(struct iwx_softc *);
313 void	iwx_mac_ctxt_task(void *);
314 void	iwx_phy_ctxt_task(void *);
315 void	iwx_updatechan(struct ieee80211com *);
316 void	iwx_updateprot(struct ieee80211com *);
317 void	iwx_updateslot(struct ieee80211com *);
318 void	iwx_updateedca(struct ieee80211com *);
319 void	iwx_updatedtim(struct ieee80211com *);
320 void	iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
321 	    uint16_t);
322 void	iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
323 int	iwx_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
324 	    uint8_t);
325 void	iwx_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
326 	    uint8_t);
327 int	iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
328 	    uint8_t);
329 void	iwx_rx_ba_session_expired(void *);
330 void	iwx_rx_bar_frame_release(struct iwx_softc *, struct iwx_rx_packet *,
331 	    struct mbuf_list *);
332 void	iwx_reorder_timer_expired(void *);
333 void	iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
334 	    uint16_t, uint16_t, int, int);
335 void	iwx_sta_tx_agg_start(struct iwx_softc *, struct ieee80211_node *,
336 	    uint8_t);
337 void	iwx_ba_task(void *);
338 
339 void	iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
340 int	iwx_is_valid_mac_addr(const uint8_t *);
341 void	iwx_flip_hw_address(uint32_t, uint32_t, uint8_t *);
342 int	iwx_nvm_get(struct iwx_softc *);
343 int	iwx_load_firmware(struct iwx_softc *);
344 int	iwx_start_fw(struct iwx_softc *);
345 int	iwx_pnvm_handle_section(struct iwx_softc *, const uint8_t *, size_t);
346 int	iwx_pnvm_parse(struct iwx_softc *, const uint8_t *, size_t);
347 void	iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *);
348 int	iwx_load_pnvm(struct iwx_softc *);
349 int	iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
350 int	iwx_send_phy_cfg_cmd(struct iwx_softc *);
351 int	iwx_load_ucode_wait_alive(struct iwx_softc *);
352 int	iwx_send_dqa_cmd(struct iwx_softc *);
353 int	iwx_run_init_mvm_ucode(struct iwx_softc *, int);
354 int	iwx_config_ltr(struct iwx_softc *);
355 void	iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int);
356 int	iwx_rx_addbuf(struct iwx_softc *, int, int);
357 int	iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
358 void	iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
359 	    struct iwx_rx_data *);
360 int	iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
361 int	iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t,
362 	    struct ieee80211_rxinfo *);
363 int	iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
364 	    struct ieee80211_node *, struct ieee80211_rxinfo *);
365 void	iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t, int, int,
366 	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
367 void	iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
368 void	iwx_txd_done(struct iwx_softc *, struct iwx_tx_data *);
369 void	iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, uint16_t);
370 void	iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
371 	    struct iwx_rx_data *);
372 void	iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
373 void	iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
374 	    struct iwx_rx_data *);
375 int	iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
376 uint8_t	iwx_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
377 int	iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
378 	    uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
379 int	iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
380 	    uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
381 int	iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
382 	    uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
383 int	iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
384 int	iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
385 	    const void *);
386 int	iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
387 	    uint32_t *);
388 int	iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
389 	    const void *, uint32_t *);
390 void	iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
391 void	iwx_cmd_done(struct iwx_softc *, int, int, int);
392 uint32_t iwx_fw_rateidx_ofdm(uint8_t);
393 uint32_t iwx_fw_rateidx_cck(uint8_t);
394 const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *, struct iwx_node *,
395 	    struct ieee80211_frame *, uint16_t *, uint32_t *);
396 void	iwx_tx_update_byte_tbl(struct iwx_softc *, struct iwx_tx_ring *, int,
397 	    uint16_t, uint16_t);
398 int	iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *);
399 int	iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
400 int	iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
401 int	iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
402 int	iwx_beacon_filter_send_cmd(struct iwx_softc *,
403 	    struct iwx_beacon_filter_cmd *);
404 int	iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *, int);
405 void	iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
406 	    struct iwx_mac_power_cmd *);
407 int	iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
408 int	iwx_power_update_device(struct iwx_softc *);
409 int	iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
410 int	iwx_disable_beacon_filter(struct iwx_softc *);
411 int	iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
412 int	iwx_mld_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
413 int	iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
414 int	iwx_mld_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
415 int	iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
416 int	iwx_fill_probe_req(struct iwx_softc *, struct iwx_scan_probe_req *);
417 int	iwx_config_umac_scan_reduced(struct iwx_softc *);
418 uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
419 void	iwx_scan_umac_dwell_v10(struct iwx_softc *,
420 	    struct iwx_scan_general_params_v10 *, int);
421 void	iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
422 	    struct iwx_scan_general_params_v10 *, uint16_t, int);
423 void	iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
424 	    struct iwx_scan_channel_params_v6 *, uint32_t, int);
425 int	iwx_umac_scan_v14(struct iwx_softc *, int);
426 void	iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
427 uint8_t	iwx_ridx2rate(struct ieee80211_rateset *, int);
428 int	iwx_rval2ridx(int);
429 void	iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *, int *);
430 void	iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
431 	    struct iwx_mac_ctx_cmd *, uint32_t);
432 void	iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
433 	    struct iwx_mac_data_sta *, int);
434 int	iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *, uint32_t, int);
435 int	iwx_mld_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *, uint32_t,
436 	    int);
437 int	iwx_clear_statistics(struct iwx_softc *);
438 void	iwx_add_task(struct iwx_softc *, struct taskq *, struct task *);
439 void	iwx_del_task(struct iwx_softc *, struct taskq *, struct task *);
440 int	iwx_scan(struct iwx_softc *);
441 int	iwx_bgscan(struct ieee80211com *);
442 void	iwx_bgscan_done(struct ieee80211com *,
443 	    struct ieee80211_node_switch_bss_arg *, size_t);
444 void	iwx_bgscan_done_task(void *);
445 int	iwx_umac_scan_abort(struct iwx_softc *);
446 int	iwx_scan_abort(struct iwx_softc *);
447 int	iwx_enable_mgmt_queue(struct iwx_softc *);
448 int	iwx_disable_mgmt_queue(struct iwx_softc *);
449 int	iwx_rs_rval2idx(uint8_t);
450 uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *, int);
451 uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *, int);
452 int	iwx_rs_init_v3(struct iwx_softc *, struct iwx_node *);
453 int	iwx_rs_init_v4(struct iwx_softc *, struct iwx_node *);
454 int	iwx_rs_init(struct iwx_softc *, struct iwx_node *);
455 int	iwx_phy_send_rlc(struct iwx_softc *, struct iwx_phy_ctxt *,
456 	    uint8_t, uint8_t);
457 int	iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
458 	    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
459 	    uint8_t);
460 int	iwx_auth(struct iwx_softc *);
461 int	iwx_deauth(struct iwx_softc *);
462 int	iwx_run(struct iwx_softc *);
463 int	iwx_run_stop(struct iwx_softc *);
464 struct ieee80211_node *iwx_node_alloc(struct ieee80211com *);
465 int	iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
466 	    struct ieee80211_key *);
467 void	iwx_setkey_task(void *);
468 void	iwx_delete_key(struct ieee80211com *,
469 	    struct ieee80211_node *, struct ieee80211_key *);
470 int	iwx_media_change(struct ifnet *);
471 void	iwx_newstate_task(void *);
472 int	iwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
473 void	iwx_endscan(struct iwx_softc *);
474 void	iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
475 	    struct ieee80211_node *);
476 int	iwx_sf_config(struct iwx_softc *, int);
477 int	iwx_send_bt_init_conf(struct iwx_softc *);
478 int	iwx_send_soc_conf(struct iwx_softc *);
479 int	iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
480 int	iwx_send_temp_report_ths_cmd(struct iwx_softc *);
481 int	iwx_init_hw(struct iwx_softc *);
482 int	iwx_init(struct ifnet *);
483 void	iwx_start(struct ifnet *);
484 void	iwx_stop(struct ifnet *);
485 void	iwx_watchdog(struct ifnet *);
486 int	iwx_ioctl(struct ifnet *, u_long, caddr_t);
487 const char *iwx_desc_lookup(uint32_t);
488 void	iwx_nic_error(struct iwx_softc *);
489 void	iwx_dump_driver_status(struct iwx_softc *);
490 void	iwx_nic_umac_error(struct iwx_softc *);
491 int	iwx_detect_duplicate(struct iwx_softc *, struct mbuf *,
492 	    struct iwx_rx_mpdu_desc *, struct ieee80211_rxinfo *);
493 int	iwx_is_sn_less(uint16_t, uint16_t, uint16_t);
494 void	iwx_release_frames(struct iwx_softc *, struct ieee80211_node *,
495 	    struct iwx_rxba_data *, struct iwx_reorder_buffer *, uint16_t,
496 	    struct mbuf_list *);
497 int	iwx_oldsn_workaround(struct iwx_softc *, struct ieee80211_node *,
498 	    int, struct iwx_reorder_buffer *, uint32_t, uint32_t);
499 int	iwx_rx_reorder(struct iwx_softc *, struct mbuf *, int,
500 	    struct iwx_rx_mpdu_desc *, int, int, uint32_t,
501 	    struct ieee80211_rxinfo *, struct mbuf_list *);
502 void	iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t,
503 	    struct mbuf_list *);
504 int	iwx_rx_pkt_valid(struct iwx_rx_packet *);
505 void	iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
506 	    struct mbuf_list *);
507 void	iwx_notif_intr(struct iwx_softc *);
508 int	iwx_intr(void *);
509 int	iwx_intr_msix(void *);
510 int	iwx_match(struct device *, void *, void *);
511 int	iwx_preinit(struct iwx_softc *);
512 void	iwx_attach_hook(struct device *);
513 const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
514 void	iwx_attach(struct device *, struct device *, void *);
515 void	iwx_init_task(void *);
516 int	iwx_activate(struct device *, int);
517 void	iwx_resume(struct iwx_softc *);
518 int	iwx_wakeup(struct iwx_softc *);
519 
520 #if NBPFILTER > 0
521 void	iwx_radiotap_attach(struct iwx_softc *);
522 #endif
523 
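/*
 * Look up the command version advertised by the firmware in its
 * CMD_VERSIONS TLV (parsed in iwx_read_firmware() below). Returns
 * IWX_FW_CMD_VER_UNKNOWN if the firmware does not list this command.
 */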
524 uint8_t
525 iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
526 {
527 	const struct iwx_fw_cmd_version *entry;
528 	int i;
529 
530 	for (i = 0; i < sc->n_cmd_versions; i++) {
531 		entry = &sc->cmd_versions[i];
532 		if (entry->group == grp && entry->cmd == cmd)
533 			return entry->cmd_ver;
534 	}
535 
536 	return IWX_FW_CMD_VER_UNKNOWN;
537 }
538 
539 uint8_t
540 iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
541 {
542 	const struct iwx_fw_cmd_version *entry;
543 	int i;
544 
545 	for (i = 0; i < sc->n_cmd_versions; i++) {
546 		entry = &sc->cmd_versions[i];
547 		if (entry->group == grp && entry->cmd == cmd)
548 			return entry->notif_ver;
549 	}
550 
551 	return IWX_FW_CMD_VER_UNKNOWN;
552 }
553 
554 int
555 iwx_is_mimo_ht_plcp(uint8_t ht_plcp)
556 {
557 	switch (ht_plcp) {
558 	case IWX_RATE_HT_MIMO2_MCS_8_PLCP:
559 	case IWX_RATE_HT_MIMO2_MCS_9_PLCP:
560 	case IWX_RATE_HT_MIMO2_MCS_10_PLCP:
561 	case IWX_RATE_HT_MIMO2_MCS_11_PLCP:
562 	case IWX_RATE_HT_MIMO2_MCS_12_PLCP:
563 	case IWX_RATE_HT_MIMO2_MCS_13_PLCP:
564 	case IWX_RATE_HT_MIMO2_MCS_14_PLCP:
565 	case IWX_RATE_HT_MIMO2_MCS_15_PLCP:
566 		return 1;
567 	default:
568 		break;
569 	}
570 
571 	return 0;
572 }
573 
574 int
575 iwx_store_cscheme(struct iwx_softc *sc, uint8_t *data, size_t dlen)
576 {
577 	struct iwx_fw_cscheme_list *l = (void *)data;
578 
579 	if (dlen < sizeof(*l) ||
580 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
581 		return EINVAL;
582 
583 	/* we don't actually store anything for now, always use s/w crypto */
584 
585 	return 0;
586 }
587 
588 int
589 iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
590     const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
591 {
592 	int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 0);
593 	if (err) {
594 		printf("%s: could not allocate context info DMA memory\n",
595 		    DEVNAME(sc));
596 		return err;
597 	}
598 
599 	memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
600 
601 	return 0;
602 }
603 
604 void
605 iwx_ctxt_info_free_paging(struct iwx_softc *sc)
606 {
607 	struct iwx_self_init_dram *dram = &sc->init_dram;
608 	int i;
609 
610 	if (!dram->paging)
611 		return;
612 
613 	/* free paging */
614 	for (i = 0; i < dram->paging_cnt; i++)
615 		iwx_dma_contig_free(&dram->paging[i]);
616 
617 	free(dram->paging, M_DEVBUF, dram->paging_cnt * sizeof(*dram->paging));
618 	dram->paging_cnt = 0;
619 	dram->paging = NULL;
620 }
621 
622 int
623 iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
624 {
625 	int i = 0;
626 
627 	while (start < fws->fw_count &&
628 	       fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
629 	       fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
630 		start++;
631 		i++;
632 	}
633 
634 	return i;
635 }
636 
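/*
 * Firmware sections within the image are laid out as
 *   [lmac sections][separator][umac sections][separator][paging sections]
 * which is why the umac and paging section counts computed below skip
 * one and two separator entries, respectively.
 */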
637 int
638 iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
639     struct iwx_context_info_dram *ctxt_dram)
640 {
641 	struct iwx_self_init_dram *dram = &sc->init_dram;
642 	int i, ret, fw_cnt = 0;
643 
644 	KASSERT(dram->paging == NULL);
645 
646 	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
647 	/* add 1 due to separator */
648 	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
649 	/* add 2 due to separators */
650 	dram->paging_cnt = iwx_get_num_sections(fws,
651 	    dram->lmac_cnt + dram->umac_cnt + 2);
652 
653 	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
654 	    sizeof(*dram->fw), M_DEVBUF,  M_ZERO | M_NOWAIT);
655 	if (!dram->fw) {
656 		printf("%s: could not allocate memory for firmware sections\n",
657 		    DEVNAME(sc));
658 		return ENOMEM;
659 	}
660 
661 	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
662 	    M_DEVBUF, M_ZERO | M_NOWAIT);
663 	if (!dram->paging) {
664 		printf("%s: could not allocate memory for firmware paging\n",
665 		    DEVNAME(sc));
666 		return ENOMEM;
667 	}
668 
669 	/* initialize lmac sections */
670 	for (i = 0; i < dram->lmac_cnt; i++) {
671 		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
672 						   &dram->fw[fw_cnt]);
673 		if (ret)
674 			return ret;
675 		ctxt_dram->lmac_img[i] =
676 			htole64(dram->fw[fw_cnt].paddr);
677 		DPRINTF(("%s: firmware LMAC section %d at 0x%llx size %lld\n", __func__, i,
678 		    (unsigned long long)dram->fw[fw_cnt].paddr,
679 		    (unsigned long long)dram->fw[fw_cnt].size));
680 		fw_cnt++;
681 	}
682 
683 	/* initialize umac sections */
684 	for (i = 0; i < dram->umac_cnt; i++) {
685 		/* access FW with +1 to make up for lmac separator */
686 		ret = iwx_ctxt_info_alloc_dma(sc,
687 		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
688 		if (ret)
689 			return ret;
690 		ctxt_dram->umac_img[i] =
691 			htole64(dram->fw[fw_cnt].paddr);
692 		DPRINTF(("%s: firmware UMAC section %d at 0x%llx size %lld\n", __func__, i,
693 			(unsigned long long)dram->fw[fw_cnt].paddr,
694 			(unsigned long long)dram->fw[fw_cnt].size));
695 		fw_cnt++;
696 	}
697 
698 	/*
699 	 * Initialize paging.
700 	 * Unlike the umac and lmac sections, paging memory is not stored
701 	 * in dram->fw; it is kept separately because its release timing
702 	 * differs: firmware section memory can be released once the
703 	 * firmware is alive, while paging memory can only be freed when
704 	 * the device goes down.
705 	 * Hence the firmware image is accessed differently here: fw_cnt
706 	 * no longer changes, so the loop counter is added to it.
707 	 */
708 	for (i = 0; i < dram->paging_cnt; i++) {
709 		/* access FW with +2 to make up for lmac & umac separators */
710 		int fw_idx = fw_cnt + i + 2;
711 
712 		ret = iwx_ctxt_info_alloc_dma(sc,
713 		    &fws->fw_sect[fw_idx], &dram->paging[i]);
714 		if (ret)
715 			return ret;
716 
717 		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
718 		DPRINTF(("%s: firmware paging section %d at 0x%llx size %lld\n", __func__, i,
719 		    (unsigned long long)dram->paging[i].paddr,
720 		    (unsigned long long)dram->paging[i].size));
721 	}
722 
723 	return 0;
724 }
725 
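/*
 * Format a firmware version string. For example, version words
 * (89, 0x156, 0) format as "89.00000156.0", while a pre-35 image
 * such as (34, 342, 0) formats as "34.342.0".
 */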
726 void
727 iwx_fw_version_str(char *buf, size_t bufsize,
728     uint32_t major, uint32_t minor, uint32_t api)
729 {
730 	/*
731 	 * Starting with major version 35 the Linux driver prints the minor
732 	 * version in hexadecimal.
733 	 */
734 	if (major >= 35)
735 		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
736 	else
737 		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
738 }
739 
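/*
 * Allocate a DMA buffer for the firmware monitor, trying power-of-two
 * sizes from 2^max_power down to 2^min_power bytes until an allocation
 * succeeds.
 */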
740 int
741 iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
742     uint8_t min_power)
743 {
744 	struct iwx_dma_info *fw_mon = &sc->fw_mon;
745 	uint32_t size = 0;
746 	uint8_t power;
747 	int err;
748 
749 	if (fw_mon->size)
750 		return 0;
751 
752 	for (power = max_power; power >= min_power; power--) {
753 		size = (1 << power);
754 
755 		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
756 		if (err)
757 			continue;
758 
759 		DPRINTF(("%s: allocated 0x%08x bytes for firmware monitor.\n",
760 			 DEVNAME(sc), size));
761 		break;
762 	}
763 
764 	if (err) {
765 		fw_mon->size = 0;
766 		return err;
767 	}
768 
769 	if (power != max_power)
770 		DPRINTF(("%s: Sorry - debug buffer is only %luK while you requested %luK\n",
771 			DEVNAME(sc), (unsigned long)(1 << (power - 10)),
772 			(unsigned long)(1 << (max_power - 10))));
773 
774 	return 0;
775 }
776 
777 int
778 iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
779 {
780 	if (!max_power) {
781 		/* default max_power is maximum */
782 		max_power = 26;
783 	} else {
784 		max_power += 11;
785 	}
786 
787 	if (max_power > 26) {
788 		DPRINTF(("%s: External buffer size for monitor is too big %d, "
789 		     "check the FW TLV\n", DEVNAME(sc), max_power));
790 		return 0;
791 	}
792 
793 	if (sc->fw_mon.size)
794 		return 0;
795 
796 	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
797 }
798 
799 int
800 iwx_apply_debug_destination(struct iwx_softc *sc)
801 {
802 	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
803 	int i, err;
804 	uint8_t mon_mode, size_power, base_shift, end_shift;
805 	uint32_t base_reg, end_reg;
806 
807 	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
808 	mon_mode = dest_v1->monitor_mode;
809 	size_power = dest_v1->size_power;
810 	base_reg = le32toh(dest_v1->base_reg);
811 	end_reg = le32toh(dest_v1->end_reg);
812 	base_shift = dest_v1->base_shift;
813 	end_shift = dest_v1->end_shift;
814 
815 	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));
816 
817 	if (mon_mode == EXTERNAL_MODE) {
818 		err = iwx_alloc_fw_monitor(sc, size_power);
819 		if (err)
820 			return err;
821 	}
822 
823 	if (!iwx_nic_lock(sc))
824 		return EBUSY;
825 
826 	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
827 		uint32_t addr, val;
828 		uint8_t op;
829 
830 		addr = le32toh(dest_v1->reg_ops[i].addr);
831 		val = le32toh(dest_v1->reg_ops[i].val);
832 		op = dest_v1->reg_ops[i].op;
833 
834 		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
835 		switch (op) {
836 		case CSR_ASSIGN:
837 			IWX_WRITE(sc, addr, val);
838 			break;
839 		case CSR_SETBIT:
840 			IWX_SETBITS(sc, addr, (1 << val));
841 			break;
842 		case CSR_CLEARBIT:
843 			IWX_CLRBITS(sc, addr, (1 << val));
844 			break;
845 		case PRPH_ASSIGN:
846 			iwx_write_prph(sc, addr, val);
847 			break;
848 		case PRPH_SETBIT:
849 			err = iwx_set_bits_prph(sc, addr, (1 << val));
850 			if (err)
851 				return err;
852 			break;
853 		case PRPH_CLEARBIT:
854 			err = iwx_clear_bits_prph(sc, addr, (1 << val));
855 			if (err)
856 				return err;
857 			break;
858 		case PRPH_BLOCKBIT:
859 			if (iwx_read_prph(sc, addr) & (1 << val))
860 				goto monitor;
861 			break;
862 		default:
863 			DPRINTF(("%s: FW debug - unknown OP %d\n",
864 			    DEVNAME(sc), op));
865 			break;
866 		}
867 	}
868 
869 monitor:
870 	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
871 		iwx_write_prph(sc, base_reg,
872 		    sc->fw_mon.paddr >> base_shift);
873 		iwx_write_prph(sc, end_reg,
874 		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
875 		    >> end_shift);
876 	}
877 
878 	iwx_nic_unlock(sc);
879 	return 0;
880 }
881 
882 void
883 iwx_set_ltr(struct iwx_softc *sc)
884 {
885 	uint32_t ltr_val = IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
886 	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
887 	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT) &
888 	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK) |
889 	    ((250 << IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT) &
890 	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK) |
891 	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
892 	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
893 	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT) &
894 	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK) |
895 	    (250 & IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
896 
897 	/*
898 	 * To workaround hardware latency issues during the boot process,
899 	 * initialize the LTR to ~250 usec (see ltr_val above).
900 	 * The firmware initializes this again later (to a smaller value).
901 	 */
902 	if (!sc->sc_integrated) {
903 		IWX_WRITE(sc, IWX_CSR_LTR_LONG_VAL_AD, ltr_val);
904 	} else if (sc->sc_integrated &&
905 		   sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
906 		iwx_write_prph(sc, IWX_HPM_MAC_LTR_CSR,
907 		    IWX_HPM_MAC_LRT_ENABLE_ALL);
908 		iwx_write_prph(sc, IWX_HPM_UMAC_LTR, ltr_val);
909 	}
910 }
911 
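/*
 * Populate the context info structure which the self-booting firmware
 * reads via DMA: RX/TX ring addresses, ucode section addresses, and an
 * optional debug configuration. Writing IWX_UREG_CPU_INIT_RUN below
 * kicks off the firmware self-load.
 */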
912 int
913 iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
914 {
915 	struct iwx_context_info *ctxt_info;
916 	struct iwx_context_info_rbd_cfg *rx_cfg;
917 	uint32_t control_flags = 0;
918 	uint64_t paddr;
919 	int err;
920 
921 	ctxt_info = sc->ctxt_info_dma.vaddr;
922 	memset(ctxt_info, 0, sizeof(*ctxt_info));
923 
924 	ctxt_info->version.version = 0;
925 	ctxt_info->version.mac_id =
926 		htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
927 	/* size is in DWs (4-byte words) */
928 	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);
929 
930 	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF);
931 	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
932 			(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
933 			 IWX_CTXT_INFO_RB_CB_SIZE_POS) |
934 			(IWX_CTXT_INFO_RB_SIZE_4K << IWX_CTXT_INFO_RB_SIZE_POS);
935 	ctxt_info->control.control_flags = htole32(control_flags);
936 
937 	/* initialize RX default queue */
938 	rx_cfg = &ctxt_info->rbd_cfg;
939 	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
940 	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
941 	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);
942 
943 	/* initialize TX command queue */
944 	ctxt_info->hcmd_cfg.cmd_queue_addr =
945 	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
946 	ctxt_info->hcmd_cfg.cmd_queue_size =
947 		IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
948 
949 	/* allocate ucode sections in dram and set addresses */
950 	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
951 	if (err) {
952 		iwx_ctxt_info_free_fw_img(sc);
953 		return err;
954 	}
955 
956 	/* Configure debug, if exists */
957 	if (sc->sc_fw.dbg_dest_tlv_v1) {
958 		err = iwx_apply_debug_destination(sc);
959 		if (err) {
960 			iwx_ctxt_info_free_fw_img(sc);
961 			return err;
962 		}
963 	}
964 
965 	/*
966 	 * Write the context info DMA base address. The device expects a
967 	 * 64-bit address but a simple bus_space_write_8 to this register
968 	 * won't work on some devices, such as the AX201.
969 	 */
970 	paddr = sc->ctxt_info_dma.paddr;
971 	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
972 	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);
973 
974 	/* kick FW self load */
975 	if (!iwx_nic_lock(sc)) {
976 		iwx_ctxt_info_free_fw_img(sc);
977 		return EBUSY;
978 	}
979 
980 	iwx_set_ltr(sc);
981 	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
982 	iwx_nic_unlock(sc);
983 
984 	/* Context info will be released upon alive or failure to get one */
985 
986 	return 0;
987 }
988 
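/*
 * Gen3 devices (AX210 and later) split the boot-time configuration into
 * a context info descriptor and a "prph scratch" area, and additionally
 * require an image loader (IML) section from the firmware file.
 */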
989 int
990 iwx_ctxt_info_gen3_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
991 {
992 	struct iwx_context_info_gen3 *ctxt_info_gen3;
993 	struct iwx_prph_scratch *prph_scratch;
994 	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
995 	uint16_t cb_size;
996 	uint32_t control_flags, scratch_size;
997 	uint64_t paddr;
998 	int err;
999 
1000 	if (sc->sc_fw.iml == NULL || sc->sc_fw.iml_len == 0) {
1001 		printf("%s: no image loader found in firmware file\n",
1002 		    DEVNAME(sc));
1003 		iwx_ctxt_info_free_fw_img(sc);
1004 		return EINVAL;
1005 	}
1006 
1007 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->iml_dma,
1008 	    sc->sc_fw.iml_len, 0);
1009 	if (err) {
1010 		printf("%s: could not allocate DMA memory for "
1011 		    "firmware image loader\n", DEVNAME(sc));
1012 		iwx_ctxt_info_free_fw_img(sc);
1013 		return ENOMEM;
1014 	}
1015 
1016 	prph_scratch = sc->prph_scratch_dma.vaddr;
1017 	memset(prph_scratch, 0, sizeof(*prph_scratch));
1018 	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
1019 	prph_sc_ctrl->version.version = 0;
1020 	prph_sc_ctrl->version.mac_id = htole16(IWX_READ(sc, IWX_CSR_HW_REV));
1021 	prph_sc_ctrl->version.size = htole16(sizeof(*prph_scratch) / 4);
1022 
1023 	control_flags = IWX_PRPH_SCRATCH_RB_SIZE_4K |
1024 	    IWX_PRPH_SCRATCH_MTR_MODE |
1025 	    (IWX_PRPH_MTR_FORMAT_256B & IWX_PRPH_SCRATCH_MTR_FORMAT);
1026 	if (sc->sc_imr_enabled)
1027 		control_flags |= IWX_PRPH_SCRATCH_IMR_DEBUG_EN;
1028 	prph_sc_ctrl->control.control_flags = htole32(control_flags);
1029 
1030 	/* initialize RX default queue */
1031 	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
1032 	    htole64(sc->rxq.free_desc_dma.paddr);
1033 
1034 	/* allocate ucode sections in dram and set addresses */
1035 	err = iwx_init_fw_sec(sc, fws, &prph_scratch->dram);
1036 	if (err) {
1037 		iwx_dma_contig_free(&sc->iml_dma);
1038 		iwx_ctxt_info_free_fw_img(sc);
1039 		return err;
1040 	}
1041 
1042 	ctxt_info_gen3 = sc->ctxt_info_dma.vaddr;
1043 	memset(ctxt_info_gen3, 0, sizeof(*ctxt_info_gen3));
1044 	ctxt_info_gen3->prph_info_base_addr = htole64(sc->prph_info_dma.paddr);
1045 	ctxt_info_gen3->prph_scratch_base_addr =
1046 	    htole64(sc->prph_scratch_dma.paddr);
1047 	scratch_size = sizeof(*prph_scratch);
1048 	ctxt_info_gen3->prph_scratch_size = htole32(scratch_size);
1049 	ctxt_info_gen3->cr_head_idx_arr_base_addr =
1050 	    htole64(sc->rxq.stat_dma.paddr);
1051 	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
1052 	    htole64(sc->prph_info_dma.paddr + PAGE_SIZE / 2);
1053 	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
1054 	    htole64(sc->prph_info_dma.paddr + 3 * PAGE_SIZE / 4);
1055 	ctxt_info_gen3->mtr_base_addr =
1056 	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
1057 	ctxt_info_gen3->mcr_base_addr = htole64(sc->rxq.used_desc_dma.paddr);
1058 	cb_size = IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
1059 	ctxt_info_gen3->mtr_size = htole16(cb_size);
1060 	cb_size = IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE);
1061 	ctxt_info_gen3->mcr_size = htole16(cb_size);
1062 
1063 	memcpy(sc->iml_dma.vaddr, sc->sc_fw.iml, sc->sc_fw.iml_len);
1064 
1065 	paddr = sc->ctxt_info_dma.paddr;
1066 	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR, paddr & 0xffffffff);
1067 	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR + 4, paddr >> 32);
1068 
1069 	paddr = sc->iml_dma.paddr;
1070 	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR, paddr & 0xffffffff);
1071 	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR + 4, paddr >> 32);
1072 	IWX_WRITE(sc, IWX_CSR_IML_SIZE_ADDR, sc->sc_fw.iml_len);
1073 
1074 	IWX_SETBITS(sc, IWX_CSR_CTXT_INFO_BOOT_CTRL,
1075 		    IWX_CSR_AUTO_FUNC_BOOT_ENA);
1076 
1077 	/* kick FW self load */
1078 	if (!iwx_nic_lock(sc)) {
1079 		iwx_dma_contig_free(&sc->iml_dma);
1080 		iwx_ctxt_info_free_fw_img(sc);
1081 		return EBUSY;
1082 	}
1083 	iwx_set_ltr(sc);
1084 	iwx_write_umac_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
1085 	iwx_nic_unlock(sc);
1086 
1087 	/* Context info is released upon 'alive', or upon failure to receive one. */
1088 	return 0;
1089 }
1090 
1091 void
1092 iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
1093 {
1094 	struct iwx_self_init_dram *dram = &sc->init_dram;
1095 	int i;
1096 
1097 	if (!dram->fw)
1098 		return;
1099 
1100 	for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
1101 		iwx_dma_contig_free(&dram->fw[i]);
1102 
1103 	free(dram->fw, M_DEVBUF,
1104 	    (dram->lmac_cnt + dram->umac_cnt) * sizeof(*dram->fw));
1105 	dram->lmac_cnt = 0;
1106 	dram->umac_cnt = 0;
1107 	dram->fw = NULL;
1108 }
1109 
1110 int
1111 iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
1112     uint8_t *data, size_t dlen)
1113 {
1114 	struct iwx_fw_sects *fws;
1115 	struct iwx_fw_onesect *fwone;
1116 
1117 	if (type >= IWX_UCODE_TYPE_MAX)
1118 		return EINVAL;
1119 	if (dlen < sizeof(uint32_t))
1120 		return EINVAL;
1121 
1122 	fws = &sc->sc_fw.fw_sects[type];
1123 	DPRINTF(("%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count));
1124 	if (fws->fw_count >= IWX_UCODE_SECT_MAX)
1125 		return EINVAL;
1126 
1127 	fwone = &fws->fw_sect[fws->fw_count];
1128 
1129 	/* the first 32 bits are the device load offset */
1130 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
1131 
1132 	/* rest is data */
1133 	fwone->fws_data = data + sizeof(uint32_t);
1134 	fwone->fws_len = dlen - sizeof(uint32_t);
1135 
1136 	fws->fw_count++;
1137 	fws->fw_totlen += fwone->fws_len;
1138 
1139 	return 0;
1140 }
1141 
1142 #define IWX_DEFAULT_SCAN_CHANNELS	40
1143 /* Newer firmware might support more channels. Raise this value if needed. */
1144 #define IWX_MAX_SCAN_CHANNELS		67 /* as of iwx-cc-a0-62 firmware */
1145 
1146 struct iwx_tlv_calib_data {
1147 	uint32_t ucode_type;
1148 	struct iwx_tlv_calib_ctrl calib;
1149 } __packed;
1150 
1151 int
1152 iwx_set_default_calib(struct iwx_softc *sc, const void *data)
1153 {
1154 	const struct iwx_tlv_calib_data *def_calib = data;
1155 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
1156 
1157 	if (ucode_type >= IWX_UCODE_TYPE_MAX)
1158 		return EINVAL;
1159 
1160 	sc->sc_default_calib[ucode_type].flow_trigger =
1161 	    def_calib->calib.flow_trigger;
1162 	sc->sc_default_calib[ucode_type].event_trigger =
1163 	    def_calib->calib.event_trigger;
1164 
1165 	return 0;
1166 }
1167 
1168 void
1169 iwx_fw_info_free(struct iwx_fw_info *fw)
1170 {
1171 	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
1172 	fw->fw_rawdata = NULL;
1173 	fw->fw_rawsize = 0;
1174 	/* don't touch fw->fw_status */
1175 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
1176 	free(fw->iml, M_DEVBUF, fw->iml_len);
1177 	fw->iml = NULL;
1178 	fw->iml_len = 0;
1179 }
1180 
1181 #define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
1182 
1183 int
1184 iwx_read_firmware(struct iwx_softc *sc)
1185 {
1186 	struct ieee80211com *ic = &sc->sc_ic;
1187 	struct iwx_fw_info *fw = &sc->sc_fw;
1188 	struct iwx_tlv_ucode_header *uhdr;
1189 	struct iwx_ucode_tlv tlv;
1190 	uint32_t tlv_type;
1191 	uint8_t *data;
1192 	int err;
1193 	size_t len;
1194 
1195 	if (fw->fw_status == IWX_FW_STATUS_DONE)
1196 		return 0;
1197 
1198 	while (fw->fw_status == IWX_FW_STATUS_INPROGRESS)
1199 		tsleep_nsec(&sc->sc_fw, 0, "iwxfwp", INFSLP);
1200 	fw->fw_status = IWX_FW_STATUS_INPROGRESS;
1201 
1202 	if (fw->fw_rawdata != NULL)
1203 		iwx_fw_info_free(fw);
1204 
1205 	err = loadfirmware(sc->sc_fwname,
1206 	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
1207 	if (err) {
1208 		printf("%s: could not read firmware %s (error %d)\n",
1209 		    DEVNAME(sc), sc->sc_fwname, err);
1210 		goto out;
1211 	}
1212 
1213 	if (ic->ic_if.if_flags & IFF_DEBUG)
1214 		printf("%s: using firmware %s\n", DEVNAME(sc), sc->sc_fwname);
1215 
1216 	sc->sc_capaflags = 0;
1217 	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
1218 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
1219 	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
1220 	sc->n_cmd_versions = 0;
1221 
1222 	uhdr = (void *)fw->fw_rawdata;
1223 	if (*(uint32_t *)fw->fw_rawdata != 0
1224 	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
1225 		printf("%s: invalid firmware %s\n",
1226 		    DEVNAME(sc), sc->sc_fwname);
1227 		err = EINVAL;
1228 		goto out;
1229 	}
1230 
1231 	iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1232 	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
1233 	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
1234 	    IWX_UCODE_API(le32toh(uhdr->ver)));
1235 
1236 	data = uhdr->data;
1237 	len = fw->fw_rawsize - sizeof(*uhdr);
1238 
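	/*
	 * The remainder of the firmware file is a sequence of TLV records,
	 * each consisting of a (type, length) header followed by the payload,
	 * with records padded to 4-byte alignment (see the roundup() at the
	 * bottom of this loop).
	 */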
1239 	while (len >= sizeof(tlv)) {
1240 		size_t tlv_len;
1241 		void *tlv_data;
1242 
1243 		memcpy(&tlv, data, sizeof(tlv));
1244 		tlv_len = le32toh(tlv.length);
1245 		tlv_type = le32toh(tlv.type);
1246 
1247 		len -= sizeof(tlv);
1248 		data += sizeof(tlv);
1249 		tlv_data = data;
1250 
1251 		if (len < tlv_len) {
1252 			printf("%s: firmware too short: %zu bytes\n",
1253 			    DEVNAME(sc), len);
1254 			err = EINVAL;
1255 			goto parse_out;
1256 		}
1257 
1258 		switch (tlv_type) {
1259 		case IWX_UCODE_TLV_PROBE_MAX_LEN:
1260 			if (tlv_len < sizeof(uint32_t)) {
1261 				err = EINVAL;
1262 				goto parse_out;
1263 			}
1264 			sc->sc_capa_max_probe_len
1265 			    = le32toh(*(uint32_t *)tlv_data);
1266 			if (sc->sc_capa_max_probe_len >
1267 			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
1268 				err = EINVAL;
1269 				goto parse_out;
1270 			}
1271 			break;
1272 		case IWX_UCODE_TLV_PAN:
1273 			if (tlv_len) {
1274 				err = EINVAL;
1275 				goto parse_out;
1276 			}
1277 			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
1278 			break;
1279 		case IWX_UCODE_TLV_FLAGS:
1280 			if (tlv_len < sizeof(uint32_t)) {
1281 				err = EINVAL;
1282 				goto parse_out;
1283 			}
1284 			/*
1285 			 * Apparently there can be many flags, but the Linux
1286 			 * driver parses only the first one, and so do we.
1287 			 *
1288 			 * XXX: why does this override IWX_UCODE_TLV_PAN?
1289 			 * Intentional or a bug?  Observations from a
1290 			 * current firmware file:
1291 			 *  1) TLV_PAN is parsed first
1292 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
1293 			 * ==> this resets TLV_PAN to itself... odd.
1294 			 */
1295 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
1296 			break;
1297 		case IWX_UCODE_TLV_CSCHEME:
1298 			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
1299 			if (err)
1300 				goto parse_out;
1301 			break;
1302 		case IWX_UCODE_TLV_NUM_OF_CPU: {
1303 			uint32_t num_cpu;
1304 			if (tlv_len != sizeof(uint32_t)) {
1305 				err = EINVAL;
1306 				goto parse_out;
1307 			}
1308 			num_cpu = le32toh(*(uint32_t *)tlv_data);
1309 			if (num_cpu < 1 || num_cpu > 2) {
1310 				err = EINVAL;
1311 				goto parse_out;
1312 			}
1313 			break;
1314 		}
1315 		case IWX_UCODE_TLV_SEC_RT:
1316 			err = iwx_firmware_store_section(sc,
1317 			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
1318 			if (err)
1319 				goto parse_out;
1320 			break;
1321 		case IWX_UCODE_TLV_SEC_INIT:
1322 			err = iwx_firmware_store_section(sc,
1323 			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
1324 			if (err)
1325 				goto parse_out;
1326 			break;
1327 		case IWX_UCODE_TLV_SEC_WOWLAN:
1328 			err = iwx_firmware_store_section(sc,
1329 			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
1330 			if (err)
1331 				goto parse_out;
1332 			break;
1333 		case IWX_UCODE_TLV_DEF_CALIB:
1334 			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
1335 				err = EINVAL;
1336 				goto parse_out;
1337 			}
1338 			err = iwx_set_default_calib(sc, tlv_data);
1339 			if (err)
1340 				goto parse_out;
1341 			break;
1342 		case IWX_UCODE_TLV_PHY_SKU:
1343 			if (tlv_len != sizeof(uint32_t)) {
1344 				err = EINVAL;
1345 				goto parse_out;
1346 			}
1347 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
1348 			break;
1349 
1350 		case IWX_UCODE_TLV_API_CHANGES_SET: {
1351 			struct iwx_ucode_api *api;
1352 			int idx, i;
1353 			if (tlv_len != sizeof(*api)) {
1354 				err = EINVAL;
1355 				goto parse_out;
1356 			}
1357 			api = (struct iwx_ucode_api *)tlv_data;
1358 			idx = le32toh(api->api_index);
1359 			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
1360 				err = EINVAL;
1361 				goto parse_out;
1362 			}
1363 			for (i = 0; i < 32; i++) {
1364 				if ((le32toh(api->api_flags) & (1 << i)) == 0)
1365 					continue;
1366 				setbit(sc->sc_ucode_api, i + (32 * idx));
1367 			}
1368 			break;
1369 		}
1370 
1371 		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
1372 			struct iwx_ucode_capa *capa;
1373 			int idx, i;
1374 			if (tlv_len != sizeof(*capa)) {
1375 				err = EINVAL;
1376 				goto parse_out;
1377 			}
1378 			capa = (struct iwx_ucode_capa *)tlv_data;
1379 			idx = le32toh(capa->api_index);
1380 			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
1381 				goto parse_out;
1382 			}
1383 			for (i = 0; i < 32; i++) {
1384 				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
1385 					continue;
1386 				setbit(sc->sc_enabled_capa, i + (32 * idx));
1387 			}
1388 			break;
1389 		}
1390 
1391 		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
1392 		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
1393 			/* ignore, not used by current driver */
1394 			break;
1395 
1396 		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
1397 			err = iwx_firmware_store_section(sc,
1398 			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
1399 			    tlv_len);
1400 			if (err)
1401 				goto parse_out;
1402 			break;
1403 
1404 		case IWX_UCODE_TLV_PAGING:
1405 			if (tlv_len != sizeof(uint32_t)) {
1406 				err = EINVAL;
1407 				goto parse_out;
1408 			}
1409 			break;
1410 
1411 		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
1412 			if (tlv_len != sizeof(uint32_t)) {
1413 				err = EINVAL;
1414 				goto parse_out;
1415 			}
1416 			sc->sc_capa_n_scan_channels =
1417 			  le32toh(*(uint32_t *)tlv_data);
1418 			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
1419 				err = ERANGE;
1420 				goto parse_out;
1421 			}
1422 			break;
1423 
1424 		case IWX_UCODE_TLV_FW_VERSION:
1425 			if (tlv_len != sizeof(uint32_t) * 3) {
1426 				err = EINVAL;
1427 				goto parse_out;
1428 			}
1429 
1430 			iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1431 			    le32toh(((uint32_t *)tlv_data)[0]),
1432 			    le32toh(((uint32_t *)tlv_data)[1]),
1433 			    le32toh(((uint32_t *)tlv_data)[2]));
1434 			break;
1435 
1436 		case IWX_UCODE_TLV_FW_DBG_DEST: {
1437 			struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
1438 
1439 			fw->dbg_dest_ver = (uint8_t *)tlv_data;
1440 			if (*fw->dbg_dest_ver != 0) {
1441 				err = EINVAL;
1442 				goto parse_out;
1443 			}
1444 
1445 			if (fw->dbg_dest_tlv_init)
1446 				break;
1447 			fw->dbg_dest_tlv_init = true;
1448 
1449 			dest_v1 = (void *)tlv_data;
1450 			fw->dbg_dest_tlv_v1 = dest_v1;
1451 			fw->n_dest_reg = tlv_len -
1452 			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
1453 			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
1454 			DPRINTF(("%s: found debug dest; n_dest_reg=%d\n", __func__, fw->n_dest_reg));
1455 			break;
1456 		}
1457 
1458 		case IWX_UCODE_TLV_FW_DBG_CONF: {
1459 			struct iwx_fw_dbg_conf_tlv *conf = (void *)tlv_data;
1460 
1461 			if (!fw->dbg_dest_tlv_init ||
1462 			    conf->id >= nitems(fw->dbg_conf_tlv) ||
1463 			    fw->dbg_conf_tlv[conf->id] != NULL)
1464 				break;
1465 
1466 			DPRINTF(("Found debug configuration: %d\n", conf->id));
1467 			fw->dbg_conf_tlv[conf->id] = conf;
1468 			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
1469 			break;
1470 		}
1471 
1472 		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
1473 			struct iwx_umac_debug_addrs *dbg_ptrs =
1474 				(void *)tlv_data;
1475 
1476 			if (tlv_len != sizeof(*dbg_ptrs)) {
1477 				err = EINVAL;
1478 				goto parse_out;
1479 			}
1480 			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1481 				break;
1482 			sc->sc_uc.uc_umac_error_event_table =
1483 				le32toh(dbg_ptrs->error_info_addr) &
1484 				~IWX_FW_ADDR_CACHE_CONTROL;
1485 			sc->sc_uc.error_event_table_tlv_status |=
1486 				IWX_ERROR_EVENT_TABLE_UMAC;
1487 			break;
1488 		}
1489 
1490 		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
1491 			struct iwx_lmac_debug_addrs *dbg_ptrs =
1492 				(void *)tlv_data;
1493 
1494 			if (tlv_len != sizeof(*dbg_ptrs)) {
1495 				err = EINVAL;
1496 				goto parse_out;
1497 			}
1498 			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1499 				break;
1500 			sc->sc_uc.uc_lmac_error_event_table[0] =
1501 				le32toh(dbg_ptrs->error_event_table_ptr) &
1502 				~IWX_FW_ADDR_CACHE_CONTROL;
1503 			sc->sc_uc.error_event_table_tlv_status |=
1504 				IWX_ERROR_EVENT_TABLE_LMAC1;
1505 			break;
1506 		}
1507 
1508 		case IWX_UCODE_TLV_FW_MEM_SEG:
1509 			break;
1510 
1511 		case IWX_UCODE_TLV_IML:
1512 			if (sc->sc_fw.iml != NULL) {
1513 				free(fw->iml, M_DEVBUF, fw->iml_len);
1514 				fw->iml_len = 0;
1515 			}
1516 			sc->sc_fw.iml = malloc(tlv_len, M_DEVBUF,
1517 			    M_WAIT | M_CANFAIL | M_ZERO);
1518 			if (sc->sc_fw.iml == NULL) {
1519 				err = ENOMEM;
1520 				goto parse_out;
1521 			}
1522 			memcpy(sc->sc_fw.iml, tlv_data, tlv_len);
1523 			sc->sc_fw.iml_len = tlv_len;
1524 			break;
1525 
1526 		case IWX_UCODE_TLV_CMD_VERSIONS:
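			/* Round tlv_len down to a whole number of entries. */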
1527 			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
1528 				tlv_len /= sizeof(struct iwx_fw_cmd_version);
1529 				tlv_len *= sizeof(struct iwx_fw_cmd_version);
1530 			}
1531 			if (sc->n_cmd_versions != 0) {
1532 				err = EINVAL;
1533 				goto parse_out;
1534 			}
1535 			if (tlv_len > sizeof(sc->cmd_versions)) {
1536 				err = EINVAL;
1537 				goto parse_out;
1538 			}
1539 			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
1540 			sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
1541 			break;
1542 
1543 		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
1544 			break;
1545 
1546 		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
1547 		case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
1548 		case IWX_UCODE_TLV_FW_NUM_STATIONS:
1549 		case IWX_UCODE_TLV_FW_NUM_BEACONS:
1550 			break;
1551 
1552 		/* undocumented TLVs found in iwx-cc-a0-46 image */
1553 		case 58:
1554 		case 0x1000003:
1555 		case 0x1000004:
1556 			break;
1557 
1558 		/* undocumented TLVs found in iwx-cc-a0-48 image */
1559 		case 0x1000000:
1560 		case 0x1000002:
1561 			break;
1562 
1563 		case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
1564 		case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
1565 		case IWX_UCODE_TLV_TYPE_HCMD:
1566 		case IWX_UCODE_TLV_TYPE_REGIONS:
1567 		case IWX_UCODE_TLV_TYPE_TRIGGERS:
1568 		case IWX_UCODE_TLV_TYPE_CONF_SET:
1569 		case IWX_UCODE_TLV_SEC_TABLE_ADDR:
1570 		case IWX_UCODE_TLV_D3_KEK_KCK_ADDR:
1571 		case IWX_UCODE_TLV_CURRENT_PC:
1572 			break;
1573 
1574 		/* undocumented TLV found in iwx-cc-a0-67 image */
1575 		case 0x100000b:
1576 			break;
1577 
1578 		/* undocumented TLV found in iwx-ty-a0-gf-a0-73 image */
1579 		case 0x101:
1580 			break;
1581 
1582 		/* undocumented TLV found in iwx-ty-a0-gf-a0-77 image */
1583 		case 0x100000c:
1584 			break;
1585 
1586 		default:
1587 			err = EINVAL;
1588 			goto parse_out;
1589 		}
1590 
1591 		/*
1592 		 * Check for size_t overflow and ignore missing padding at
1593 		 * end of firmware file.
1594 		 */
1595 		if (roundup(tlv_len, 4) > len)
1596 			break;
1597 
1598 		len -= roundup(tlv_len, 4);
1599 		data += roundup(tlv_len, 4);
1600 	}
1601 
1602 	KASSERT(err == 0);
1603 
1604  parse_out:
1605 	if (err) {
1606 		printf("%s: firmware parse error %d, "
1607 		    "section type %d\n", DEVNAME(sc), err, tlv_type);
1608 	}
1609 
1610  out:
1611 	if (err) {
1612 		fw->fw_status = IWX_FW_STATUS_NONE;
1613 		if (fw->fw_rawdata != NULL)
1614 			iwx_fw_info_free(fw);
1615 	} else
1616 		fw->fw_status = IWX_FW_STATUS_DONE;
1617 	wakeup(&sc->sc_fw);
1618 
1619 	return err;
1620 }
1621 
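/*
 * Device-internal "periphery" (PRPH) registers are not mapped directly
 * into the PCI BAR.  They are accessed indirectly through the HBUS
 * target address/data registers.  The usable PRPH address width
 * depends on the device family.
 */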
1622 uint32_t
1623 iwx_prph_addr_mask(struct iwx_softc *sc)
1624 {
1625 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1626 		return 0x00ffffff;
1627 	else
1628 		return 0x000fffff;
1629 }
1630 
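/*
 * Read a periphery register.  Callers are expected to hold the NIC
 * lock (see iwx_nic_lock()); the _unlocked variant is for early reset
 * and init paths where the lock cannot be taken yet.
 */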
1631 uint32_t
1632 iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1633 {
1634 	uint32_t mask = iwx_prph_addr_mask(sc);
1635 	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR, ((addr & mask) | (3 << 24)));
1636 	IWX_BARRIER_READ_WRITE(sc);
1637 	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
1638 }
1639 
1640 uint32_t
1641 iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
1642 {
1643 	iwx_nic_assert_locked(sc);
1644 	return iwx_read_prph_unlocked(sc, addr);
1645 }
1646 
1647 void
1648 iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1649 {
1650 	uint32_t mask = iwx_prph_addr_mask(sc);
1651 	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24)));
1652 	IWX_BARRIER_WRITE(sc);
1653 	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
1654 }
1655 
1656 void
1657 iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1658 {
1659 	iwx_nic_assert_locked(sc);
1660 	iwx_write_prph_unlocked(sc, addr, val);
1661 }
1662 
1663 void
1664 iwx_write_prph64(struct iwx_softc *sc, uint64_t addr, uint64_t val)
1665 {
1666 	iwx_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
1667 	iwx_write_prph(sc, (uint32_t)addr + 4, val >> 32);
1668 }
1669 
1670 uint32_t
1671 iwx_read_umac_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1672 {
1673 	return iwx_read_prph_unlocked(sc, addr + sc->sc_umac_prph_offset);
1674 }
1675 
1676 uint32_t
1677 iwx_read_umac_prph(struct iwx_softc *sc, uint32_t addr)
1678 {
1679 	return iwx_read_prph(sc, addr + sc->sc_umac_prph_offset);
1680 }
1681 
1682 void
1683 iwx_write_umac_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1684 {
1685 	iwx_write_prph_unlocked(sc, addr + sc->sc_umac_prph_offset, val);
1686 }
1687 
1688 void
1689 iwx_write_umac_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1690 {
1691 	iwx_write_prph(sc, addr + sc->sc_umac_prph_offset, val);
1692 }
1693 
1694 int
1695 iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1696 {
1697 	int offs, err = 0;
1698 	uint32_t *vals = buf;
1699 
1700 	if (iwx_nic_lock(sc)) {
1701 		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1702 		for (offs = 0; offs < dwords; offs++)
1703 			vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1704 		iwx_nic_unlock(sc);
1705 	} else {
1706 		err = EBUSY;
1707 	}
1708 	return err;
1709 }
1710 
1711 int
1712 iwx_write_mem(struct iwx_softc *sc, uint32_t addr, const void *buf, int dwords)
1713 {
1714 	int offs;
1715 	const uint32_t *vals = buf;
1716 
1717 	if (iwx_nic_lock(sc)) {
1718 		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WADDR, addr);
1719 		/* WADDR auto-increments */
1720 		for (offs = 0; offs < dwords; offs++) {
1721 			uint32_t val = vals ? vals[offs] : 0;
1722 			IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WDAT, val);
1723 		}
1724 		iwx_nic_unlock(sc);
1725 	} else {
1726 		return EBUSY;
1727 	}
1728 	return 0;
1729 }
1730 
1731 int
1732 iwx_write_mem32(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1733 {
1734 	return iwx_write_mem(sc, addr, &val, 1);
1735 }
1736 
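/*
 * Poll a CSR register until (reg & mask) == (bits & mask), checking
 * every 10 usec for roughly 'timo' microseconds in total.
 * Returns 1 if the condition was met, 0 on timeout.
 */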
1737 int
1738 iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
1739     int timo)
1740 {
1741 	for (;;) {
1742 		if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
1743 			return 1;
1744 		}
1745 		if (timo < 10) {
1746 			return 0;
1747 		}
1748 		timo -= 10;
1749 		DELAY(10);
1750 	}
1751 }
1752 
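/*
 * Ask the device to stay awake via MAC_ACCESS_REQ so that registers
 * beyond the always-available CSR range can be accessed.  NIC locks
 * nest; a counter tracks them, and the wake request is only dropped
 * again once the last lock is released in iwx_nic_unlock().
 * Returns 1 on success, 0 if the device did not wake up in time.
 */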
1753 int
1754 iwx_nic_lock(struct iwx_softc *sc)
1755 {
1756 	if (sc->sc_nic_locks > 0) {
1757 		iwx_nic_assert_locked(sc);
1758 		sc->sc_nic_locks++;
1759 		return 1; /* already locked */
1760 	}
1761 
1762 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1763 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1764 
1765 	DELAY(2);
1766 
1767 	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
1768 	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1769 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1770 	     | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1771 		sc->sc_nic_locks++;
1772 		return 1;
1773 	}
1774 
1775 	printf("%s: acquiring device failed\n", DEVNAME(sc));
1776 	return 0;
1777 }
1778 
1779 void
1780 iwx_nic_assert_locked(struct iwx_softc *sc)
1781 {
1782 	if (sc->sc_nic_locks <= 0)
1783 		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1784 }
1785 
1786 void
1787 iwx_nic_unlock(struct iwx_softc *sc)
1788 {
1789 	if (sc->sc_nic_locks > 0) {
1790 		if (--sc->sc_nic_locks == 0)
1791 			IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1792 			    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1793 	} else
1794 		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1795 }
1796 
1797 int
1798 iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1799     uint32_t mask)
1800 {
1801 	uint32_t val;
1802 
1803 	if (iwx_nic_lock(sc)) {
1804 		val = iwx_read_prph(sc, reg) & mask;
1805 		val |= bits;
1806 		iwx_write_prph(sc, reg, val);
1807 		iwx_nic_unlock(sc);
1808 		return 0;
1809 	}
1810 	return EBUSY;
1811 }
1812 
1813 int
1814 iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1815 {
1816 	return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
1817 }
1818 
1819 int
1820 iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1821 {
1822 	return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
1823 }
1824 
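/*
 * Allocate a single physically contiguous, coherent DMA buffer and
 * map it into kernel virtual address space.  On failure, any state
 * set up so far is torn down via iwx_dma_contig_free().
 */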
1825 int
1826 iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
1827     bus_size_t size, bus_size_t alignment)
1828 {
1829 	int nsegs, err;
1830 	caddr_t va;
1831 
1832 	dma->tag = tag;
1833 	dma->size = size;
1834 
1835 	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1836 	    &dma->map);
1837 	if (err)
1838 		goto fail;
1839 
1840 	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1841 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1842 	if (err)
1843 		goto fail;
1844 
1845 	if (nsegs > 1) {
1846 		err = ENOMEM;
1847 		goto fail;
1848 	}
1849 
1850 	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1851 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1852 	if (err)
1853 		goto fail;
1854 	dma->vaddr = va;
1855 
1856 	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1857 	    BUS_DMA_NOWAIT);
1858 	if (err)
1859 		goto fail;
1860 
1861 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1862 	dma->paddr = dma->map->dm_segs[0].ds_addr;
1863 
1864 	return 0;
1865 
1866 fail:	iwx_dma_contig_free(dma);
1867 	return err;
1868 }
1869 
1870 void
1871 iwx_dma_contig_free(struct iwx_dma_info *dma)
1872 {
1873 	if (dma->map != NULL) {
1874 		if (dma->vaddr != NULL) {
1875 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1876 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1877 			bus_dmamap_unload(dma->tag, dma->map);
1878 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1879 			bus_dmamem_free(dma->tag, &dma->seg, 1);
1880 			dma->vaddr = NULL;
1881 		}
1882 		bus_dmamap_destroy(dma->tag, dma->map);
1883 		dma->map = NULL;
1884 	}
1885 }
1886 
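/*
 * Allocate the RX ring: a "free" descriptor table which the driver
 * fills with buffer addresses, a status area written by the device,
 * and a "used" (completion) descriptor table.  Element sizes differ
 * between the AX210+ and older device families.
 */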
1887 int
1888 iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1889 {
1890 	bus_size_t size;
1891 	int i, err;
1892 
1893 	ring->cur = 0;
1894 
1895 	/* Allocate RX descriptors (256-byte aligned). */
1896 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1897 		size = sizeof(struct iwx_rx_transfer_desc);
1898 	else
1899 		size = sizeof(uint64_t);
1900 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma,
1901 	    size * IWX_RX_MQ_RING_COUNT, 256);
1902 	if (err) {
1903 		printf("%s: could not allocate RX ring DMA memory\n",
1904 		    DEVNAME(sc));
1905 		goto fail;
1906 	}
1907 	ring->desc = ring->free_desc_dma.vaddr;
1908 
1909 	/* Allocate RX status area (16-byte aligned). */
1910 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1911 		size = sizeof(uint16_t);
1912 	else
1913 		size = sizeof(*ring->stat);
1914 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, size, 16);
1915 	if (err) {
1916 		printf("%s: could not allocate RX status DMA memory\n",
1917 		    DEVNAME(sc));
1918 		goto fail;
1919 	}
1920 	ring->stat = ring->stat_dma.vaddr;
1921 
1922 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1923 		size = sizeof(struct iwx_rx_completion_desc);
1924 	else
1925 		size = sizeof(uint32_t);
1926 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1927 	    size * IWX_RX_MQ_RING_COUNT, 256);
1928 	if (err) {
1929 		printf("%s: could not allocate RX ring DMA memory\n",
1930 		    DEVNAME(sc));
1931 		goto fail;
1932 	}
1933 
1934 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1935 		struct iwx_rx_data *data = &ring->data[i];
1936 
1937 		memset(data, 0, sizeof(*data));
1938 		err = bus_dmamap_create(sc->sc_dmat, IWX_RBUF_SIZE, 1,
1939 		    IWX_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1940 		    &data->map);
1941 		if (err) {
1942 			printf("%s: could not create RX buf DMA map\n",
1943 			    DEVNAME(sc));
1944 			goto fail;
1945 		}
1946 
1947 		err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
1948 		if (err)
1949 			goto fail;
1950 	}
1951 	return 0;
1952 
1953 fail:	iwx_free_rx_ring(sc, ring);
1954 	return err;
1955 }
1956 
1957 void
1958 iwx_disable_rx_dma(struct iwx_softc *sc)
1959 {
1960 	int ntries;
1961 
1962 	if (iwx_nic_lock(sc)) {
1963 		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1964 			iwx_write_umac_prph(sc, IWX_RFH_RXF_DMA_CFG_GEN3, 0);
1965 		else
1966 			iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
1967 		for (ntries = 0; ntries < 1000; ntries++) {
1968 			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
1969 				if (iwx_read_umac_prph(sc,
1970 				    IWX_RFH_GEN_STATUS_GEN3) & IWX_RXF_DMA_IDLE)
1971 					break;
1972 			} else {
1973 				if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
1974 				    IWX_RXF_DMA_IDLE)
1975 					break;
1976 			}
1977 			DELAY(10);
1978 		}
1979 		iwx_nic_unlock(sc);
1980 	}
1981 }
1982 
1983 void
1984 iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1985 {
1986 	ring->cur = 0;
1987 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1988 	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1989 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
1990 		uint16_t *status = sc->rxq.stat_dma.vaddr;
1991 		*status = 0;
1992 	} else
1993 		memset(ring->stat, 0, sizeof(*ring->stat));
1994 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1995 	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1997 }
1998 
1999 void
2000 iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2001 {
2002 	int i;
2003 
2004 	iwx_dma_contig_free(&ring->free_desc_dma);
2005 	iwx_dma_contig_free(&ring->stat_dma);
2006 	iwx_dma_contig_free(&ring->used_desc_dma);
2007 
2008 	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2009 		struct iwx_rx_data *data = &ring->data[i];
2010 
2011 		if (data->m != NULL) {
2012 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2013 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2014 			bus_dmamap_unload(sc->sc_dmat, data->map);
2015 			m_freem(data->m);
2016 			data->m = NULL;
2017 		}
2018 		if (data->map != NULL)
2019 			bus_dmamap_destroy(sc->sc_dmat, data->map);
2020 	}
2021 }
2022 
2023 int
2024 iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
2025 {
2026 	bus_addr_t paddr;
2027 	bus_size_t size;
2028 	int i, err;
2029 	size_t bc_tbl_size;
2030 	bus_size_t bc_align;
2031 
2032 	ring->qid = qid;
2033 	ring->queued = 0;
2034 	ring->cur = 0;
2035 	ring->cur_hw = 0;
2036 	ring->tail = 0;
2037 	ring->tail_hw = 0;
2038 
2039 	/* Allocate TX descriptors (256-byte aligned). */
2040 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
2041 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
2042 	if (err) {
2043 		printf("%s: could not allocate TX ring DMA memory\n",
2044 		    DEVNAME(sc));
2045 		goto fail;
2046 	}
2047 	ring->desc = ring->desc_dma.vaddr;
2048 
2049 	/*
2050 	 * The hardware supports up to 512 Tx rings, which is more
2051 	 * than we currently need.
2052 	 *
2053 	 * In DQA mode we use 1 command queue + 1 default queue for
2054 	 * management, control, and non-QoS data frames.
2055 	 * The command queue is sc->txq[0], our default queue is sc->txq[1].
2056 	 *
2057 	 * Tx aggregation requires additional queues, one queue per TID for
2058 	 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
2059 	 * Firmware may assign its own internal IDs for these queues
2060 	 * depending on which TID gets aggregation enabled first.
2061 	 * The driver maintains a table mapping driver-side queue IDs
2062 	 * to firmware-side queue IDs.
2063 	 */
2064 
2065 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2066 		bc_tbl_size = sizeof(struct iwx_gen3_bc_tbl_entry) *
2067 		    IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210;
2068 		bc_align = 128;
2069 	} else {
2070 		bc_tbl_size = sizeof(struct iwx_agn_scd_bc_tbl);
2071 		bc_align = 64;
2072 	}
2073 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl, bc_tbl_size,
2074 	    bc_align);
2075 	if (err) {
2076 		printf("%s: could not allocate byte count table DMA memory\n",
2077 		    DEVNAME(sc));
2078 		goto fail;
2079 	}
2080 
2081 	size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
2082 	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
2083 	    IWX_FIRST_TB_SIZE_ALIGN);
2084 	if (err) {
2085 		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
2086 		goto fail;
2087 	}
2088 	ring->cmd = ring->cmd_dma.vaddr;
2089 
2090 	paddr = ring->cmd_dma.paddr;
2091 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2092 		struct iwx_tx_data *data = &ring->data[i];
2093 		size_t mapsize;
2094 
2095 		data->cmd_paddr = paddr;
2096 		paddr += sizeof(struct iwx_device_cmd);
2097 
2098 		/* FW commands may require more mapped space than packets. */
2099 		if (qid == IWX_DQA_CMD_QUEUE)
2100 			mapsize = (sizeof(struct iwx_cmd_header) +
2101 			    IWX_MAX_CMD_PAYLOAD_SIZE);
2102 		else
2103 			mapsize = MCLBYTES;
2104 		err = bus_dmamap_create(sc->sc_dmat, mapsize,
2105 		    IWX_TFH_NUM_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
2106 		    &data->map);
2107 		if (err) {
2108 			printf("%s: could not create TX buf DMA map\n",
2109 			    DEVNAME(sc));
2110 			goto fail;
2111 		}
2112 	}
2113 	KASSERT(paddr == ring->cmd_dma.paddr + size);
2114 	return 0;
2115 
2116 fail:	iwx_free_tx_ring(sc, ring);
2117 	return err;
2118 }
2119 
2120 void
2121 iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2122 {
2123 	int i;
2124 
2125 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2126 		struct iwx_tx_data *data = &ring->data[i];
2127 
2128 		if (data->m != NULL) {
2129 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2130 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2131 			bus_dmamap_unload(sc->sc_dmat, data->map);
2132 			m_freem(data->m);
2133 			data->m = NULL;
2134 		}
2135 	}
2136 
2137 	/* Clear byte count table. */
2138 	memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
2139 
2140 	/* Clear TX descriptors. */
2141 	memset(ring->desc, 0, ring->desc_dma.size);
2142 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
2143 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
2144 	sc->qfullmsk &= ~(1 << ring->qid);
2145 	sc->qenablemsk &= ~(1 << ring->qid);
2146 	for (i = 0; i < nitems(sc->aggqid); i++) {
2147 		if (sc->aggqid[i] == ring->qid) {
2148 			sc->aggqid[i] = 0;
2149 			break;
2150 		}
2151 	}
2152 	ring->queued = 0;
2153 	ring->cur = 0;
2154 	ring->cur_hw = 0;
2155 	ring->tail = 0;
2156 	ring->tail_hw = 0;
2157 	ring->tid = 0;
2158 }
2159 
2160 void
2161 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2162 {
2163 	int i;
2164 
2165 	iwx_dma_contig_free(&ring->desc_dma);
2166 	iwx_dma_contig_free(&ring->cmd_dma);
2167 	iwx_dma_contig_free(&ring->bc_tbl);
2168 
2169 	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2170 		struct iwx_tx_data *data = &ring->data[i];
2171 
2172 		if (data->m != NULL) {
2173 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2174 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2175 			bus_dmamap_unload(sc->sc_dmat, data->map);
2176 			m_freem(data->m);
2177 			data->m = NULL;
2178 		}
2179 		if (data->map != NULL)
2180 			bus_dmamap_destroy(sc->sc_dmat, data->map);
2181 	}
2182 }
2183 
2184 void
2185 iwx_enable_rfkill_int(struct iwx_softc *sc)
2186 {
2187 	if (!sc->sc_msix) {
2188 		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
2189 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2190 	} else {
2191 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2192 		    sc->sc_fh_init_mask);
2193 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2194 		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
2195 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
2196 	}
2197 
2198 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2199 	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
2200 }
2201 
2202 int
2203 iwx_check_rfkill(struct iwx_softc *sc)
2204 {
2205 	uint32_t v;
2206 	int rv;
2207 
2208 	/*
2209 	 * "documentation" is not really helpful here:
2210 	 *  27:	HW_RF_KILL_SW
2211 	 *	Indicates state of (platform's) hardware RF-Kill switch
2212 	 *
2213 	 * But apparently when it's off, it's on ...
2214 	 */
2215 	v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
2216 	rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
2217 	if (rv) {
2218 		sc->sc_flags |= IWX_FLAG_RFKILL;
2219 	} else {
2220 		sc->sc_flags &= ~IWX_FLAG_RFKILL;
2221 	}
2222 
2223 	return rv;
2224 }
2225 
2226 void
2227 iwx_enable_interrupts(struct iwx_softc *sc)
2228 {
2229 	if (!sc->sc_msix) {
2230 		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
2231 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2232 	} else {
2233 		/*
2234 		 * fh/hw_mask keeps all the unmasked causes.
2235 		 * Unlike msi, in msix cause is enabled when it is unset.
2236 		 */
2237 		sc->sc_hw_mask = sc->sc_hw_init_mask;
2238 		sc->sc_fh_mask = sc->sc_fh_init_mask;
2239 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2240 		    ~sc->sc_fh_mask);
2241 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2242 		    ~sc->sc_hw_mask);
2243 	}
2244 }
2245 
2246 void
2247 iwx_enable_fwload_interrupt(struct iwx_softc *sc)
2248 {
2249 	if (!sc->sc_msix) {
2250 		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
2251 		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2252 	} else {
2253 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2254 		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
2255 		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
2256 		/*
2257 		 * Leave all the FH causes enabled to get the ALIVE
2258 		 * notification.
2259 		 */
2260 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2261 		    ~sc->sc_fh_init_mask);
2262 		sc->sc_fh_mask = sc->sc_fh_init_mask;
2263 	}
2264 }
2265 
2266 void
2267 iwx_restore_interrupts(struct iwx_softc *sc)
2268 {
2269 	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2270 }
2271 
2272 void
2273 iwx_disable_interrupts(struct iwx_softc *sc)
2274 {
2275 	if (!sc->sc_msix) {
2276 		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
2277 
2278 		/* acknowledge all interrupts */
2279 		IWX_WRITE(sc, IWX_CSR_INT, ~0);
2280 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
2281 	} else {
2282 		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2283 		    sc->sc_fh_init_mask);
2284 		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2285 		    sc->sc_hw_init_mask);
2286 	}
2287 }
2288 
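/*
 * Reset the interrupt cause table (ICT), a DMA area into which the
 * device writes interrupt causes, and re-enable interrupts in ICT
 * mode.
 */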
2289 void
2290 iwx_ict_reset(struct iwx_softc *sc)
2291 {
2292 	iwx_disable_interrupts(sc);
2293 
2294 	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
2295 	sc->ict_cur = 0;
2296 
2297 	/* Set physical address of ICT (4KB aligned). */
2298 	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
2299 	    IWX_CSR_DRAM_INT_TBL_ENABLE
2300 	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
2301 	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
2302 	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);
2303 
2304 	/* Switch to ICT interrupt mode in driver. */
2305 	sc->sc_flags |= IWX_FLAG_USE_ICT;
2306 
2307 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
2308 	iwx_enable_interrupts(sc);
2309 }
2310 
2311 #define IWX_HW_READY_TIMEOUT 50
2312 int
2313 iwx_set_hw_ready(struct iwx_softc *sc)
2314 {
2315 	int ready;
2316 
2317 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2318 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
2319 
2320 	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
2321 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2322 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2323 	    IWX_HW_READY_TIMEOUT);
2324 	if (ready)
2325 		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
2326 		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);
2327 
2328 	return ready;
2329 }
2330 #undef IWX_HW_READY_TIMEOUT
2331 
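/*
 * Ask the device to become ready for host access, re-issuing the
 * PREPARE request and polling the "NIC ready" bit for a number of
 * attempts before giving up with ETIMEDOUT.
 */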
2332 int
2333 iwx_prepare_card_hw(struct iwx_softc *sc)
2334 {
2335 	int t = 0;
2336 	int ntries;
2337 
2338 	if (iwx_set_hw_ready(sc))
2339 		return 0;
2340 
2341 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2342 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2343 	DELAY(1000);
2344 
2345 	for (ntries = 0; ntries < 10; ntries++) {
2346 		/* If HW is not ready, prepare the conditions to check again */
2347 		IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2348 		    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);
2349 
2350 		do {
2351 			if (iwx_set_hw_ready(sc))
2352 				return 0;
2353 			DELAY(200);
2354 			t += 200;
2355 		} while (t < 150000);
2356 		DELAY(25000);
2357 	}
2358 
2359 	return ETIMEDOUT;
2360 }
2361 
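/*
 * Force power gating: assert CR_FORCE_ACTIVE, enable power/sleep
 * gating, then deassert CR_FORCE_ACTIVE again, with short delays
 * in between.
 */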
2362 int
2363 iwx_force_power_gating(struct iwx_softc *sc)
2364 {
2365 	int err;
2366 
2367 	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2368 	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2369 	if (err)
2370 		return err;
2371 	DELAY(20);
2372 	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2373 	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
2374 	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
2375 	if (err)
2376 		return err;
2377 	DELAY(20);
2378 	err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2379 	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2380 	return err;
2381 }
2382 
2383 void
2384 iwx_apm_config(struct iwx_softc *sc)
2385 {
2386 	pcireg_t lctl, cap;
2387 
2388 	/*
2389 	 * L0S states have been found to be unstable with our devices
2390 	 * and in newer hardware they are not officially supported at
2391 	 * all, so we must always set the L0S_DISABLED bit.
2392 	 */
2393 	IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);
2394 
2395 	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2396 	    sc->sc_cap_off + PCI_PCIE_LCSR);
2397 	sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
2398 	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2399 	    sc->sc_cap_off + PCI_PCIE_DCSR2);
2400 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
2401 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
2402 	    DEVNAME(sc),
2403 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
2404 	    sc->sc_ltr_enabled ? "En" : "Dis"));
2405 }
2406 
2407 /*
2408  * Start up the NIC's basic functionality after it has been reset,
2409  * e.g. after platform boot or shutdown.
2410  * NOTE:  This does not load uCode nor start the embedded processor.
2411  */
2412 int
2413 iwx_apm_init(struct iwx_softc *sc)
2414 {
2415 	int err = 0;
2416 
2417 	/*
2418 	 * Disable L0s without affecting L1;
2419 	 *  don't wait for ICH L0s (ICH bug W/A)
2420 	 */
2421 	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
2422 	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
2423 
2424 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
2425 	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);
2426 
2427 	/*
2428 	 * Enable HAP INTA (interrupt from management bus) to
2429 	 * wake device's PCI Express link L1a -> L0s
2430 	 */
2431 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2432 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
2433 
2434 	iwx_apm_config(sc);
2435 
2436 	/*
2437 	 * Set "initialization complete" bit to move adapter from
2438 	 * D0U* --> D0A* (powered-up active) state.
2439 	 */
2440 	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2441 
2442 	/*
2443 	 * Wait for clock stabilization; once stabilized, access to
2444 	 * device-internal resources is supported, e.g. iwx_write_prph()
2445 	 * and accesses to uCode SRAM.
2446 	 */
2447 	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2448 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2449 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2450 		printf("%s: timeout waiting for clock stabilization\n",
2451 		    DEVNAME(sc));
2452 		err = ETIMEDOUT;
2453 		goto out;
2454 	}
2455  out:
2456 	if (err)
2457 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
2458 	return err;
2459 }
2460 
2461 void
2462 iwx_apm_stop(struct iwx_softc *sc)
2463 {
2464 	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2465 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2466 	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2467 	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
2468 	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
2469 	DELAY(1000);
2470 	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2471 	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2472 	DELAY(5000);
2473 
2474 	/* stop device's busmaster DMA activity */
2475 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);
2476 
2477 	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
2478 	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
2479 	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
2480 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
2481 
2482 	/*
2483 	 * Clear "initialization complete" bit to move adapter from
2484 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
2485 	 */
2486 	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2487 	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2488 }
2489 
2490 void
2491 iwx_init_msix_hw(struct iwx_softc *sc)
2492 {
2493 	iwx_conf_msix_hw(sc, 0);
2494 
2495 	if (!sc->sc_msix)
2496 		return;
2497 
2498 	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
2499 	sc->sc_fh_mask = sc->sc_fh_init_mask;
2500 	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
2501 	sc->sc_hw_mask = sc->sc_hw_init_mask;
2502 }
2503 
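/*
 * Program the MSI-X IVAR table.  The driver uses a single interrupt
 * vector, so both RX queues and all non-RX causes are mapped to
 * vector 0.  If 'stopped' is set, registers which require the NIC
 * lock are left untouched.
 */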
2504 void
2505 iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
2506 {
2507 	int vector = 0;
2508 
2509 	if (!sc->sc_msix) {
2510 		/* Newer chips default to MSIX. */
2511 		if (!stopped && iwx_nic_lock(sc)) {
2512 			iwx_write_umac_prph(sc, IWX_UREG_CHICK,
2513 			    IWX_UREG_CHICK_MSI_ENABLE);
2514 			iwx_nic_unlock(sc);
2515 		}
2516 		return;
2517 	}
2518 
2519 	if (!stopped && iwx_nic_lock(sc)) {
2520 		iwx_write_umac_prph(sc, IWX_UREG_CHICK,
2521 		    IWX_UREG_CHICK_MSIX_ENABLE);
2522 		iwx_nic_unlock(sc);
2523 	}
2524 
2525 	/* Disable all interrupts */
2526 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
2527 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);
2528 
2529 	/* Map fallback-queue (command/mgmt) to a single vector */
2530 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
2531 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2532 	/* Map RSS queue (data) to the same vector */
2533 	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
2534 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2535 
2536 	/* Enable the interrupt causes for both RX queues. */
2537 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2538 	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);
2539 
2540 	/* Map non-RX causes to the same vector */
2541 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
2542 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2543 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
2544 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2545 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
2546 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2547 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
2548 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2549 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
2550 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2551 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
2552 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2553 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE),
2554 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2555 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
2556 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2557 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
2558 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2559 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
2560 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2561 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
2562 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2563 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
2564 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2565 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
2566 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2567 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
2568 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2569 	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
2570 	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2571 
2572 	/* Enable the non-RX interrupt causes. */
2573 	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2574 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2575 	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2576 	    IWX_MSIX_FH_INT_CAUSES_S2D |
2577 	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
2578 	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2579 	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
2580 	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2581 	    IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE |
2582 	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2583 	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2584 	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2585 	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2586 	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
2587 	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
2588 	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2589 	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
2590 }
2591 
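/*
 * The 22000 device family may come out of reset with a "persistence"
 * bit set in the HPM debug register.  This bit must be cleared before
 * the device can be brought up; if the register is write-protected we
 * have to give up.
 */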
2592 int
2593 iwx_clear_persistence_bit(struct iwx_softc *sc)
2594 {
2595 	uint32_t hpm, wprot;
2596 
2597 	hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
2598 	if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
2599 		wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
2600 		if (wprot & IWX_PREG_WFPM_ACCESS) {
2601 			printf("%s: cannot clear persistence bit\n",
2602 			    DEVNAME(sc));
2603 			return EPERM;
2604 		}
2605 		iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
2606 		    hpm & ~IWX_PERSISTENCE_BIT);
2607 	}
2608 
2609 	return 0;
2610 }
2611 
2612 int
2613 iwx_start_hw(struct iwx_softc *sc)
2614 {
2615 	int err;
2616 
2617 	err = iwx_prepare_card_hw(sc);
2618 	if (err)
2619 		return err;
2620 
2621 	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
2622 		err = iwx_clear_persistence_bit(sc);
2623 		if (err)
2624 			return err;
2625 	}
2626 
2627 	/* Reset the entire device */
2628 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2629 	DELAY(5000);
2630 
2631 	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000 &&
2632 	    sc->sc_integrated) {
2633 		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2634 		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2635 		DELAY(20);
2636 		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2637 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2638 		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2639 			printf("%s: timeout waiting for clock stabilization\n",
2640 			    DEVNAME(sc));
2641 			return ETIMEDOUT;
2642 		}
2643 
2644 		err = iwx_force_power_gating(sc);
2645 		if (err)
2646 			return err;
2647 
2648 		/* Reset the entire device */
2649 		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2650 		DELAY(5000);
2651 	}
2652 
2653 	err = iwx_apm_init(sc);
2654 	if (err)
2655 		return err;
2656 
2657 	iwx_init_msix_hw(sc);
2658 
2659 	iwx_enable_rfkill_int(sc);
2660 	iwx_check_rfkill(sc);
2661 
2662 	return 0;
2663 }
2664 
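/*
 * Stop the device and put it into a low power state.  This tears down
 * all Tx/Rx rings and block ack sessions, resets the on-board
 * processor, and reprograms the IVAR table, which the reset erases.
 */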
2665 void
2666 iwx_stop_device(struct iwx_softc *sc)
2667 {
2668 	struct ieee80211com *ic = &sc->sc_ic;
2669 	struct ieee80211_node *ni = ic->ic_bss;
2670 	int i;
2671 
2672 	iwx_disable_interrupts(sc);
2673 	sc->sc_flags &= ~IWX_FLAG_USE_ICT;
2674 
2675 	iwx_disable_rx_dma(sc);
2676 	iwx_reset_rx_ring(sc, &sc->rxq);
2677 	for (i = 0; i < nitems(sc->txq); i++)
2678 		iwx_reset_tx_ring(sc, &sc->txq[i]);
2679 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
2680 		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
2681 		if (ba->ba_state != IEEE80211_BA_AGREED)
2682 			continue;
2683 		ieee80211_delba_request(ic, ni, 0, 1, i);
2684 	}
2685 
2686 	/* Make sure (redundant) we've released our request to stay awake */
2687 	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2688 	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2689 	if (sc->sc_nic_locks > 0)
2690 		printf("%s: %d active NIC locks forcefully cleared\n",
2691 		    DEVNAME(sc), sc->sc_nic_locks);
2692 	sc->sc_nic_locks = 0;
2693 
2694 	/* Stop the device, and put it in low power state */
2695 	iwx_apm_stop(sc);
2696 
2697 	/* Reset the on-board processor. */
2698 	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2699 	DELAY(5000);
2700 
2701 	/*
2702 	 * Upon stop, the IVAR table gets erased, so msi-x won't
2703 	 * work. This causes a bug in RF-KILL flows, since the interrupt
2704 	 * that enables radio won't fire on the correct irq, and the
2705 	 * driver won't be able to handle the interrupt.
2706 	 * Configure the IVAR table again after reset.
2707 	 */
2708 	iwx_conf_msix_hw(sc, 1);
2709 
2710 	/*
2711 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2712 	 * Clear the interrupt again.
2713 	 */
2714 	iwx_disable_interrupts(sc);
2715 
2716 	/* Even though we stop the HW we still want the RF kill interrupt. */
2717 	iwx_enable_rfkill_int(sc);
2718 	iwx_check_rfkill(sc);
2719 
2720 	iwx_prepare_card_hw(sc);
2721 
2722 	iwx_ctxt_info_free_paging(sc);
2723 	iwx_dma_contig_free(&sc->pnvm_dma);
2724 	for (i = 0; i < sc->pnvm_segs; i++)
2725 		iwx_dma_contig_free(&sc->pnvm_seg_dma[i]);
2726 }
2727 
2728 void
2729 iwx_nic_config(struct iwx_softc *sc)
2730 {
2731 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2732 	uint32_t mask, val, reg_val = 0;
2733 
2734 	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
2735 	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
2736 	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
2737 	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
2738 	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
2739 	    IWX_FW_PHY_CFG_RADIO_DASH_POS;
2740 
2741 	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2742 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2743 	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2744 	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2745 
2746 	/* radio configuration */
2747 	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2748 	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2749 	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2750 
2751 	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2752 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2753 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2754 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2755 	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2756 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2757 	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2758 
2759 	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
2760 	val &= ~mask;
2761 	val |= reg_val;
2762 	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
2763 }
2764 
2765 int
2766 iwx_nic_rx_init(struct iwx_softc *sc)
2767 {
2768 	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);
2769 
2770 	/*
2771 	 * We don't configure the RFH; the firmware will do that.
2772 	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
2773 	 */
2774 	return 0;
2775 }
2776 
2777 int
2778 iwx_nic_init(struct iwx_softc *sc)
2779 {
2780 	int err;
2781 
2782 	iwx_apm_init(sc);
2783 	if (sc->sc_device_family < IWX_DEVICE_FAMILY_AX210)
2784 		iwx_nic_config(sc);
2785 
2786 	err = iwx_nic_rx_init(sc);
2787 	if (err)
2788 		return err;
2789 
2790 	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2791 
2792 	return 0;
2793 }
2794 
2795 /* Map a TID to an ieee80211_edca_ac category. */
2796 const uint8_t iwx_tid_to_ac[IWX_MAX_TID_COUNT] = {
2797 	EDCA_AC_BE,
2798 	EDCA_AC_BK,
2799 	EDCA_AC_BK,
2800 	EDCA_AC_BE,
2801 	EDCA_AC_VI,
2802 	EDCA_AC_VI,
2803 	EDCA_AC_VO,
2804 	EDCA_AC_VO,
2805 };
2806 
2807 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2808 const uint8_t iwx_ac_to_tx_fifo[] = {
2809 	IWX_GEN2_EDCA_TX_FIFO_BE,
2810 	IWX_GEN2_EDCA_TX_FIFO_BK,
2811 	IWX_GEN2_EDCA_TX_FIFO_VI,
2812 	IWX_GEN2_EDCA_TX_FIFO_VO,
2813 };
2814 
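/*
 * Configure a firmware Tx queue via the SCD queue configuration
 * command, using either the legacy (v0) or the v3 command layout
 * depending on what the firmware supports.  The firmware response
 * must confirm our fixed queue ID and current write pointer since
 * this driver does not support dynamic queue ID assignment.
 */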
2815 int
2816 iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
2817     int num_slots)
2818 {
2819 	struct iwx_rx_packet *pkt;
2820 	struct iwx_tx_queue_cfg_rsp *resp;
2821 	struct iwx_tx_queue_cfg_cmd cmd_v0;
2822 	struct iwx_scd_queue_cfg_cmd cmd_v3;
2823 	struct iwx_host_cmd hcmd = {
2824 		.flags = IWX_CMD_WANT_RESP,
2825 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2826 	};
2827 	struct iwx_tx_ring *ring = &sc->txq[qid];
2828 	int err, fwqid, cmd_ver;
2829 	uint32_t wr_idx;
2830 	size_t resp_len;
2831 
2832 	iwx_reset_tx_ring(sc, ring);
2833 
2834 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
2835 	    IWX_SCD_QUEUE_CONFIG_CMD);
2836 	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
2837 		memset(&cmd_v0, 0, sizeof(cmd_v0));
2838 		cmd_v0.sta_id = sta_id;
2839 		cmd_v0.tid = tid;
2840 		cmd_v0.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
2841 		cmd_v0.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
2842 		cmd_v0.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
2843 		cmd_v0.tfdq_addr = htole64(ring->desc_dma.paddr);
2844 		hcmd.id = IWX_SCD_QUEUE_CFG;
2845 		hcmd.data[0] = &cmd_v0;
2846 		hcmd.len[0] = sizeof(cmd_v0);
2847 	} else if (cmd_ver == 3) {
2848 		memset(&cmd_v3, 0, sizeof(cmd_v3));
2849 		cmd_v3.operation = htole32(IWX_SCD_QUEUE_ADD);
2850 		cmd_v3.u.add.tfdq_dram_addr = htole64(ring->desc_dma.paddr);
2851 		cmd_v3.u.add.bc_dram_addr = htole64(ring->bc_tbl.paddr);
2852 		cmd_v3.u.add.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
2853 		cmd_v3.u.add.flags = htole32(0);
2854 		cmd_v3.u.add.sta_mask = htole32(1 << sta_id);
2855 		cmd_v3.u.add.tid = tid;
2856 		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
2857 		    IWX_SCD_QUEUE_CONFIG_CMD);
2858 		hcmd.data[0] = &cmd_v3;
2859 		hcmd.len[0] = sizeof(cmd_v3);
2860 	} else {
2861 		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
2862 		    DEVNAME(sc), cmd_ver);
2863 		return ENOTSUP;
2864 	}
2865 
2866 	err = iwx_send_cmd(sc, &hcmd);
2867 	if (err)
2868 		return err;
2869 
2870 	pkt = hcmd.resp_pkt;
2871 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2872 		err = EIO;
2873 		goto out;
2874 	}
2875 
2876 	resp_len = iwx_rx_packet_payload_len(pkt);
2877 	if (resp_len != sizeof(*resp)) {
2878 		err = EIO;
2879 		goto out;
2880 	}
2881 
2882 	resp = (void *)pkt->data;
2883 	fwqid = le16toh(resp->queue_number);
2884 	wr_idx = le16toh(resp->write_pointer);
2885 
2886 	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
2887 	if (fwqid != qid) {
2888 		err = EIO;
2889 		goto out;
2890 	}
2891 
2892 	if (wr_idx != ring->cur_hw) {
2893 		err = EIO;
2894 		goto out;
2895 	}
2896 
2897 	sc->qenablemsk |= (1 << qid);
2898 	ring->tid = tid;
2899 out:
2900 	iwx_free_resp(sc, &hcmd);
2901 	return err;
2902 }
2903 
2904 int
2905 iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
2906 {
2907 	struct iwx_rx_packet *pkt;
2908 	struct iwx_tx_queue_cfg_rsp *resp;
2909 	struct iwx_tx_queue_cfg_cmd cmd_v0;
2910 	struct iwx_scd_queue_cfg_cmd cmd_v3;
2911 	struct iwx_host_cmd hcmd = {
2912 		.flags = IWX_CMD_WANT_RESP,
2913 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2914 	};
2915 	struct iwx_tx_ring *ring = &sc->txq[qid];
2916 	int err, cmd_ver;
2917 
2918 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
2919 	    IWX_SCD_QUEUE_CONFIG_CMD);
2920 	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
2921 		memset(&cmd_v0, 0, sizeof(cmd_v0));
2922 		cmd_v0.sta_id = sta_id;
2923 		cmd_v0.tid = tid;
2924 		cmd_v0.flags = htole16(0); /* clear "queue enabled" flag */
2925 		cmd_v0.cb_size = htole32(0);
2926 		cmd_v0.byte_cnt_addr = htole64(0);
2927 		cmd_v0.tfdq_addr = htole64(0);
2928 		hcmd.id = IWX_SCD_QUEUE_CFG;
2929 		hcmd.data[0] = &cmd_v0;
2930 		hcmd.len[0] = sizeof(cmd_v0);
2931 	} else if (cmd_ver == 3) {
2932 		memset(&cmd_v3, 0, sizeof(cmd_v3));
2933 		cmd_v3.operation = htole32(IWX_SCD_QUEUE_REMOVE);
2934 		cmd_v3.u.remove.sta_mask = htole32(1 << sta_id);
2935 		cmd_v3.u.remove.tid = tid;
2936 		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
2937 		    IWX_SCD_QUEUE_CONFIG_CMD);
2938 		hcmd.data[0] = &cmd_v3;
2939 		hcmd.len[0] = sizeof(cmd_v3);
2940 	} else {
2941 		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
2942 		    DEVNAME(sc), cmd_ver);
2943 		return ENOTSUP;
2944 	}
2945 
2946 	err = iwx_send_cmd(sc, &hcmd);
2947 	if (err)
2948 		return err;
2949 
2950 	pkt = hcmd.resp_pkt;
2951 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2952 		err = EIO;
2953 		goto out;
2954 	}
2955 
2956 	sc->qenablemsk &= ~(1 << qid);
2957 	iwx_reset_tx_ring(sc, ring);
2958 out:
2959 	iwx_free_resp(sc, &hcmd);
2960 	return err;
2961 }
2962 
2963 void
2964 iwx_post_alive(struct iwx_softc *sc)
2965 {
2966 	int txcmd_ver;
2967 
2968 	iwx_ict_reset(sc);
2969 
2970 	txcmd_ver = iwx_lookup_notif_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
2971 	if (txcmd_ver != IWX_FW_CMD_VER_UNKNOWN && txcmd_ver > 6)
2972 		sc->sc_rate_n_flags_version = 2;
2973 	else
2974 		sc->sc_rate_n_flags_version = 1;
2975 
2976 	txcmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
2977 }
2978 
2979 int
2980 iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
2981     uint32_t duration_tu)
2982 {
2983 	struct iwx_session_prot_cmd cmd = {
2984 		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
2985 		    in->in_color)),
2986 		.action = htole32(IWX_FW_CTXT_ACTION_ADD),
2987 		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
2988 		.duration_tu = htole32(duration_tu),
2989 	};
2990 	uint32_t cmd_id;
2991 	int err;
2992 
2993 	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
2994 	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
2995 	if (!err)
2996 		sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
2997 	return err;
2998 }
2999 
3000 void
3001 iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
3002 {
3003 	struct iwx_session_prot_cmd cmd = {
3004 		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
3005 		    in->in_color)),
3006 		.action = htole32(IWX_FW_CTXT_ACTION_REMOVE),
3007 		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
3008 		.duration_tu = 0,
3009 	};
3010 	uint32_t cmd_id;
3011 
3012 	/* Do nothing if the time event has already ended. */
3013 	if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
3014 		return;
3015 
3016 	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
3017 	if (iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd) == 0)
3018 		sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
3019 }
3020 
3021 /*
3022  * NVM read access and content parsing.  We do not support
3023  * external NVM or writing NVM.
3024  */
3025 
3026 uint8_t
3027 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
3028 {
3029 	uint8_t tx_ant;
3030 
3031 	tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
3032 	    >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
3033 
3034 	if (sc->sc_nvm.valid_tx_ant)
3035 		tx_ant &= sc->sc_nvm.valid_tx_ant;
3036 
3037 	return tx_ant;
3038 }
3039 
3040 uint8_t
3041 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
3042 {
3043 	uint8_t rx_ant;
3044 
3045 	rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
3046 	    >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
3047 
3048 	if (sc->sc_nvm.valid_rx_ant)
3049 		rx_ant &= sc->sc_nvm.valid_rx_ant;
3050 
3051 	return rx_ant;
3052 }
3053 
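/*
 * Translate the NVM channel profile into net80211 channel structures,
 * honouring the SKU capability bits (5 GHz, 11n, 11ac) and the
 * per-channel flags such as "valid", "active", and the allowed
 * channel widths.
 */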
3054 void
3055 iwx_init_channel_map(struct iwx_softc *sc, uint16_t *channel_profile_v3,
3056     uint32_t *channel_profile_v4, int nchan_profile)
3057 {
3058 	struct ieee80211com *ic = &sc->sc_ic;
3059 	struct iwx_nvm_data *data = &sc->sc_nvm;
3060 	int ch_idx;
3061 	struct ieee80211_channel *channel;
3062 	uint32_t ch_flags;
3063 	int is_5ghz;
3064 	int flags, hw_value;
3065 	int nchan;
3066 	const uint8_t *nvm_channels;
3067 
3068 	if (sc->sc_uhb_supported) {
3069 		nchan = nitems(iwx_nvm_channels_uhb);
3070 		nvm_channels = iwx_nvm_channels_uhb;
3071 	} else {
3072 		nchan = nitems(iwx_nvm_channels_8000);
3073 		nvm_channels = iwx_nvm_channels_8000;
3074 	}
3075 
3076 	for (ch_idx = 0; ch_idx < nchan && ch_idx < nchan_profile; ch_idx++) {
3077 		if (channel_profile_v4)
3078 			ch_flags = le32_to_cpup(channel_profile_v4 + ch_idx);
3079 		else
3080 			ch_flags = le16_to_cpup(channel_profile_v3 + ch_idx);
3081 
3082 		/* net80211 cannot handle 6 GHz channel numbers yet */
3083 		if (ch_idx >= IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS)
3084 			break;
3085 
3086 		is_5ghz = ch_idx >= IWX_NUM_2GHZ_CHANNELS;
3087 		if (is_5ghz && !data->sku_cap_band_52GHz_enable)
3088 			ch_flags &= ~IWX_NVM_CHANNEL_VALID;
3089 
3090 		hw_value = nvm_channels[ch_idx];
3091 		channel = &ic->ic_channels[hw_value];
3092 
3093 		if (!(ch_flags & IWX_NVM_CHANNEL_VALID)) {
3094 			channel->ic_freq = 0;
3095 			channel->ic_flags = 0;
3096 			continue;
3097 		}
3098 
3099 		if (!is_5ghz) {
3100 			flags = IEEE80211_CHAN_2GHZ;
3101 			channel->ic_flags
3102 			    = IEEE80211_CHAN_CCK
3103 			    | IEEE80211_CHAN_OFDM
3104 			    | IEEE80211_CHAN_DYN
3105 			    | IEEE80211_CHAN_2GHZ;
3106 		} else {
3107 			flags = IEEE80211_CHAN_5GHZ;
3108 			channel->ic_flags =
3109 			    IEEE80211_CHAN_A;
3110 		}
3111 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
3112 
3113 		if (!(ch_flags & IWX_NVM_CHANNEL_ACTIVE))
3114 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
3115 
3116 		if (data->sku_cap_11n_enable) {
3117 			channel->ic_flags |= IEEE80211_CHAN_HT;
3118 			if (ch_flags & IWX_NVM_CHANNEL_40MHZ)
3119 				channel->ic_flags |= IEEE80211_CHAN_40MHZ;
3120 		}
3121 
3122 		if (is_5ghz && data->sku_cap_11ac_enable) {
3123 			channel->ic_flags |= IEEE80211_CHAN_VHT;
3124 			if (ch_flags & IWX_NVM_CHANNEL_80MHZ)
3125 				channel->ic_xflags |= IEEE80211_CHANX_80MHZ;
3126 		}
3127 	}
3128 }
3129 
3130 int
3131 iwx_mimo_enabled(struct iwx_softc *sc)
3132 {
3133 	struct ieee80211com *ic = &sc->sc_ic;
3134 
3135 	return !sc->sc_nvm.sku_cap_mimo_disable &&
3136 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
3137 }
3138 
3139 void
3140 iwx_setup_ht_rates(struct iwx_softc *sc)
3141 {
3142 	struct ieee80211com *ic = &sc->sc_ic;
3143 	uint8_t rx_ant;
3144 
3145 	/* TX is supported with the same MCS as RX. */
3146 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
3147 
3148 	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
3149 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
3150 
3151 	if (!iwx_mimo_enabled(sc))
3152 		return;
3153 
3154 	rx_ant = iwx_fw_valid_rx_ant(sc);
3155 	if ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
3156 	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)
3157 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
3158 }
3159 
3160 void
3161 iwx_setup_vht_rates(struct iwx_softc *sc)
3162 {
3163 	struct ieee80211com *ic = &sc->sc_ic;
3164 	uint8_t rx_ant = iwx_fw_valid_rx_ant(sc);
3165 	int n;
3166 
3167 	ic->ic_vht_rxmcs = (IEEE80211_VHT_MCS_0_9 <<
3168 	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(1));
3169 
3170 	if (iwx_mimo_enabled(sc) &&
3171 	    ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
3172 	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)) {
3173 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_0_9 <<
3174 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3175 	} else {
3176 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3177 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3178 	}
3179 
3180 	for (n = 3; n <= IEEE80211_VHT_NUM_SS; n++) {
3181 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3182 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(n));
3183 	}
3184 
3185 	ic->ic_vht_txmcs = ic->ic_vht_rxmcs;
3186 }
3187 
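/*
 * Each Rx block ack (BA) session maintains a reorder buffer in which
 * received frames can wait for their predecessors, so that data is
 * passed up to net80211 in sequence.  Initialize such a buffer with
 * its window starting at sequence number 'ssn'.
 */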
3188 void
3189 iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
3190     uint16_t ssn, uint16_t buf_size)
3191 {
3192 	reorder_buf->head_sn = ssn;
3193 	reorder_buf->num_stored = 0;
3194 	reorder_buf->buf_size = buf_size;
3195 	reorder_buf->last_amsdu = 0;
3196 	reorder_buf->last_sub_index = 0;
3197 	reorder_buf->removed = 0;
3198 	reorder_buf->valid = 0;
3199 	reorder_buf->consec_oldsn_drops = 0;
3200 	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3201 	reorder_buf->consec_oldsn_prev_drop = 0;
3202 }
3203 
3204 void
3205 iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
3206 {
3207 	int i;
3208 	struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3209 	struct iwx_reorder_buf_entry *entry;
3210 
3211 	for (i = 0; i < reorder_buf->buf_size; i++) {
3212 		entry = &rxba->entries[i];
3213 		ml_purge(&entry->frames);
3214 		timerclear(&entry->reorder_time);
3215 	}
3216 
3217 	reorder_buf->removed = 1;
3218 	timeout_del(&reorder_buf->reorder_timer);
3219 	timerclear(&rxba->last_rx);
3220 	timeout_del(&rxba->session_timer);
3221 	rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
3222 }
3223 
3224 #define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
3225 
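/*
 * BA session timer callback: if no frame arrived within the session
 * timeout, ask net80211 to tear the BA session down; otherwise re-arm
 * the timer.
 */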
3226 void
3227 iwx_rx_ba_session_expired(void *arg)
3228 {
3229 	struct iwx_rxba_data *rxba = arg;
3230 	struct iwx_softc *sc = rxba->sc;
3231 	struct ieee80211com *ic = &sc->sc_ic;
3232 	struct ieee80211_node *ni = ic->ic_bss;
3233 	struct timeval now, timeout, expiry;
3234 	int s;
3235 
3236 	s = splnet();
3237 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0 &&
3238 	    ic->ic_state == IEEE80211_S_RUN &&
3239 	    rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
3240 		getmicrouptime(&now);
3241 		USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3242 		timeradd(&rxba->last_rx, &timeout, &expiry);
3243 		if (timercmp(&now, &expiry, <)) {
3244 			timeout_add_usec(&rxba->session_timer, rxba->timeout);
3245 		} else {
3246 			ic->ic_stats.is_ht_rx_ba_timeout++;
3247 			ieee80211_delba_request(ic, ni,
3248 			    IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
3249 		}
3250 	}
3251 	splx(s);
3252 }
3253 
3254 void
3255 iwx_rx_bar_frame_release(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3256     struct mbuf_list *ml)
3257 {
3258 	struct ieee80211com *ic = &sc->sc_ic;
3259 	struct ieee80211_node *ni = ic->ic_bss;
3260 	struct iwx_bar_frame_release *release = (void *)pkt->data;
3261 	struct iwx_reorder_buffer *buf;
3262 	struct iwx_rxba_data *rxba;
3263 	unsigned int baid, nssn, sta_id, tid;
3264 
3265 	if (iwx_rx_packet_payload_len(pkt) < sizeof(*release))
3266 		return;
3267 
3268 	baid = (le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_BAID_MASK) >>
3269 	    IWX_BAR_FRAME_RELEASE_BAID_SHIFT;
3270 	if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
3271 	    baid >= nitems(sc->sc_rxba_data))
3272 		return;
3273 
3274 	rxba = &sc->sc_rxba_data[baid];
3275 	if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
3276 		return;
3277 
3278 	tid = le32toh(release->sta_tid) & IWX_BAR_FRAME_RELEASE_TID_MASK;
3279 	sta_id = (le32toh(release->sta_tid) &
3280 	    IWX_BAR_FRAME_RELEASE_STA_MASK) >> IWX_BAR_FRAME_RELEASE_STA_SHIFT;
3281 	if (tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
3282 		return;
3283 
3284 	nssn = le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_NSSN_MASK;
3285 	buf = &rxba->reorder_buf;
3286 	iwx_release_frames(sc, ni, rxba, buf, nssn, ml);
3287 }
3288 
3289 void
3290 iwx_reorder_timer_expired(void *arg)
3291 {
3292 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3293 	struct iwx_reorder_buffer *buf = arg;
3294 	struct iwx_rxba_data *rxba = iwx_rxba_data_from_reorder_buf(buf);
3295 	struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
3296 	struct iwx_softc *sc = rxba->sc;
3297 	struct ieee80211com *ic = &sc->sc_ic;
3298 	struct ieee80211_node *ni = ic->ic_bss;
3299 	int i, s;
3300 	uint16_t sn = 0, index = 0;
3301 	int expired = 0;
3302 	int cont = 0;
3303 	struct timeval now, timeout, expiry;
3304 
3305 	if (!buf->num_stored || buf->removed)
3306 		return;
3307 
3308 	s = splnet();
3309 	getmicrouptime(&now);
3310 	USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3311 
3312 	for (i = 0; i < buf->buf_size ; i++) {
3313 		index = (buf->head_sn + i) % buf->buf_size;
3314 
3315 		if (ml_empty(&entries[index].frames)) {
3316 			/*
3317 			 * If there is a hole and the next frame didn't expire
3318 			 * If there is a hole and the next frame didn't expire,
3319 			 * we want to break and not advance SN.
3320 			cont = 0;
3321 			continue;
3322 		}
3323 		timeradd(&entries[index].reorder_time, &timeout, &expiry);
3324 		if (!cont && timercmp(&now, &expiry, <))
3325 			break;
3326 
3327 		expired = 1;
3328 		/* continue until next hole after this expired frame */
3329 		cont = 1;
3330 		sn = (buf->head_sn + (i + 1)) & 0xfff;
3331 	}
3332 
3333 	if (expired) {
3334 		/* SN is set to the last expired frame + 1 */
3335 		iwx_release_frames(sc, ni, rxba, buf, sn, &ml);
3336 		if_input(&sc->sc_ic.ic_if, &ml);
3337 		ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
3338 	} else {
3339 		/*
3340 		 * If no frame expired and there are stored frames, index is now
3341 		 * pointing to the first unexpired frame - modify reorder timeout
3342 		 * accordingly.
3343 		 */
3344 		timeout_add_usec(&buf->reorder_timer,
3345 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3346 	}
3347 
3348 	splx(s);
3349 }
3350 
3351 #define IWX_MAX_RX_BA_SESSIONS 16
3352 
3353 struct iwx_rxba_data *
3354 iwx_find_rxba_data(struct iwx_softc *sc, uint8_t tid)
3355 {
3356 	int i;
3357 
3358 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3359 		if (sc->sc_rxba_data[i].baid ==
3360 		    IWX_RX_REORDER_DATA_INVALID_BAID)
3361 			continue;
3362 		if (sc->sc_rxba_data[i].tid == tid)
3363 			return &sc->sc_rxba_data[i];
3364 	}
3365 
3366 	return NULL;
3367 }
3368 
3369 int
3370 iwx_sta_rx_agg_baid_cfg_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
3371     uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
3372     uint8_t *baid)
3373 {
3374 	struct iwx_rx_baid_cfg_cmd cmd;
3375 	uint32_t new_baid = 0;
3376 	int err;
3377 
3378 	splassert(IPL_NET);
3379 
3380 	memset(&cmd, 0, sizeof(cmd));
3381 
3382 	if (start) {
3383 		cmd.action = IWX_RX_BAID_ACTION_ADD;
3384 		cmd.alloc.sta_id_mask = htole32(1 << IWX_STATION_ID);
3385 		cmd.alloc.tid = tid;
3386 		cmd.alloc.ssn = htole16(ssn);
3387 		cmd.alloc.win_size = htole16(winsize);
3388 	} else {
3389 		struct iwx_rxba_data *rxba;
3390 
3391 		rxba = iwx_find_rxba_data(sc, tid);
3392 		if (rxba == NULL)
3393 			return ENOENT;
3394 		*baid = rxba->baid;
3395 
3396 		cmd.action = IWX_RX_BAID_ACTION_REMOVE;
3397 		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
3398 		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD) == 1) {
3399 			cmd.remove_v1.baid = rxba->baid;
3400 		} else {
3401 			cmd.remove.sta_id_mask = htole32(1 << IWX_STATION_ID);
3402 			cmd.remove.tid = tid;
3403 		}
3404 	}
3405 
3406 	err = iwx_send_cmd_pdu_status(sc, IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
3407 	    IWX_RX_BAID_ALLOCATION_CONFIG_CMD), sizeof(cmd), &cmd, &new_baid);
3408 	if (err)
3409 		return err;
3410 
3411 	if (start) {
3412 		if (new_baid >= nitems(sc->sc_rxba_data))
3413 			return ERANGE;
3414 		*baid = new_baid;
3415 	}
3416 
3417 	return 0;
3418 }
3419 
3420 int
3421 iwx_sta_rx_agg_sta_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
3422     uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
3423     uint8_t *baid)
3424 {
3425 	struct iwx_add_sta_cmd cmd;
3426 	struct iwx_node *in = (void *)ni;
3427 	int err;
3428 	uint32_t status;
3429 
3430 	splassert(IPL_NET);
3431 
3432 	memset(&cmd, 0, sizeof(cmd));
3433 
3434 	cmd.sta_id = IWX_STATION_ID;
3435 	cmd.mac_id_n_color
3436 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3437 	cmd.add_modify = IWX_STA_MODE_MODIFY;
3438 
3439 	if (start) {
3440 		cmd.add_immediate_ba_tid = (uint8_t)tid;
3441 		cmd.add_immediate_ba_ssn = htole16(ssn);
3442 		cmd.rx_ba_window = htole16(winsize);
3443 	} else {
3444 		struct iwx_rxba_data *rxba;
3445 
3446 		rxba = iwx_find_rxba_data(sc, tid);
3447 		if (rxba == NULL)
3448 			return ENOENT;
3449 		*baid = rxba->baid;
3450 
3451 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
3452 	}
3453 	cmd.modify_mask = start ? IWX_STA_MODIFY_ADD_BA_TID :
3454 	    IWX_STA_MODIFY_REMOVE_BA_TID;
3455 
3456 	status = IWX_ADD_STA_SUCCESS;
3457 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
3458 	    &status);
3459 	if (err)
3460 		return err;
3461 
3462 	if ((status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
3463 		return EIO;
3464 
3465 	if (!(status & IWX_ADD_STA_BAID_VALID_MASK))
3466 		return EINVAL;
3467 
3468 	if (start) {
3469 		*baid = (status & IWX_ADD_STA_BAID_MASK) >>
3470 		    IWX_ADD_STA_BAID_SHIFT;
3471 		if (*baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
3472 		    *baid >= nitems(sc->sc_rxba_data))
3473 			return ERANGE;
3474 	}
3475 
3476 	return 0;
3477 }
3478 
3479 void
3480 iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3481     uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3482 {
3483 	struct ieee80211com *ic = &sc->sc_ic;
3484 	int err, s;
3485 	struct iwx_rxba_data *rxba = NULL;
3486 	uint8_t baid = 0;
3487 
3488 	s = splnet();
3489 
3490 	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
3491 		ieee80211_addba_req_refuse(ic, ni, tid);
3492 		splx(s);
3493 		return;
3494 	}
3495 
3496 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) {
3497 		err = iwx_sta_rx_agg_baid_cfg_cmd(sc, ni, tid, ssn, winsize,
3498 		    timeout_val, start, &baid);
3499 	} else {
3500 		err = iwx_sta_rx_agg_sta_cmd(sc, ni, tid, ssn, winsize,
3501 		    timeout_val, start, &baid);
3502 	}
3503 	if (err) {
3504 		ieee80211_addba_req_refuse(ic, ni, tid);
3505 		splx(s);
3506 		return;
3507 	}
3508 
3509 	rxba = &sc->sc_rxba_data[baid];
3510 
3511 	/* Deaggregation is done in hardware. */
3512 	if (start) {
3513 		if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
3514 			ieee80211_addba_req_refuse(ic, ni, tid);
3515 			splx(s);
3516 			return;
3517 		}
3518 		rxba->sta_id = IWX_STATION_ID;
3519 		rxba->tid = tid;
3520 		rxba->baid = baid;
3521 		rxba->timeout = timeout_val;
3522 		getmicrouptime(&rxba->last_rx);
3523 		iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
3524 		    winsize);
3525 		if (timeout_val != 0) {
3526 			struct ieee80211_rx_ba *ba;
3527 			timeout_add_usec(&rxba->session_timer,
3528 			    timeout_val);
3529 			/* XXX disable net80211's BA timeout handler */
3530 			ba = &ni->ni_rx_ba[tid];
3531 			ba->ba_timeout_val = 0;
3532 		}
3533 	} else
3534 		iwx_clear_reorder_buffer(sc, rxba);
3535 
3536 	if (start) {
3537 		sc->sc_rx_ba_sessions++;
3538 		ieee80211_addba_req_accept(ic, ni, tid);
3539 	} else if (sc->sc_rx_ba_sessions > 0)
3540 		sc->sc_rx_ba_sessions--;
3541 
3542 	splx(s);
3543 }
3544 
3545 void
3546 iwx_mac_ctxt_task(void *arg)
3547 {
3548 	struct iwx_softc *sc = arg;
3549 	struct ieee80211com *ic = &sc->sc_ic;
3550 	struct iwx_node *in = (void *)ic->ic_bss;
3551 	int err, s = splnet();
3552 
3553 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3554 	    ic->ic_state != IEEE80211_S_RUN) {
3555 		refcnt_rele_wake(&sc->task_refs);
3556 		splx(s);
3557 		return;
3558 	}
3559 
3560 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
3561 	if (err)
3562 		printf("%s: failed to update MAC\n", DEVNAME(sc));
3563 
3564 	iwx_unprotect_session(sc, in);
3565 
3566 	refcnt_rele_wake(&sc->task_refs);
3567 	splx(s);
3568 }
3569 
3570 void
3571 iwx_phy_ctxt_task(void *arg)
3572 {
3573 	struct iwx_softc *sc = arg;
3574 	struct ieee80211com *ic = &sc->sc_ic;
3575 	struct iwx_node *in = (void *)ic->ic_bss;
3576 	struct ieee80211_node *ni = &in->in_ni;
3577 	uint8_t chains, sco, vht_chan_width;
3578 	int err, s = splnet();
3579 
3580 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3581 	    ic->ic_state != IEEE80211_S_RUN ||
3582 	    in->in_phyctxt == NULL) {
3583 		refcnt_rele_wake(&sc->task_refs);
3584 		splx(s);
3585 		return;
3586 	}
3587 
3588 	chains = iwx_mimo_enabled(sc) ? 2 : 1;
3589 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3590 	    IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
3591 	    ieee80211_node_supports_ht_chan40(ni))
3592 		sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3593 	else
3594 		sco = IEEE80211_HTOP0_SCO_SCN;
3595 	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
3596 	    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
3597 	    ieee80211_node_supports_vht_chan80(ni))
3598 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
3599 	else
3600 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
3601 	if (in->in_phyctxt->sco != sco ||
3602 	    in->in_phyctxt->vht_chan_width != vht_chan_width) {
3603 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
3604 		    in->in_phyctxt->channel, chains, chains, 0, sco,
3605 		    vht_chan_width);
3606 		if (err)
3607 			printf("%s: failed to update PHY\n", DEVNAME(sc));
3608 	}
3609 
3610 	refcnt_rele_wake(&sc->task_refs);
3611 	splx(s);
3612 }
3613 
3614 void
3615 iwx_updatechan(struct ieee80211com *ic)
3616 {
3617 	struct iwx_softc *sc = ic->ic_softc;
3618 
3619 	if (ic->ic_state == IEEE80211_S_RUN &&
3620 	    !task_pending(&sc->newstate_task))
3621 		iwx_add_task(sc, systq, &sc->phy_ctxt_task);
3622 }
3623 
3624 void
3625 iwx_updateprot(struct ieee80211com *ic)
3626 {
3627 	struct iwx_softc *sc = ic->ic_softc;
3628 
3629 	if (ic->ic_state == IEEE80211_S_RUN &&
3630 	    !task_pending(&sc->newstate_task))
3631 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3632 }
3633 
3634 void
3635 iwx_updateslot(struct ieee80211com *ic)
3636 {
3637 	struct iwx_softc *sc = ic->ic_softc;
3638 
3639 	if (ic->ic_state == IEEE80211_S_RUN &&
3640 	    !task_pending(&sc->newstate_task))
3641 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3642 }
3643 
3644 void
3645 iwx_updateedca(struct ieee80211com *ic)
3646 {
3647 	struct iwx_softc *sc = ic->ic_softc;
3648 
3649 	if (ic->ic_state == IEEE80211_S_RUN &&
3650 	    !task_pending(&sc->newstate_task))
3651 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3652 }
3653 
3654 void
3655 iwx_updatedtim(struct ieee80211com *ic)
3656 {
3657 	struct iwx_softc *sc = ic->ic_softc;
3658 
3659 	if (ic->ic_state == IEEE80211_S_RUN &&
3660 	    !task_pending(&sc->newstate_task))
3661 		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3662 }
3663 
3664 void
3665 iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
3666     uint8_t tid)
3667 {
3668 	struct ieee80211com *ic = &sc->sc_ic;
3669 	struct ieee80211_tx_ba *ba;
3670 	int err, qid;
3671 	struct iwx_tx_ring *ring;
3672 
3673 	/* Ensure we can map this TID to an aggregation queue. */
3674 	if (tid >= IWX_MAX_TID_COUNT)
3675 		return;
3676 
3677 	ba = &ni->ni_tx_ba[tid];
3678 	if (ba->ba_state != IEEE80211_BA_REQUESTED)
3679 		return;
3680 
3681 	qid = sc->aggqid[tid];
3682 	if (qid == 0) {
3683 		/* Firmware should pick the next unused Tx queue. */
3684 		qid = fls(sc->qenablemsk);
3685 	}
3686 
3687 	/*
3688 	 * Simply enable the queue.
3689 	 * Firmware handles Tx BA session setup and teardown.
3690 	 */
3691 	if ((sc->qenablemsk & (1 << qid)) == 0) {
3692 		if (!iwx_nic_lock(sc)) {
3693 			ieee80211_addba_resp_refuse(ic, ni, tid,
3694 			    IEEE80211_STATUS_UNSPECIFIED);
3695 			return;
3696 		}
3697 		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
3698 		    IWX_TX_RING_COUNT);
3699 		iwx_nic_unlock(sc);
3700 		if (err) {
3701 			printf("%s: could not enable Tx queue %d "
3702 			    "(error %d)\n", DEVNAME(sc), qid, err);
3703 			ieee80211_addba_resp_refuse(ic, ni, tid,
3704 			    IEEE80211_STATUS_UNSPECIFIED);
3705 			return;
3706 		}
3707 
3708 		ba->ba_winstart = 0;
3709 	} else
3710 		ba->ba_winstart = ni->ni_qos_txseqs[tid];
3711 
3712 	ba->ba_winend = (ba->ba_winstart + ba->ba_winsize - 1) & 0xfff;
3713 
3714 	ring = &sc->txq[qid];
3715 	ba->ba_timeout_val = 0;
3716 	ieee80211_addba_resp_accept(ic, ni, tid);
3717 	sc->aggqid[tid] = qid;
3718 }
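
/*
 * Worked example for the queue selection above (mask made up): with
 * qenablemsk = 0x0f (queues 0-3 enabled), fls() returns 4, i.e. one
 * past the highest enabled queue, and that queue is then enabled on
 * demand by the (sc->qenablemsk & (1 << qid)) == 0 branch.
 */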
3719 
3720 void
3721 iwx_ba_task(void *arg)
3722 {
3723 	struct iwx_softc *sc = arg;
3724 	struct ieee80211com *ic = &sc->sc_ic;
3725 	struct ieee80211_node *ni = ic->ic_bss;
3726 	int s = splnet();
3727 	int tid;
3728 
3729 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3730 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3731 			break;
3732 		if (sc->ba_rx.start_tidmask & (1 << tid)) {
3733 			struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3734 			iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3735 			    ba->ba_winsize, ba->ba_timeout_val, 1);
3736 			sc->ba_rx.start_tidmask &= ~(1 << tid);
3737 		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3738 			iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3739 			sc->ba_rx.stop_tidmask &= ~(1 << tid);
3740 		}
3741 	}
3742 
3743 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3744 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3745 			break;
3746 		if (sc->ba_tx.start_tidmask & (1 << tid)) {
3747 			iwx_sta_tx_agg_start(sc, ni, tid);
3748 			sc->ba_tx.start_tidmask &= ~(1 << tid);
3749 		}
3750 	}
3751 
3752 	refcnt_rele_wake(&sc->task_refs);
3753 	splx(s);
3754 }
3755 
3756 /*
3757  * This function is called by upper layer when an ADDBA request is received
3758  * from another STA and before the ADDBA response is sent.
3759  */
3760 int
3761 iwx_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3762     uint8_t tid)
3763 {
3764 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3765 
3766 	if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
3767 	    tid >= IWX_MAX_TID_COUNT)
3768 		return ENOSPC;
3769 
3770 	if (sc->ba_rx.start_tidmask & (1 << tid))
3771 		return EBUSY;
3772 
3773 	sc->ba_rx.start_tidmask |= (1 << tid);
3774 	iwx_add_task(sc, systq, &sc->ba_task);
3775 
3776 	return EBUSY;
3777 }
3778 
3779 /*
3780  * This function is called by upper layer on teardown of an HT-immediate
3781  * Block Ack agreement (e.g. upon receipt of a DELBA frame).
3782  */
3783 void
3784 iwx_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3785     uint8_t tid)
3786 {
3787 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3788 
3789 	if (tid >= IWX_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3790 		return;
3791 
3792 	sc->ba_rx.stop_tidmask |= (1 << tid);
3793 	iwx_add_task(sc, systq, &sc->ba_task);
3794 }
3795 
3796 int
3797 iwx_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3798     uint8_t tid)
3799 {
3800 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3801 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3802 
3803 	/*
3804 	 * Require a firmware version which uses an internal AUX queue.
3805 	 * The value of IWX_FIRST_AGG_TX_QUEUE would be incorrect otherwise.
3806 	 */
3807 	if (sc->first_data_qid != IWX_DQA_CMD_QUEUE + 1)
3808 		return ENOTSUP;
3809 
3810 	/* Ensure we can map this TID to an aggregation queue. */
3811 	if (tid >= IWX_MAX_TID_COUNT)
3812 		return EINVAL;
3813 
3814 	/* We only support a fixed Tx aggregation window size, for now. */
3815 	if (ba->ba_winsize != IWX_FRAME_LIMIT)
3816 		return ENOTSUP;
3817 
3818 	/* Is firmware already using an agg queue with this TID? */
3819 	if (sc->aggqid[tid] != 0)
3820 		return ENOSPC;
3821 
3822 	/* Are we already processing an ADDBA request? */
3823 	if (sc->ba_tx.start_tidmask & (1 << tid))
3824 		return EBUSY;
3825 
3826 	sc->ba_tx.start_tidmask |= (1 << tid);
3827 	iwx_add_task(sc, systq, &sc->ba_task);
3828 
3829 	return EBUSY;
3830 }
3831 
3832 void
3833 iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
3834 {
3835 	uint32_t mac_addr0, mac_addr1;
3836 
3837 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
3838 
3839 	if (!iwx_nic_lock(sc))
3840 		return;
3841 
3842 	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_STRAP(sc)));
3843 	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_STRAP(sc)));
3844 
3845 	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
3846 
3847 	/* If OEM fused a valid address, use it instead of the one in OTP. */
3848 	if (iwx_is_valid_mac_addr(data->hw_addr)) {
3849 		iwx_nic_unlock(sc);
3850 		return;
3851 	}
3852 
3853 	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_OTP(sc)));
3854 	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_OTP(sc)));
3855 
3856 	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
3857 
3858 	iwx_nic_unlock(sc);
3859 }
3860 
3861 int
3862 iwx_is_valid_mac_addr(const uint8_t *addr)
3863 {
3864 	static const uint8_t reserved_mac[] = {
3865 		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3866 	};
3867 
3868 	return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3869 	    memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3870 	    memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3871 	    !ETHER_IS_MULTICAST(addr));
3872 }
3873 
3874 void
3875 iwx_flip_hw_address(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t *dest)
3876 {
3877 	const uint8_t *hw_addr;
3878 
3879 	hw_addr = (const uint8_t *)&mac_addr0;
3880 	dest[0] = hw_addr[3];
3881 	dest[1] = hw_addr[2];
3882 	dest[2] = hw_addr[1];
3883 	dest[3] = hw_addr[0];
3884 
3885 	hw_addr = (const uint8_t *)&mac_addr1;
3886 	dest[4] = hw_addr[1];
3887 	dest[5] = hw_addr[0];
3888 }
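
/*
 * Hypothetical standalone check (not part of the driver) of the byte
 * flip above: the two CSR words store the MAC address byte-reversed, so
 * unpicking their little-endian encodings yields the address with its
 * most significant (OUI) octet first. The example values are made up.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	/* Little-endian byte layouts of the two 32-bit CSR words. */
	uint8_t w0[4] = { 0xc3, 0xb2, 0xa1, 0x00 };	/* mac_addr0 */
	uint8_t w1[4] = { 0xe5, 0xd4, 0x00, 0x00 };	/* mac_addr1 */
	uint8_t dest[6];

	dest[0] = w0[3]; dest[1] = w0[2]; dest[2] = w0[1]; dest[3] = w0[0];
	dest[4] = w1[1]; dest[5] = w1[0];

	/* Result: 00:a1:b2:c3:d4:e5. */
	assert(dest[0] == 0x00 && dest[3] == 0xc3 && dest[5] == 0xe5);
	return 0;
}
#endif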
3889 
3890 int
3891 iwx_nvm_get(struct iwx_softc *sc)
3892 {
3893 	struct iwx_nvm_get_info cmd = {};
3894 	struct iwx_nvm_data *nvm = &sc->sc_nvm;
3895 	struct iwx_host_cmd hcmd = {
3896 		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
3897 		.data = { &cmd, },
3898 		.len = { sizeof(cmd) },
3899 		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3900 		    IWX_NVM_GET_INFO)
3901 	};
3902 	int err;
3903 	uint32_t mac_flags;
3904 	/*
3905 	 * All the values in iwx_nvm_get_info_rsp v4 are the same as
3906 	 * in v3, except for the channel profile part of the
3907 	 * regulatory.  So we can just access the new struct, with the
3908 	 * exception of the latter.
3909 	 */
3910 	struct iwx_nvm_get_info_rsp *rsp;
3911 	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
3912 	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
3913 	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
3914 
3915 	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
3916 	err = iwx_send_cmd(sc, &hcmd);
3917 	if (err)
3918 		return err;
3919 
3920 	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
3921 		err = EIO;
3922 		goto out;
3923 	}
3924 
3925 	memset(nvm, 0, sizeof(*nvm));
3926 
3927 	iwx_set_mac_addr_from_csr(sc, nvm);
3928 	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
3929 		printf("%s: no valid mac address was found\n", DEVNAME(sc));
3930 		err = EINVAL;
3931 		goto out;
3932 	}
3933 
3934 	rsp = (void *)hcmd.resp_pkt->data;
3935 
3936 	/* Initialize general data */
3937 	nvm->nvm_version = le16toh(rsp->general.nvm_version);
3938 	nvm->n_hw_addrs = rsp->general.n_hw_addrs;
3939 
3940 	/* Initialize MAC sku data */
3941 	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
3942 	nvm->sku_cap_11ac_enable =
3943 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
3944 	nvm->sku_cap_11n_enable =
3945 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
3946 	nvm->sku_cap_11ax_enable =
3947 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
3948 	nvm->sku_cap_band_24GHz_enable =
3949 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
3950 	nvm->sku_cap_band_52GHz_enable =
3951 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
3952 	nvm->sku_cap_mimo_disable =
3953 		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
3954 
3955 	/* Initialize PHY sku data */
3956 	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
3957 	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);
3958 
3959 	if (le32toh(rsp->regulatory.lar_enabled) &&
3960 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
3961 		nvm->lar_enabled = 1;
3962 	}
3963 
3964 	if (v4) {
3965 		iwx_init_channel_map(sc, NULL,
3966 		    rsp->regulatory.channel_profile, IWX_NUM_CHANNELS);
3967 	} else {
3968 		rsp_v3 = (void *)rsp;
3969 		iwx_init_channel_map(sc, rsp_v3->regulatory.channel_profile,
3970 		    NULL, IWX_NUM_CHANNELS_V1);
3971 	}
3972 out:
3973 	iwx_free_resp(sc, &hcmd);
3974 	return err;
3975 }
3976 
3977 int
3978 iwx_load_firmware(struct iwx_softc *sc)
3979 {
3980 	struct iwx_fw_sects *fws;
3981 	int err;
3982 
3983 	splassert(IPL_NET);
3984 
3985 	sc->sc_uc.uc_intr = 0;
3986 	sc->sc_uc.uc_ok = 0;
3987 
3988 	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
3989 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
3990 		err = iwx_ctxt_info_gen3_init(sc, fws);
3991 	else
3992 		err = iwx_ctxt_info_init(sc, fws);
3993 	if (err) {
3994 		printf("%s: could not init context info\n", DEVNAME(sc));
3995 		return err;
3996 	}
3997 
3998 	/* wait for the firmware to load */
3999 	err = tsleep_nsec(&sc->sc_uc, 0, "iwxuc", SEC_TO_NSEC(1));
4000 	if (err || !sc->sc_uc.uc_ok) {
4001 		printf("%s: could not load firmware, %d\n", DEVNAME(sc), err);
4002 		iwx_ctxt_info_free_paging(sc);
4003 	}
4004 
4005 	iwx_dma_contig_free(&sc->iml_dma);
4006 	iwx_ctxt_info_free_fw_img(sc);
4007 
4008 	if (!sc->sc_uc.uc_ok)
4009 		return EINVAL;
4010 
4011 	return err;
4012 }
4013 
4014 int
4015 iwx_start_fw(struct iwx_softc *sc)
4016 {
4017 	int err;
4018 
4019 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
4020 
4021 	iwx_disable_interrupts(sc);
4022 
4023 	/* make sure rfkill handshake bits are cleared */
4024 	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
4025 	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
4026 	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4027 
4028 	/* clear (again), then enable firmware load interrupt */
4029 	IWX_WRITE(sc, IWX_CSR_INT, ~0);
4030 
4031 	err = iwx_nic_init(sc);
4032 	if (err) {
4033 		printf("%s: unable to init nic\n", DEVNAME(sc));
4034 		return err;
4035 	}
4036 
4037 	iwx_enable_fwload_interrupt(sc);
4038 
4039 	return iwx_load_firmware(sc);
4040 }
4041 
4042 int
4043 iwx_pnvm_setup_fragmented(struct iwx_softc *sc, uint8_t **pnvm_data,
4044     size_t *pnvm_size, int pnvm_segs)
4045 {
4046 	struct iwx_pnvm_info_dram *pnvm_info;
4047 	int i, err;
4048 
4049 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma,
4050 	    sizeof(struct iwx_pnvm_info_dram), 0);
4051 	if (err)
4052 		return err;
4053 	pnvm_info = (struct iwx_pnvm_info_dram *)sc->pnvm_dma.vaddr;
4054 
4055 	for (i = 0; i < pnvm_segs; i++) {
4056 		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_seg_dma[i],
4057 		    pnvm_size[i], 0);
4058 		if (err)
4059 			goto fail;
4060 		memcpy(sc->pnvm_seg_dma[i].vaddr, pnvm_data[i], pnvm_size[i]);
4061 		pnvm_info->pnvm_img[i] = htole64(sc->pnvm_seg_dma[i].paddr);
4062 		sc->pnvm_size += pnvm_size[i];
4063 		sc->pnvm_segs++;
4064 	}
4065 
4066 	return 0;
4067 
4068 fail:
4069 	for (i = 0; i < pnvm_segs; i++)
4070 		iwx_dma_contig_free(&sc->pnvm_seg_dma[i]);
4071 	sc->pnvm_size = 0;
4072 	sc->pnvm_segs = 0;
4073 	iwx_dma_contig_free(&sc->pnvm_dma);
4074 
4075 	return err;
4076 }
4077 
4078 int
4079 iwx_pnvm_setup(struct iwx_softc *sc, uint8_t **pnvm_data,
4080     size_t *pnvm_size, int pnvm_segs)
4081 {
4082 	uint8_t *data;
4083 	size_t size = 0;
4084 	int i, err;
4085 
4086 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
4087 		return iwx_pnvm_setup_fragmented(sc, pnvm_data, pnvm_size, pnvm_segs);
4088 
4089 	for (i = 0; i < pnvm_segs; i++)
4090 		size += pnvm_size[i];
4091 
4092 	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma, size, 0);
4093 	if (err)
4094 		return err;
4095 
4096 	data = sc->pnvm_dma.vaddr;
4097 	for (i = 0; i < pnvm_segs; i++) {
4098 		memcpy(data, pnvm_data[i], pnvm_size[i]);
4099 		data += pnvm_size[i];
4100 	}
4101 	sc->pnvm_size = size;
4102 
4103 	return 0;
4104 }
4105 
4106 int
4107 iwx_pnvm_handle_section(struct iwx_softc *sc, const uint8_t *data,
4108     size_t len)
4109 {
4110 	const struct iwx_ucode_tlv *tlv;
4111 	uint32_t sha1 = 0;
4112 	uint16_t mac_type = 0, rf_id = 0;
4113 	uint8_t *pnvm_data[IWX_MAX_DRAM_ENTRY];
4114 	size_t pnvm_size[IWX_MAX_DRAM_ENTRY];
4115 	int pnvm_segs = 0;
4116 	int hw_match = 0;
4117 	uint32_t size = 0;
4118 	int err;
4119 	int i;
4120 
4121 	while (len >= sizeof(*tlv)) {
4122 		uint32_t tlv_len, tlv_type;
4123 
4124 		len -= sizeof(*tlv);
4125 		tlv = (const void *)data;
4126 
4127 		tlv_len = le32toh(tlv->length);
4128 		tlv_type = le32toh(tlv->type);
4129 
4130 		if (len < tlv_len) {
4131 			printf("%s: invalid TLV len: %zd/%u\n",
4132 			    DEVNAME(sc), len, tlv_len);
4133 			err = EINVAL;
4134 			goto out;
4135 		}
4136 
4137 		data += sizeof(*tlv);
4138 
4139 		switch (tlv_type) {
4140 		case IWX_UCODE_TLV_PNVM_VERSION:
4141 			if (tlv_len < sizeof(uint32_t))
4142 				break;
4143 
4144 			sha1 = le32_to_cpup((const uint32_t *)data);
4145 			break;
4146 		case IWX_UCODE_TLV_HW_TYPE:
4147 			if (tlv_len < 2 * sizeof(uint16_t))
4148 				break;
4149 
4150 			if (hw_match)
4151 				break;
4152 
4153 			mac_type = le16_to_cpup((const uint16_t *)data);
4154 			rf_id = le16_to_cpup((const uint16_t *)(data +
4155 			    sizeof(uint16_t)));
4156 
4157 			if (mac_type == IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev) &&
4158 			    rf_id == IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id))
4159 				hw_match = 1;
4160 			break;
4161 		case IWX_UCODE_TLV_SEC_RT: {
4162 			const struct iwx_pnvm_section *section;
4163 			uint32_t data_len;
4164 
4165 			section = (const void *)data;
4166 			data_len = tlv_len - sizeof(*section);
4167 
4168 			/* TODO: remove, this is a deprecated separator */
4169 			if (le32_to_cpup((const uint32_t *)data) == 0xddddeeee)
4170 				break;
4171 
4172 			if (pnvm_segs >= nitems(pnvm_data)) {
4173 				err = ERANGE;
4174 				goto out;
4175 			}
4176 
4177 			pnvm_data[pnvm_segs] = malloc(data_len, M_DEVBUF,
4178 			    M_WAITOK | M_CANFAIL | M_ZERO);
4179 			if (pnvm_data[pnvm_segs] == NULL) {
4180 				err = ENOMEM;
4181 				goto out;
4182 			}
4183 			memcpy(pnvm_data[pnvm_segs], section->data, data_len);
4184 			pnvm_size[pnvm_segs++] = data_len;
4185 			size += data_len;
4186 			break;
4187 		}
4188 		case IWX_UCODE_TLV_PNVM_SKU:
4189 			/* New PNVM section started, stop parsing. */
4190 			goto done;
4191 		default:
4192 			break;
4193 		}
4194 
4195 		if (roundup(tlv_len, 4) > len)
4196 			break;
4197 		len -= roundup(tlv_len, 4);
4198 		data += roundup(tlv_len, 4);
4199 	}
4200 done:
4201 	if (!hw_match || size == 0) {
4202 		err = ENOENT;
4203 		goto out;
4204 	}
4205 
4206 	err = iwx_pnvm_setup(sc, pnvm_data, pnvm_size, pnvm_segs);
4207 	if (err) {
4208 		printf("%s: could not allocate DMA memory for PNVM\n",
4209 		    DEVNAME(sc));
4210 		err = ENOMEM;
4211 		goto out;
4212 	}
4213 
4214 	iwx_ctxt_info_gen3_set_pnvm(sc);
4215 	sc->sc_pnvm_ver = sha1;
4216 out:
4217 	for (i = 0; i < pnvm_segs; i++)
4218 		free(pnvm_data[i], M_DEVBUF, pnvm_size[i]);
4219 	return err;
4220 }
4221 
4222 int
4223 iwx_pnvm_parse(struct iwx_softc *sc, const uint8_t *data, size_t len)
4224 {
4225 	const struct iwx_ucode_tlv *tlv;
4226 
4227 	while (len >= sizeof(*tlv)) {
4228 		uint32_t tlv_len, tlv_type;
4229 
4230 		len -= sizeof(*tlv);
4231 		tlv = (const void *)data;
4232 
4233 		tlv_len = le32toh(tlv->length);
4234 		tlv_type = le32toh(tlv->type);
4235 
4236 		if (len < tlv_len || roundup(tlv_len, 4) > len)
4237 			return EINVAL;
4238 
4239 		if (tlv_type == IWX_UCODE_TLV_PNVM_SKU) {
4240 			const struct iwx_sku_id *sku_id =
4241 				(const void *)(data + sizeof(*tlv));
4242 
4243 			data += sizeof(*tlv) + roundup(tlv_len, 4);
4244 			len -= roundup(tlv_len, 4);
4245 
4246 			if (sc->sc_sku_id[0] == le32toh(sku_id->data[0]) &&
4247 			    sc->sc_sku_id[1] == le32toh(sku_id->data[1]) &&
4248 			    sc->sc_sku_id[2] == le32toh(sku_id->data[2]) &&
4249 			    iwx_pnvm_handle_section(sc, data, len) == 0)
4250 				return 0;
4251 		} else {
4252 			data += sizeof(*tlv) + roundup(tlv_len, 4);
4253 			len -= roundup(tlv_len, 4);
4254 		}
4255 	}
4256 
4257 	return ENOENT;
4258 }
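
/*
 * Minimal sketch (not part of the driver) of the TLV walk pattern shared
 * by iwx_pnvm_handle_section() and iwx_pnvm_parse() above: a 32-bit
 * { type, length } header followed by a payload padded to a 4-byte
 * boundary. le32toh() conversions are omitted, so this assumes a
 * little-endian host; the callback type is invented for illustration.
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct tlv_hdr {
	uint32_t type;
	uint32_t length;
};

static void
tlv_walk(const uint8_t *data, size_t len,
    void (*cb)(uint32_t type, const uint8_t *payload, uint32_t plen))
{
	struct tlv_hdr hdr;
	size_t padded;

	while (len >= sizeof(hdr)) {
		memcpy(&hdr, data, sizeof(hdr)); /* avoid misaligned loads */
		len -= sizeof(hdr);
		data += sizeof(hdr);
		if (hdr.length > len)
			break;		/* truncated TLV */
		cb(hdr.type, data, hdr.length);
		padded = (hdr.length + 3) & ~(size_t)3;
		if (padded > len)
			break;		/* padding runs past the buffer */
		len -= padded;
		data += padded;
	}
}
#endif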
4259 
4260 /* Make AX210 firmware loading context point at PNVM image in DMA memory. */
4261 void
4262 iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *sc)
4263 {
4264 	struct iwx_prph_scratch *prph_scratch;
4265 	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
4266 	int i;
4267 
4268 	prph_scratch = sc->prph_scratch_dma.vaddr;
4269 	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
4270 
4271 	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = htole64(sc->pnvm_dma.paddr);
4272 	prph_sc_ctrl->pnvm_cfg.pnvm_size = htole32(sc->pnvm_size);
4273 
4274 	bus_dmamap_sync(sc->sc_dmat, sc->pnvm_dma.map, 0,
4275 	    sc->pnvm_dma.size, BUS_DMASYNC_PREWRITE);
4276 	for (i = 0; i < sc->pnvm_segs; i++)
4277 		bus_dmamap_sync(sc->sc_dmat, sc->pnvm_seg_dma[i].map, 0,
4278 		    sc->pnvm_seg_dma[i].size, BUS_DMASYNC_PREWRITE);
4279 }
4280 
4281 /*
4282  * Load platform-NVM (non-volatile-memory) data from the filesystem.
4283  * This data apparently contains regulatory information and affects device
4284  * channel configuration.
4285  * The SKU of AX210 devices tells us which PNVM file section is needed.
4286  * Pre-AX210 devices store NVM data onboard.
4287  */
4288 int
4289 iwx_load_pnvm(struct iwx_softc *sc)
4290 {
4291 	const int wait_flags = IWX_PNVM_COMPLETE;
4292 	int s, err = 0;
4293 	u_char *pnvm_data = NULL;
4294 	size_t pnvm_size = 0;
4295 
4296 	if (sc->sc_sku_id[0] == 0 &&
4297 	    sc->sc_sku_id[1] == 0 &&
4298 	    sc->sc_sku_id[2] == 0)
4299 		return 0;
4300 
4301 	if (sc->sc_pnvm_name) {
4302 		if (sc->pnvm_dma.vaddr == NULL) {
4303 			err = loadfirmware(sc->sc_pnvm_name,
4304 			    &pnvm_data, &pnvm_size);
4305 			if (err) {
4306 				printf("%s: could not read %s (error %d)\n",
4307 				    DEVNAME(sc), sc->sc_pnvm_name, err);
4308 				return err;
4309 			}
4310 
4311 			err = iwx_pnvm_parse(sc, pnvm_data, pnvm_size);
4312 			if (err && err != ENOENT) {
4313 				free(pnvm_data, M_DEVBUF, pnvm_size);
4314 				return err;
4315 			}
4316 		} else
4317 			iwx_ctxt_info_gen3_set_pnvm(sc);
4318 	}
4319 
4320 	s = splnet();
4321 
4322 	if (!iwx_nic_lock(sc)) {
4323 		splx(s);
4324 		free(pnvm_data, M_DEVBUF, pnvm_size);
4325 		return EBUSY;
4326 	}
4327 
4328 	/*
4329 	 * If we don't have a platform NVM file, simply ask firmware
4330 	 * to proceed without it.
4331 	 */
4332 
4333 	iwx_write_umac_prph(sc, IWX_UREG_DOORBELL_TO_ISR6,
4334 	    IWX_UREG_DOORBELL_TO_ISR6_PNVM);
4335 
4336 	/* Wait for the pnvm complete notification from firmware. */
4337 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4338 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
4339 		    SEC_TO_NSEC(2));
4340 		if (err)
4341 			break;
4342 	}
4343 
4344 	splx(s);
4345 	iwx_nic_unlock(sc);
4346 	free(pnvm_data, M_DEVBUF, pnvm_size);
4347 	return err;
4348 }
4349 
4350 int
4351 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
4352 {
4353 	struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
4354 		.valid = htole32(valid_tx_ant),
4355 	};
4356 
4357 	return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
4358 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4359 }
4360 
4361 int
4362 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
4363 {
4364 	struct iwx_phy_cfg_cmd phy_cfg_cmd;
4365 
4366 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
4367 	phy_cfg_cmd.calib_control.event_trigger =
4368 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
4369 	phy_cfg_cmd.calib_control.flow_trigger =
4370 	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
4371 
4372 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
4373 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4374 }
4375 
4376 int
4377 iwx_send_dqa_cmd(struct iwx_softc *sc)
4378 {
4379 	struct iwx_dqa_enable_cmd dqa_cmd = {
4380 		.cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
4381 	};
4382 	uint32_t cmd_id;
4383 
4384 	cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
4385 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4386 }
4387 
4388 int
4389 iwx_load_ucode_wait_alive(struct iwx_softc *sc)
4390 {
4391 	int err;
4392 
4393 	err = iwx_read_firmware(sc);
4394 	if (err)
4395 		return err;
4396 
4397 	err = iwx_start_fw(sc);
4398 	if (err)
4399 		return err;
4400 
4401 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4402 		err = iwx_load_pnvm(sc);
4403 		if (err)
4404 			return err;
4405 	}
4406 
4407 	iwx_post_alive(sc);
4408 
4409 	return 0;
4410 }
4411 
4412 int
4413 iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
4414 {
4415 	const int wait_flags = IWX_INIT_COMPLETE;
4416 	struct iwx_nvm_access_complete_cmd nvm_complete = {};
4417 	struct iwx_init_extended_cfg_cmd init_cfg = {
4418 		.init_flags = htole32(IWX_INIT_NVM),
4419 	};
4420 	int err, s;
4421 
4422 	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
4423 		printf("%s: radio is disabled by hardware switch\n",
4424 		    DEVNAME(sc));
4425 		return EPERM;
4426 	}
4427 
4428 	s = splnet();
4429 	sc->sc_init_complete = 0;
4430 	err = iwx_load_ucode_wait_alive(sc);
4431 	if (err) {
4432 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
4433 		splx(s);
4434 		return err;
4435 	}
4436 
4437 	/*
4438 	 * Send the init config command to mark that we are sending NVM
4439 	 * access commands.
4440 	 */
4441 	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
4442 	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
4443 	if (err) {
4444 		splx(s);
4445 		return err;
4446 	}
4447 
4448 	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
4449 	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
4450 	if (err) {
4451 		splx(s);
4452 		return err;
4453 	}
4454 
4455 	/* Wait for the init complete notification from the firmware. */
4456 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4457 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
4458 		    SEC_TO_NSEC(2));
4459 		if (err) {
4460 			splx(s);
4461 			return err;
4462 		}
4463 	}
4464 	splx(s);
4465 	if (readnvm) {
4466 		err = iwx_nvm_get(sc);
4467 		if (err) {
4468 			printf("%s: failed to read nvm\n", DEVNAME(sc));
4469 			return err;
4470 		}
4471 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
4472 			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
4473 			    sc->sc_nvm.hw_addr);
4474 
4475 	}
4476 
4477 	/*
4478 	 * Only enable the MLD API on MA devices for now, as the API 77
4479 	 * firmware on some of the older devices also claims MLD support
4480 	 * but doesn't actually work.
4481 	 */
4482 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_MLD_API_SUPPORT) &&
4483 	    IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev) == IWX_CFG_MAC_TYPE_MA)
4484 		sc->sc_use_mld_api = 1;
4485 
4486 	return 0;
4487 }
4488 
4489 int
4490 iwx_config_ltr(struct iwx_softc *sc)
4491 {
4492 	struct iwx_ltr_config_cmd cmd = {
4493 		.flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
4494 	};
4495 
4496 	if (!sc->sc_ltr_enabled)
4497 		return 0;
4498 
4499 	return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4500 }
4501 
4502 void
4503 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx)
4504 {
4505 	struct iwx_rx_data *data = &ring->data[idx];
4506 
4507 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4508 		struct iwx_rx_transfer_desc *desc = ring->desc;
4509 		desc[idx].rbid = htole16(idx & 0xffff);
4510 		desc[idx].addr = htole64(data->map->dm_segs[0].ds_addr);
4511 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4512 		    idx * sizeof(*desc), sizeof(*desc),
4513 		    BUS_DMASYNC_PREWRITE);
4514 	} else {
4515 		((uint64_t *)ring->desc)[idx] =
4516 		    htole64(data->map->dm_segs[0].ds_addr | (idx & 0x0fff));
4517 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4518 		    idx * sizeof(uint64_t), sizeof(uint64_t),
4519 		    BUS_DMASYNC_PREWRITE);
4520 	}
4521 }
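
/*
 * Sketch (not part of the driver) of the pre-AX210 free-RB descriptor
 * packing above: the 12-bit buffer index is OR'd into the low bits of
 * the 64-bit DMA address, which is only lossless when the buffer is
 * aligned to at least 4 KB. The address value is made up.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t addr = 0x12345000ULL;	/* 4 KB-aligned DMA address */
	uint16_t idx = 0x0ab;		/* ring index, fits in 12 bits */
	uint64_t desc = addr | (idx & 0x0fff);

	assert((desc & ~0xfffULL) == addr);	/* address recoverable */
	assert((desc & 0xfffULL) == idx);	/* index recoverable */
	return 0;
}
#endif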
4522 
4523 int
4524 iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
4525 {
4526 	struct iwx_rx_ring *ring = &sc->rxq;
4527 	struct iwx_rx_data *data = &ring->data[idx];
4528 	struct mbuf *m;
4529 	int err;
4530 	int fatal = 0;
4531 
4532 	m = m_gethdr(M_DONTWAIT, MT_DATA);
4533 	if (m == NULL)
4534 		return ENOBUFS;
4535 
4536 	if (size <= MCLBYTES) {
4537 		MCLGET(m, M_DONTWAIT);
4538 	} else {
4539 		MCLGETL(m, M_DONTWAIT, IWX_RBUF_SIZE);
4540 	}
4541 	if ((m->m_flags & M_EXT) == 0) {
4542 		m_freem(m);
4543 		return ENOBUFS;
4544 	}
4545 
4546 	if (data->m != NULL) {
4547 		bus_dmamap_unload(sc->sc_dmat, data->map);
4548 		fatal = 1;
4549 	}
4550 
4551 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4552 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4553 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4554 	if (err) {
4555 		/* XXX */
4556 		if (fatal)
4557 			panic("%s: could not load RX mbuf", DEVNAME(sc));
4558 		m_freem(m);
4559 		return err;
4560 	}
4561 	data->m = m;
4562 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
4563 
4564 	/* Update RX descriptor. */
4565 	iwx_update_rx_desc(sc, ring, idx);
4566 
4567 	return 0;
4568 }
4569 
4570 int
4571 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
4572     struct iwx_rx_mpdu_desc *desc)
4573 {
4574 	int energy_a, energy_b;
4575 
4576 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4577 		energy_a = desc->v3.energy_a;
4578 		energy_b = desc->v3.energy_b;
4579 	} else {
4580 		energy_a = desc->v1.energy_a;
4581 		energy_b = desc->v1.energy_b;
4582 	}
4583 	energy_a = energy_a ? -energy_a : -256;
4584 	energy_b = energy_b ? -energy_b : -256;
4585 	return MAX(energy_a, energy_b);
4586 }
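
/*
 * Worked example for the conversion above (values made up): firmware
 * reports per-chain energy as a positive magnitude, so energy_a = 60
 * and energy_b = 0 (no measurement) map to -60 dBm and the -256 dBm
 * floor, and the stronger chain, -60 dBm, is returned.
 */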
4587 
4588 void
4589 iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4590     struct iwx_rx_data *data)
4591 {
4592 	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
4593 
4594 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4595 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
4596 
4597 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
4598 }
4599 
4600 /*
4601  * Retrieve the average noise (in dBm) among receivers.
4602  */
4603 int
4604 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
4605 {
4606 	int i, total, nbant, noise;
4607 
4608 	total = nbant = noise = 0;
4609 	for (i = 0; i < 3; i++) {
4610 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
4611 		if (noise) {
4612 			total += noise;
4613 			nbant++;
4614 		}
4615 	}
4616 
4617 	/* There should be at least one antenna but check anyway. */
4618 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4619 }
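
/*
 * Worked example for the averaging above (values made up): two antennas
 * reporting beacon silence RSSI values of 20 and 24 yield
 * ((20 + 24) / 2) - 107 = -85 dBm; with no antennas reporting, the
 * -127 dBm fallback is returned.
 */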
4620 
4621 int
4622 iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4623     struct ieee80211_rxinfo *rxi)
4624 {
4625 	struct ieee80211com *ic = &sc->sc_ic;
4626 	struct ieee80211_key *k;
4627 	struct ieee80211_frame *wh;
4628 	uint64_t pn, *prsc;
4629 	uint8_t *ivp;
4630 	uint8_t tid;
4631 	int hdrlen, hasqos;
4632 
4633 	wh = mtod(m, struct ieee80211_frame *);
4634 	hdrlen = ieee80211_get_hdrlen(wh);
4635 	ivp = (uint8_t *)wh + hdrlen;
4636 
4637 	/* find key for decryption */
4638 	k = ieee80211_get_rxkey(ic, m, ni);
4639 	if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
4640 		return 1;
4641 
4642 	/* Check that ExtIV bit is set. */
4643 	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
4644 		return 1;
4645 
4646 	hasqos = ieee80211_has_qos(wh);
4647 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4648 	prsc = &k->k_rsc[tid];
4649 
4650 	/* Extract the 48-bit PN from the CCMP header. */
4651 	pn = (uint64_t)ivp[0]       |
4652 	     (uint64_t)ivp[1] <<  8 |
4653 	     (uint64_t)ivp[4] << 16 |
4654 	     (uint64_t)ivp[5] << 24 |
4655 	     (uint64_t)ivp[6] << 32 |
4656 	     (uint64_t)ivp[7] << 40;
4657 	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
4658 		if (pn < *prsc) {
4659 			ic->ic_stats.is_ccmp_replays++;
4660 			return 1;
4661 		}
4662 	} else if (pn <= *prsc) {
4663 		ic->ic_stats.is_ccmp_replays++;
4664 		return 1;
4665 	}
4666 	/* Last seen packet number is updated in ieee80211_inputm(). */
4667 
4668 	/*
4669 	 * Some firmware versions strip the MIC, and some don't. It is not
4670 	 * clear which of the capability flags could tell us what to expect.
4671 	 * For now, keep things simple and just leave the MIC in place if
4672 	 * it is present.
4673 	 *
4674 	 * The IV will be stripped by ieee80211_inputm().
4675 	 */
4676 	return 0;
4677 }
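
/*
 * Standalone illustration (not part of the driver) of the 48-bit CCMP
 * packet number assembly above: PN0/PN1 sit in the first two header
 * octets, octet 2 is reserved, octet 3 carries the key ID and ExtIV
 * flag, and PN2-PN5 follow. The example PN is made up.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static uint64_t
ccmp_hdr_pn(const uint8_t *ivp)
{
	return (uint64_t)ivp[0]       |
	       (uint64_t)ivp[1] <<  8 |
	       (uint64_t)ivp[4] << 16 |
	       (uint64_t)ivp[5] << 24 |
	       (uint64_t)ivp[6] << 32 |
	       (uint64_t)ivp[7] << 40;
}

int
main(void)
{
	/* PN 0x010203040506 with the ExtIV bit (0x20) set in octet 3. */
	uint8_t hdr[8] = { 0x06, 0x05, 0x00, 0x20, 0x04, 0x03, 0x02, 0x01 };

	assert(ccmp_hdr_pn(hdr) == 0x010203040506ULL);
	return 0;
}
#endif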
4678 
4679 int
4680 iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
4681     struct ieee80211_rxinfo *rxi)
4682 {
4683 	struct ieee80211com *ic = &sc->sc_ic;
4684 	struct ifnet *ifp = IC2IFP(ic);
4685 	struct ieee80211_frame *wh;
4686 	struct ieee80211_node *ni;
4687 	int ret = 0;
4688 	uint8_t type, subtype;
4689 
4690 	wh = mtod(m, struct ieee80211_frame *);
4691 
4692 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4693 	if (type == IEEE80211_FC0_TYPE_CTL)
4694 		return 0;
4695 
4696 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4697 	if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
4698 		return 0;
4699 
4700 	ni = ieee80211_find_rxnode(ic, wh);
4701 	/* Handle hardware decryption. */
4702 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
4703 	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
4704 	    (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
4705 	    ((!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4706 	    ni->ni_rsncipher == IEEE80211_CIPHER_CCMP) ||
4707 	    (IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4708 	    ni->ni_rsngroupcipher == IEEE80211_CIPHER_CCMP))) {
4709 		if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4710 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4711 			ic->ic_stats.is_ccmp_dec_errs++;
4712 			ret = 1;
4713 			goto out;
4714 		}
4715 		/* Check whether decryption was successful or not. */
4716 		if ((rx_pkt_status &
4717 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4718 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
4719 		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4720 		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
4721 			ic->ic_stats.is_ccmp_dec_errs++;
4722 			ret = 1;
4723 			goto out;
4724 		}
4725 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
4726 	}
4727 out:
4728 	if (ret)
4729 		ifp->if_ierrors++;
4730 	ieee80211_release_node(ic, ni);
4731 	return ret;
4732 }
4733 
4734 void
4735 iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
4736     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
4737     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4738     struct mbuf_list *ml)
4739 {
4740 	struct ieee80211com *ic = &sc->sc_ic;
4741 	struct ifnet *ifp = IC2IFP(ic);
4742 	struct ieee80211_frame *wh;
4743 	struct ieee80211_node *ni;
4744 
4745 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
4746 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4747 
4748 	wh = mtod(m, struct ieee80211_frame *);
4749 	ni = ieee80211_find_rxnode(ic, wh);
4750 	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
4751 	    iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
4752 		ifp->if_ierrors++;
4753 		m_freem(m);
4754 		ieee80211_release_node(ic, ni);
4755 		return;
4756 	}
4757 
4758 #if NBPFILTER > 0
4759 	if (sc->sc_drvbpf != NULL) {
4760 		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
4761 		uint16_t chan_flags;
4762 		int have_legacy_rate = 1;
4763 		uint8_t mcs, rate;
4764 
4765 		tap->wr_flags = 0;
4766 		if (is_shortpre)
4767 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4768 		tap->wr_chan_freq =
4769 		    htole16(ic->ic_channels[chanidx].ic_freq);
4770 		chan_flags = ic->ic_channels[chanidx].ic_flags;
4771 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
4772 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
4773 			chan_flags &= ~IEEE80211_CHAN_HT;
4774 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
4775 		}
4776 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
4777 			chan_flags &= ~IEEE80211_CHAN_VHT;
4778 		tap->wr_chan_flags = htole16(chan_flags);
4779 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4780 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4781 		tap->wr_tsft = device_timestamp;
4782 		if (sc->sc_rate_n_flags_version >= 2) {
4783 			uint32_t mod_type = (rate_n_flags &
4784 			    IWX_RATE_MCS_MOD_TYPE_MSK);
4785 			const struct ieee80211_rateset *rs = NULL;
4786 			uint32_t ridx;
4787 			have_legacy_rate = (mod_type == IWX_RATE_MCS_CCK_MSK ||
4788 			    mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK);
4789 			mcs = (rate_n_flags & IWX_RATE_HT_MCS_CODE_MSK);
4790 			ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
4791 			if (mod_type == IWX_RATE_MCS_CCK_MSK)
4792 				rs = &ieee80211_std_rateset_11b;
4793 			else if (mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK)
4794 				rs = &ieee80211_std_rateset_11a;
4795 			if (rs && ridx < rs->rs_nrates) {
4796 				rate = (rs->rs_rates[ridx] &
4797 				    IEEE80211_RATE_VAL);
4798 			} else
4799 				rate = 0;
4800 		} else {
4801 			have_legacy_rate = ((rate_n_flags &
4802 			    (IWX_RATE_MCS_HT_MSK_V1 |
4803 			    IWX_RATE_MCS_VHT_MSK_V1)) == 0);
4804 			mcs = (rate_n_flags &
4805 			    (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
4806 			    IWX_RATE_HT_MCS_NSS_MSK_V1));
4807 			rate = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
4808 		}
4809 		if (!have_legacy_rate) {
4810 			tap->wr_rate = (0x80 | mcs);
4811 		} else {
4812 			switch (rate) {
4813 			/* CCK rates. */
4814 			case  10: tap->wr_rate =   2; break;
4815 			case  20: tap->wr_rate =   4; break;
4816 			case  55: tap->wr_rate =  11; break;
4817 			case 110: tap->wr_rate =  22; break;
4818 			/* OFDM rates. */
4819 			case 0xd: tap->wr_rate =  12; break;
4820 			case 0xf: tap->wr_rate =  18; break;
4821 			case 0x5: tap->wr_rate =  24; break;
4822 			case 0x7: tap->wr_rate =  36; break;
4823 			case 0x9: tap->wr_rate =  48; break;
4824 			case 0xb: tap->wr_rate =  72; break;
4825 			case 0x1: tap->wr_rate =  96; break;
4826 			case 0x3: tap->wr_rate = 108; break;
4827 			/* Unknown rate: should not happen. */
4828 			default:  tap->wr_rate =   0;
4829 			}
4830 		}
4831 
4832 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4833 		    m, BPF_DIRECTION_IN);
4834 	}
4835 #endif
4836 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4837 	ieee80211_release_node(ic, ni);
4838 }
4839 
4840 /*
4841  * Drop duplicate 802.11 retransmissions
4842  * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4843  * and handle pseudo-duplicate frames which result from deaggregation
4844  * of A-MSDU frames in hardware.
4845  */
4846 int
4847 iwx_detect_duplicate(struct iwx_softc *sc, struct mbuf *m,
4848     struct iwx_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4849 {
4850 	struct ieee80211com *ic = &sc->sc_ic;
4851 	struct iwx_node *in = (void *)ic->ic_bss;
4852 	struct iwx_rxq_dup_data *dup_data = &in->dup_data;
4853 	uint8_t tid = IWX_MAX_TID_COUNT, subframe_idx;
4854 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4855 	uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4856 	uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4857 	int hasqos = ieee80211_has_qos(wh);
4858 	uint16_t seq;
4859 
4860 	if (type == IEEE80211_FC0_TYPE_CTL ||
4861 	    (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4862 	    IEEE80211_IS_MULTICAST(wh->i_addr1))
4863 		return 0;
4864 
4865 	if (hasqos) {
4866 		tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4867 		if (tid > IWX_MAX_TID_COUNT)
4868 			tid = IWX_MAX_TID_COUNT;
4869 	}
4870 
4871 	/* If this wasn't part of an A-MSDU, the sub-frame index will be 0. */
4872 	subframe_idx = desc->amsdu_info &
4873 		IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4874 
4875 	seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
4876 	if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
4877 	    dup_data->last_seq[tid] == seq &&
4878 	    dup_data->last_sub_frame[tid] >= subframe_idx)
4879 		return 1;
4880 
4881 	/*
4882 	 * Allow the same frame sequence number for all A-MSDU subframes
4883 	 * following the first subframe.
4884 	 * Otherwise these subframes would be discarded as replays.
4885 	 */
4886 	if (dup_data->last_seq[tid] == seq &&
4887 	    subframe_idx > dup_data->last_sub_frame[tid] &&
4888 	    (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU)) {
4889 		rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4890 	}
4891 
4892 	dup_data->last_seq[tid] = seq;
4893 	dup_data->last_sub_frame[tid] = subframe_idx;
4894 
4895 	return 0;
4896 }
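
/*
 * Standalone illustration (not part of the driver) of the sequence
 * control split used above: the 16-bit field carries a 4-bit fragment
 * number in its low bits and a 12-bit sequence number above it
 * (IEEE80211_SEQ_SEQ_SHIFT is 4 in net80211).
 */
#if 0
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint16_t seq_ctl = (1234 << 4) | 2;	/* seq 1234, fragment 2 */

	assert((seq_ctl >> 4) == 1234);		/* sequence number */
	assert((seq_ctl & 0xf) == 2);		/* fragment number */
	return 0;
}
#endif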
4897 
4898 /*
4899  * Returns true if sn2 - buffer_size < sn1 < sn2.
4900  * To be used only in order to compare reorder buffer head with NSSN.
4901  * We fully trust NSSN unless it is behind us due to reorder timeout.
4902  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
4903  */
4904 int
4905 iwx_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
4906 {
4907 	return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
4908 }
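
/*
 * Minimal sketch (not part of the driver) of the window check above,
 * assuming net80211's modulo-4096 comparison where a sequence number is
 * "less than" another when their difference, masked to 12 bits, falls
 * in the upper half of the sequence space.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define SN_LT(a, b)	((((a) - (b)) & 0xfff) > 2048)

static int
sn_in_window(uint16_t sn1, uint16_t sn2, uint16_t buf_size)
{
	/* True iff sn2 - buf_size < sn1 < sn2, modulo 4096. */
	return SN_LT(sn1, sn2) && !SN_LT(sn1, sn2 - buf_size);
}

int
main(void)
{
	assert(sn_in_window(4090, 5, 64));	/* window wraps past 4095 */
	assert(!sn_in_window(5, 4090, 64));	/* outside the window */
	return 0;
}
#endif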
4909 
4910 void
4911 iwx_release_frames(struct iwx_softc *sc, struct ieee80211_node *ni,
4912     struct iwx_rxba_data *rxba, struct iwx_reorder_buffer *reorder_buf,
4913     uint16_t nssn, struct mbuf_list *ml)
4914 {
4915 	struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
4916 	uint16_t ssn = reorder_buf->head_sn;
4917 
4918 	/* ignore nssn smaller than head sn - this can happen due to timeout */
4919 	if (iwx_is_sn_less(nssn, ssn, reorder_buf->buf_size))
4920 		goto set_timer;
4921 
4922 	while (iwx_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
4923 		int index = ssn % reorder_buf->buf_size;
4924 		struct mbuf *m;
4925 		int chanidx, is_shortpre;
4926 		uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
4927 		struct ieee80211_rxinfo *rxi;
4928 
4929 		/* This data is the same for all A-MSDU subframes. */
4930 		chanidx = entries[index].chanidx;
4931 		rx_pkt_status = entries[index].rx_pkt_status;
4932 		is_shortpre = entries[index].is_shortpre;
4933 		rate_n_flags = entries[index].rate_n_flags;
4934 		device_timestamp = entries[index].device_timestamp;
4935 		rxi = &entries[index].rxi;
4936 
4937 		/*
4938 		 * Empty the list. Will have more than one frame for A-MSDU.
4939 		 * Empty list is valid as well since nssn indicates frames were
4940 		 * received.
4941 		 */
4942 		while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
4943 			iwx_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
4944 			    rate_n_flags, device_timestamp, rxi, ml);
4945 			reorder_buf->num_stored--;
4946 
4947 			/*
4948 			 * Allow the same frame sequence number and CCMP PN for
4949 			 * all A-MSDU subframes following the first subframe.
4950 			 * Otherwise they would be discarded as replays.
4951 			 */
4952 			rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4953 			rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4954 		}
4955 
4956 		ssn = (ssn + 1) & 0xfff;
4957 	}
4958 	reorder_buf->head_sn = nssn;
4959 
4960 set_timer:
4961 	if (reorder_buf->num_stored && !reorder_buf->removed) {
4962 		timeout_add_usec(&reorder_buf->reorder_timer,
4963 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
4964 	} else
4965 		timeout_del(&reorder_buf->reorder_timer);
4966 }
4967 
4968 int
4969 iwx_oldsn_workaround(struct iwx_softc *sc, struct ieee80211_node *ni, int tid,
4970     struct iwx_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
4971 {
4972 	struct ieee80211com *ic = &sc->sc_ic;
4973 
4974 	if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
4975 		/* we have a new (A-)MPDU ... */
4976 
4977 		/*
4978 		 * reset counter to 0 if we didn't have any oldsn in
4979 		 * the last A-MPDU (as detected by GP2 being identical)
4980 		 */
4981 		if (!buffer->consec_oldsn_prev_drop)
4982 			buffer->consec_oldsn_drops = 0;
4983 
4984 		/* either way, update our tracking state */
4985 		buffer->consec_oldsn_ampdu_gp2 = gp2;
4986 	} else if (buffer->consec_oldsn_prev_drop) {
4987 		/*
4988 		 * tracking state didn't change, and we had an old SN
4989 		 * indication before - do nothing in this case, we
4990 		 * already noted this one down and are waiting for the
4991 		 * next A-MPDU (by GP2)
4992 		 */
4993 		return 0;
4994 	}
4995 
4996 	/* return unless this MPDU has old SN */
4997 	if (!(reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN))
4998 		return 0;
4999 
5000 	/* update state */
5001 	buffer->consec_oldsn_prev_drop = 1;
5002 	buffer->consec_oldsn_drops++;
5003 
5004 	/* if limit is reached, send del BA and reset state */
5005 	if (buffer->consec_oldsn_drops == IWX_AMPDU_CONSEC_DROPS_DELBA) {
5006 		ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
5007 		    0, tid);
5008 		buffer->consec_oldsn_prev_drop = 0;
5009 		buffer->consec_oldsn_drops = 0;
5010 		return 1;
5011 	}
5012 
5013 	return 0;
5014 }
5015 
5016 /*
5017  * Handle re-ordering of frames which were de-aggregated in hardware.
5018  * Returns 1 if the MPDU was consumed (buffered or dropped).
5019  * Returns 0 if the MPDU should be passed to the upper layer.
5020  */
5021 int
5022 iwx_rx_reorder(struct iwx_softc *sc, struct mbuf *m, int chanidx,
5023     struct iwx_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
5024     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
5025     struct mbuf_list *ml)
5026 {
5027 	struct ieee80211com *ic = &sc->sc_ic;
5028 	struct ieee80211_frame *wh;
5029 	struct ieee80211_node *ni;
5030 	struct iwx_rxba_data *rxba;
5031 	struct iwx_reorder_buffer *buffer;
5032 	uint32_t reorder_data = le32toh(desc->reorder_data);
5033 	int is_amsdu = (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU);
5034 	int last_subframe =
5035 		(desc->amsdu_info & IWX_RX_MPDU_AMSDU_LAST_SUBFRAME);
5036 	uint8_t tid;
5037 	uint8_t subframe_idx = (desc->amsdu_info &
5038 	    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5039 	struct iwx_reorder_buf_entry *entries;
5040 	int index;
5041 	uint16_t nssn, sn;
5042 	uint8_t baid, type, subtype;
5043 	int hasqos;
5044 
5045 	wh = mtod(m, struct ieee80211_frame *);
5046 	hasqos = ieee80211_has_qos(wh);
5047 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
5048 
5049 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5050 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5051 
5052 	/*
5053 	 * We are only interested in Block Ack requests and unicast QoS data.
5054 	 */
5055 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
5056 		return 0;
5057 	if (hasqos) {
5058 		if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
5059 			return 0;
5060 	} else {
5061 		if (type != IEEE80211_FC0_TYPE_CTL ||
5062 		    subtype != IEEE80211_FC0_SUBTYPE_BAR)
5063 			return 0;
5064 	}
5065 
5066 	baid = (reorder_data & IWX_RX_MPDU_REORDER_BAID_MASK) >>
5067 		IWX_RX_MPDU_REORDER_BAID_SHIFT;
5068 	if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
5069 	    baid >= nitems(sc->sc_rxba_data))
5070 		return 0;
5071 
5072 	rxba = &sc->sc_rxba_data[baid];
5073 	if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
5074 	    tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
5075 		return 0;
5076 
5077 	if (rxba->timeout != 0)
5078 		getmicrouptime(&rxba->last_rx);
5079 
5080 	/* Bypass A-MPDU re-ordering in net80211. */
5081 	rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
5082 
5083 	nssn = reorder_data & IWX_RX_MPDU_REORDER_NSSN_MASK;
5084 	sn = (reorder_data & IWX_RX_MPDU_REORDER_SN_MASK) >>
5085 		IWX_RX_MPDU_REORDER_SN_SHIFT;
5086 
5087 	buffer = &rxba->reorder_buf;
5088 	entries = &rxba->entries[0];
5089 
5090 	if (!buffer->valid) {
5091 		if (reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN)
5092 			return 0;
5093 		buffer->valid = 1;
5094 	}
5095 
5096 	ni = ieee80211_find_rxnode(ic, wh);
5097 	if (type == IEEE80211_FC0_TYPE_CTL &&
5098 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
5099 		iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
5100 		goto drop;
5101 	}
5102 
5103 	/*
5104 	 * If there was a significant jump in the nssn, adjust.
5105 	 * If the SN is smaller than the NSSN it might need to go into the
5106 	 * reorder buffer first, in which case we just release up to it and
5107 	 * the rest of the function will take care of storing it and
5108 	 * releasing up to the nssn.
5109 	 */
5110 	if (!iwx_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
5111 	    buffer->buf_size) ||
5112 	    !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
5113 		uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
5114 		ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
5115 		iwx_release_frames(sc, ni, rxba, buffer, min_sn, ml);
5116 	}
5117 
5118 	if (iwx_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
5119 	    device_timestamp)) {
5120 		 /* BA session will be torn down. */
5121 		ic->ic_stats.is_ht_rx_ba_window_jump++;
5122 		goto drop;
5123 
5124 	}
5125 
5126 	/* drop any outdated packets */
5127 	if (SEQ_LT(sn, buffer->head_sn)) {
5128 		ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
5129 		goto drop;
5130 	}
5131 
5132 	/* release immediately if allowed by nssn and no stored frames */
5133 	if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
5134 		if (iwx_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
5135 		   (!is_amsdu || last_subframe))
5136 			buffer->head_sn = nssn;
5137 		ieee80211_release_node(ic, ni);
5138 		return 0;
5139 	}
5140 
5141 	/*
5142 	 * Release immediately if there are no stored frames and the SN is
5143 	 * equal to the head.
5144 	 * This can happen after a reorder timeout which left the NSSN behind
5145 	 * head_sn: we released everything, and when the next in-sequence
5146 	 * frame arrives the NSSN alone would not let us release it, even
5147 	 * though there is no hole and we can safely move forward.
5148 	 */
5149 	if (!buffer->num_stored && sn == buffer->head_sn) {
5150 		if (!is_amsdu || last_subframe)
5151 			buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
5152 		ieee80211_release_node(ic, ni);
5153 		return 0;
5154 	}
5155 
5156 	index = sn % buffer->buf_size;
5157 
5158 	/*
5159 	 * Check if we have already stored this frame.
5160 	 * Since an A-MSDU is either received in full or not at all, the logic
5161 	 * is simple: if frames are stored at this buffer position and the last
5162 	 * stored A-MSDU had a different SN, this is a retransmission.
5163 	 * If the SN is the same, it is the same A-MSDU only while the subframe
5164 	 * index keeps incrementing; otherwise it is a retransmission.
5165 	 */
5166 	if (!ml_empty(&entries[index].frames)) {
5167 		if (!is_amsdu) {
5168 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5169 			goto drop;
5170 		} else if (sn != buffer->last_amsdu ||
5171 		    buffer->last_sub_index >= subframe_idx) {
5172 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5173 			goto drop;
5174 		}
5175 	} else {
5176 		/* This data is the same for all A-MSDU subframes. */
5177 		entries[index].chanidx = chanidx;
5178 		entries[index].is_shortpre = is_shortpre;
5179 		entries[index].rate_n_flags = rate_n_flags;
5180 		entries[index].device_timestamp = device_timestamp;
5181 		memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
5182 	}
5183 
5184 	/* put in reorder buffer */
5185 	ml_enqueue(&entries[index].frames, m);
5186 	buffer->num_stored++;
5187 	getmicrouptime(&entries[index].reorder_time);
5188 
5189 	if (is_amsdu) {
5190 		buffer->last_amsdu = sn;
5191 		buffer->last_sub_index = subframe_idx;
5192 	}
5193 
5194 	/*
5195 	 * We cannot trust the NSSN of A-MSDU subframes other than the last.
5196 	 * The NSSN advances on the first subframe, which may cause the
5197 	 * reorder buffer to advance before all subframes have arrived.
5198 	 * Example: the reorder buffer holds SN 0 and 2 and we receive an
5199 	 * A-MSDU with SN 1. The NSSN for the first subframe will be 3,
5200 	 * making the driver release SN 0, 1 and 2. When the next subframe
5201 	 * of SN 1 arrives, the reorder buffer is already ahead of it and it
5202 	 * will be dropped. If the last subframe is not on this queue, we
5203 	 * will instead get a frame release notification with a current NSSN.
5204 	 */
5205 	if (!is_amsdu || last_subframe)
5206 		iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
5207 
5208 	ieee80211_release_node(ic, ni);
5209 	return 1;
5210 
5211 drop:
5212 	m_freem(m);
5213 	ieee80211_release_node(ic, ni);
5214 	return 1;
5215 }
5216 
5217 void
5218 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
5219     size_t maxlen, struct mbuf_list *ml)
5220 {
5221 	struct ieee80211com *ic = &sc->sc_ic;
5222 	struct ieee80211_rxinfo rxi;
5223 	struct iwx_rx_mpdu_desc *desc;
5224 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
5225 	int rssi;
5226 	uint8_t chanidx;
5227 	uint16_t phy_info;
5228 	size_t desc_size;
5229 
5230 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
5231 		desc_size = sizeof(*desc);
5232 	else
5233 		desc_size = IWX_RX_DESC_SIZE_V1;
5234 
5235 	if (maxlen < desc_size) {
5236 		m_freem(m);
5237 		return; /* drop */
5238 	}
5239 
5240 	desc = (struct iwx_rx_mpdu_desc *)pktdata;
5241 
5242 	if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
5243 	    !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
5244 		m_freem(m);
5245 		return; /* drop */
5246 	}
5247 
5248 	len = le16toh(desc->mpdu_len);
5249 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5250 		/* Allow control frames in monitor mode. */
5251 		if (len < sizeof(struct ieee80211_frame_cts)) {
5252 			ic->ic_stats.is_rx_tooshort++;
5253 			IC2IFP(ic)->if_ierrors++;
5254 			m_freem(m);
5255 			return;
5256 		}
5257 	} else if (len < sizeof(struct ieee80211_frame)) {
5258 		ic->ic_stats.is_rx_tooshort++;
5259 		IC2IFP(ic)->if_ierrors++;
5260 		m_freem(m);
5261 		return;
5262 	}
5263 	if (len > maxlen - desc_size) {
5264 		IC2IFP(ic)->if_ierrors++;
5265 		m_freem(m);
5266 		return;
5267 	}
5268 
5269 	m->m_data = pktdata + desc_size;
5270 	m->m_pkthdr.len = m->m_len = len;
5271 
5272 	/* Account for padding following the frame header. */
5273 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
5274 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5275 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5276 		if (type == IEEE80211_FC0_TYPE_CTL) {
5277 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
5278 			case IEEE80211_FC0_SUBTYPE_CTS:
5279 				hdrlen = sizeof(struct ieee80211_frame_cts);
5280 				break;
5281 			case IEEE80211_FC0_SUBTYPE_ACK:
5282 				hdrlen = sizeof(struct ieee80211_frame_ack);
5283 				break;
5284 			default:
5285 				hdrlen = sizeof(struct ieee80211_frame_min);
5286 				break;
5287 			}
5288 		} else
5289 			hdrlen = ieee80211_get_hdrlen(wh);
5290 
5291 		if ((le16toh(desc->status) &
5292 		    IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
5293 		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
5294 			/* Padding is inserted after the IV. */
5295 			hdrlen += IEEE80211_CCMP_HDRLEN;
5296 		}
5297 
5298 		memmove(m->m_data + 2, m->m_data, hdrlen);
5299 		m_adj(m, 2);
5300 	}
5301 
5302 	memset(&rxi, 0, sizeof(rxi));
5303 
5304 	/*
5305 	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
5306 	 * in place for each subframe. But it leaves the 'A-MSDU present'
5307 	 * bit set in the frame header. We need to clear this bit ourselves.
5308 	 * (XXX This workaround is not required on AX200/AX201 devices that
5309 	 * have been tested by me, but it's unclear when this problem was
5310 	 * fixed in the hardware. It definitely affects the 9k generation.
5311 	 * Leaving this in place for now since some 9k/AX200 hybrids seem
5312 	 * to exist that we may eventually add support for.)
5313 	 *
5314 	 * And we must allow the same CCMP PN for subframes following the
5315 	 * first subframe. Otherwise they would be discarded as replays.
5316 	 */
5317 	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
5318 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5319 		uint8_t subframe_idx = (desc->amsdu_info &
5320 		    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5321 		if (subframe_idx > 0)
5322 			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5323 		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
5324 		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
5325 			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
5326 			    struct ieee80211_qosframe_addr4 *);
5327 			qwh4->i_qos[0] &= ~IEEE80211_QOS_AMSDU;
5328 		} else if (ieee80211_has_qos(wh) &&
5329 		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
5330 			struct ieee80211_qosframe *qwh = mtod(m,
5331 			    struct ieee80211_qosframe *);
5332 			qwh->i_qos[0] &= ~IEEE80211_QOS_AMSDU;
5333 		}
5334 	}
5335 
5336 	/*
5337 	 * Verify decryption before duplicate detection. The latter uses
5338 	 * the TID supplied in QoS frame headers and this TID is implicitly
5339 	 * verified as part of the CCMP nonce.
5340 	 */
5341 	if (iwx_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
5342 		m_freem(m);
5343 		return;
5344 	}
5345 
5346 	if (iwx_detect_duplicate(sc, m, desc, &rxi)) {
5347 		m_freem(m);
5348 		return;
5349 	}
5350 
5351 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
5352 		rate_n_flags = le32toh(desc->v3.rate_n_flags);
5353 		chanidx = desc->v3.channel;
5354 		device_timestamp = le32toh(desc->v3.gp2_on_air_rise);
5355 	} else {
5356 		rate_n_flags = le32toh(desc->v1.rate_n_flags);
5357 		chanidx = desc->v1.channel;
5358 		device_timestamp = le32toh(desc->v1.gp2_on_air_rise);
5359 	}
5360 
5361 	phy_info = le16toh(desc->phy_info);
5362 
5363 	rssi = iwx_rxmq_get_signal_strength(sc, desc);
5364 	rssi = (0 - IWX_MIN_DBM) + rssi;	/* normalize */
5365 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
5366 
5367 	rxi.rxi_rssi = rssi;
5368 	rxi.rxi_tstamp = device_timestamp;
5369 	rxi.rxi_chan = chanidx;
5370 
5371 	if (iwx_rx_reorder(sc, m, chanidx, desc,
5372 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
5373 	    rate_n_flags, device_timestamp, &rxi, ml))
5374 		return;
5375 
5376 	iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
5377 	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
5378 	    rate_n_flags, device_timestamp, &rxi, ml);
5379 }
5380 
5381 void
5382 iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
5383 {
5384 	struct iwx_tfh_tfd *desc = &ring->desc[idx];
5385 	uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
5386 	int i;
5387 
5388 	/* First TB is never cleared - it is bidirectional DMA data. */
5389 	for (i = 1; i < num_tbs; i++) {
5390 		struct iwx_tfh_tb *tb = &desc->tbs[i];
5391 		memset(tb, 0, sizeof(*tb));
5392 	}
5393 	desc->num_tbs = htole16(1);
5394 
5395 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5396 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5397 	    sizeof(*desc), BUS_DMASYNC_PREWRITE);
5398 }
5399 
5400 void
5401 iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_data *txd)
5402 {
5403 	struct ieee80211com *ic = &sc->sc_ic;
5404 
5405 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
5406 	    BUS_DMASYNC_POSTWRITE);
5407 	bus_dmamap_unload(sc->sc_dmat, txd->map);
5408 	m_freem(txd->m);
5409 	txd->m = NULL;
5410 
5411 	KASSERT(txd->in);
5412 	ieee80211_release_node(ic, &txd->in->in_ni);
5413 	txd->in = NULL;
5414 }
5415 
5416 void
5417 iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, uint16_t idx)
5418 {
5419  	struct iwx_tx_data *txd;
5420 
5421 	while (ring->tail_hw != idx) {
5422 		txd = &ring->data[ring->tail];
5423 		if (txd->m != NULL) {
5424 			iwx_clear_tx_desc(sc, ring, ring->tail);
5425 			iwx_tx_update_byte_tbl(sc, ring, ring->tail, 0, 0);
5426 			iwx_txd_done(sc, txd);
5427 			ring->queued--;
5428 		}
5429 		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
5430 		ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size;
5431 	}
5432 }
5433 
5434 void
5435 iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
5436     struct iwx_rx_data *data)
5437 {
5438 	struct ieee80211com *ic = &sc->sc_ic;
5439 	struct ifnet *ifp = IC2IFP(ic);
5440 	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
5441 	int qid = cmd_hdr->qid, status, txfail;
5442 	struct iwx_tx_ring *ring = &sc->txq[qid];
5443 	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
5444 	uint32_t ssn;
5445 	uint32_t len = iwx_rx_packet_len(pkt);
5446 
5447 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
5448 	    BUS_DMASYNC_POSTREAD);
5449 
5450 	/* Sanity checks. */
5451 	if (sizeof(*tx_resp) > len)
5452 		return;
5453 	if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
5454 		return;
5455 	if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
5456 	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
5457 		return;
5458 
5459 	sc->sc_tx_timer[qid] = 0;
5460 
5461 	if (tx_resp->frame_count > 1) /* A-MPDU */
5462 		return;
5463 
5464 	status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
5465 	txfail = (status != IWX_TX_STATUS_SUCCESS &&
5466 	    status != IWX_TX_STATUS_DIRECT_DONE);
5467 
5468 	if (txfail)
5469 		ifp->if_oerrors++;
5470 
5471 	/*
5472 	 * On hardware supported by iwx(4) the SSN counter corresponds
5473 	 * to a Tx ring index rather than a sequence number.
5474 	 * Frames up to this index (non-inclusive) can now be freed.
5475 	 */
5476 	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
5477 	ssn = le32toh(ssn);
5478 	if (ssn < sc->max_tfd_queue_size) {
5479 		iwx_txq_advance(sc, ring, ssn);
5480 		iwx_clear_oactive(sc, ring);
5481 	}
5482 }
5483 
5484 void
5485 iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
5486 {
5487 	struct ieee80211com *ic = &sc->sc_ic;
5488 	struct ifnet *ifp = IC2IFP(ic);
5489 
5490 	if (ring->queued < IWX_TX_RING_LOMARK) {
5491 		sc->qfullmsk &= ~(1 << ring->qid);
5492 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
5493 			ifq_clr_oactive(&ifp->if_snd);
5494 			/*
5495 			 * Well, we're in interrupt context, but then again
5496 			 * I guess net80211 does all sorts of stunts in
5497 			 * interrupt context, so maybe this is no biggie.
5498 			 */
5499 			(*ifp->if_start)(ifp);
5500 		}
5501 	}
5502 }
5503 
5504 void
5505 iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
5506 {
5507 	struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
5508 	struct ieee80211com *ic = &sc->sc_ic;
5509 	struct ieee80211_node *ni;
5510 	struct ieee80211_tx_ba *ba;
5511 	struct iwx_node *in;
5512 	struct iwx_tx_ring *ring;
5513 	uint16_t i, tfd_cnt, ra_tid_cnt, idx;
5514 	int qid;
5515 
5516 	if (ic->ic_state != IEEE80211_S_RUN)
5517 		return;
5518 
5519 	if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
5520 		return;
5521 
5522 	if (ba_res->sta_id != IWX_STATION_ID)
5523 		return;
5524 
5525 	ni = ic->ic_bss;
5526 	in = (void *)ni;
5527 
5528 	tfd_cnt = le16toh(ba_res->tfd_cnt);
5529 	ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
5530 	if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
5531 	    sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
5532 	    sizeof(ba_res->tfd[0]) * tfd_cnt))
5533 		return;
5534 
5535 	for (i = 0; i < tfd_cnt; i++) {
5536 		struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
5537 		uint8_t tid;
5538 
5539 		tid = ba_tfd->tid;
5540 		if (tid >= nitems(sc->aggqid))
5541 			continue;
5542 
5543 		qid = sc->aggqid[tid];
5544 		if (qid != le16toh(ba_tfd->q_num))
5545 			continue;
5546 
5547 		ring = &sc->txq[qid];
5548 
5549 		ba = &ni->ni_tx_ba[tid];
5550 		if (ba->ba_state != IEEE80211_BA_AGREED)
5551 			continue;
5552 
5553 		idx = le16toh(ba_tfd->tfd_index);
5554 		sc->sc_tx_timer[qid] = 0;
5555 		iwx_txq_advance(sc, ring, idx);
5556 		iwx_clear_oactive(sc, ring);
5557 	}
5558 }
5559 
5560 void
5561 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
5562     struct iwx_rx_data *data)
5563 {
5564 	struct ieee80211com *ic = &sc->sc_ic;
5565 	struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
5566 	uint32_t missed;
5567 
5568 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
5569 	    (ic->ic_state != IEEE80211_S_RUN))
5570 		return;
5571 
5572 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
5573 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
5574 
5575 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
5576 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
5577 		if (ic->ic_if.if_flags & IFF_DEBUG)
5578 			printf("%s: receiving no beacons from %s; checking if "
5579 			    "this AP is still responding to probe requests\n",
5580 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
5581 		/*
5582 		 * Rather than go directly to scan state, try to send a
5583 		 * directed probe request first. If that fails then the
5584 		 * state machine will drop us into scanning after timing
5585 		 * out waiting for a probe response.
5586 		 */
5587 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
5588 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
5589 	}
5590 
5591 }
5592 
5593 int
5594 iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
5595 {
5596 	struct iwx_binding_cmd cmd;
5597 	struct iwx_phy_ctxt *phyctxt = in->in_phyctxt;
5598 	uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
5599 	int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
5600 	uint32_t status;
5601 
5602 	/* No need to bind with MLD firmware. */
5603 	if (sc->sc_use_mld_api)
5604 		return 0;
5605 
5606 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
5607 		panic("binding already added");
5608 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
5609 		panic("binding already removed");
5610 
5611 	if (phyctxt == NULL) /* XXX race with iwx_stop() */
5612 		return EINVAL;
5613 
5614 	memset(&cmd, 0, sizeof(cmd));
5615 
5616 	cmd.id_and_color
5617 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5618 	cmd.action = htole32(action);
5619 	cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5620 
5621 	cmd.macs[0] = htole32(mac_id);
5622 	for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
5623 		cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
5624 
5625 	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
5626 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5627 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5628 	else
5629 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5630 
5631 	status = 0;
5632 	err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
5633 	    &cmd, &status);
5634 	if (err == 0 && status != 0)
5635 		err = EIO;
5636 
5637 	return err;
5638 }
5639 
5640 uint8_t
5641 iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
5642 {
5643 	int center_idx = ic->ic_bss->ni_vht_chan_center_freq_idx0;
5644 	int primary_idx = ic->ic_bss->ni_primary_chan;
5645 	/*
5646 	 * The FW is expected to check the control channel position only
5647 	 * when in HT/VHT and the channel width is not 20MHz. Return
5648 	 * this value as the default one:
5649 	 */
5650 	uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5651 
5652 	switch (primary_idx - center_idx) {
5653 	case -6:
5654 		pos = IWX_PHY_VHT_CTRL_POS_2_BELOW;
5655 		break;
5656 	case -2:
5657 		pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5658 		break;
5659 	case 2:
5660 		pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5661 		break;
5662 	case 6:
5663 		pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE;
5664 		break;
5665 	default:
5666 		break;
5667 	}
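	/*
	 * Illustrative (not part of the original source): an 80 MHz channel
	 * spanning channels 36-48 is centered on index 42, so primary
	 * channel 36 gives 36 - 42 == -6 and the control channel is the
	 * second 20 MHz channel below the center (POS_2_BELOW).
	 */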
5668 
5669 	return pos;
5670 }
5671 
5672 int
5673 iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5674     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
5675     uint8_t vht_chan_width, int cmdver)
5676 {
5677 	struct ieee80211com *ic = &sc->sc_ic;
5678 	struct iwx_phy_context_cmd_uhb cmd;
5679 	uint8_t active_cnt, idle_cnt;
5680 	struct ieee80211_channel *chan = ctxt->channel;
5681 
5682 	memset(&cmd, 0, sizeof(cmd));
5683 	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
5684 	    ctxt->color));
5685 	cmd.action = htole32(action);
5686 
5687 	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
5688 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5689 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5690 	else
5691 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5692 
5693 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5694 	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
5695 	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
5696 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5697 		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
5698 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
5699 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
5700 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
5701 			/* secondary chan above -> control chan below */
5702 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5703 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5704 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
5705 			/* secondary chan below -> control chan above */
5706 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5707 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5708 		} else {
5709 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5710 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5711 		}
5712 	} else {
5713 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5714 		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5715 	}
5716 
5717 	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
5718 	    IWX_RLC_CONFIG_CMD) != 2) {
5719 		idle_cnt = chains_static;
5720 		active_cnt = chains_dynamic;
5721 		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
5722 		    IWX_PHY_RX_CHAIN_VALID_POS);
5723 		cmd.rxchain_info |= htole32(idle_cnt <<
5724 		    IWX_PHY_RX_CHAIN_CNT_POS);
5725 		cmd.rxchain_info |= htole32(active_cnt <<
5726 		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
5727 	}
5728 
5729 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5730 }
5731 
5732 int
5733 iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5734     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
5735     uint8_t vht_chan_width, int cmdver)
5736 {
5737 	struct ieee80211com *ic = &sc->sc_ic;
5738 	struct iwx_phy_context_cmd cmd;
5739 	uint8_t active_cnt, idle_cnt;
5740 	struct ieee80211_channel *chan = ctxt->channel;
5741 
5742 	memset(&cmd, 0, sizeof(cmd));
5743 	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
5744 	    ctxt->color));
5745 	cmd.action = htole32(action);
5746 
5747 	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
5748 	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5749 		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5750 	else
5751 		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5752 
5753 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5754 	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
5755 	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
5756 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5757 		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
5758 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
5759 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
5760 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
5761 			/* secondary chan above -> control chan below */
5762 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5763 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5764 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
5765 			/* secondary chan below -> control chan above */
5766 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5767 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5768 		} else {
5769 			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5770 			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5771 		}
5772 	} else {
5773 		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5774 		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5775 	}
5776 
5777 	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
5778 	    IWX_RLC_CONFIG_CMD) != 2) {
5779 		idle_cnt = chains_static;
5780 		active_cnt = chains_dynamic;
5781 		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
5782 		    IWX_PHY_RX_CHAIN_VALID_POS);
5783 		cmd.rxchain_info |= htole32(idle_cnt <<
5784 		    IWX_PHY_RX_CHAIN_CNT_POS);
5785 		cmd.rxchain_info |= htole32(active_cnt <<
5786 		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
5787 	}
5788 
5789 	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5790 }
5791 
5792 int
5793 iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5794     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
5795     uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
5796 {
5797 	int cmdver;
5798 
5799 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
5800 	if (cmdver != 3 && cmdver != 4) {
5801 		printf("%s: firmware does not support phy-context-cmd v3/v4\n",
5802 		    DEVNAME(sc));
5803 		return ENOTSUP;
5804 	}
5805 
5806 	/*
5807 	 * Intel increased the size of the fw_channel_info struct and neglected
5808 	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
5809 	 * member in the middle.
5810 	 * To keep things simple we use a separate function to handle the larger
5811 	 * variant of the phy context command.
5812 	 */
5813 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
5814 		return iwx_phy_ctxt_cmd_uhb_v3_v4(sc, ctxt, chains_static,
5815 		    chains_dynamic, action, sco, vht_chan_width, cmdver);
5816 	}
5817 
5818 	return iwx_phy_ctxt_cmd_v3_v4(sc, ctxt, chains_static, chains_dynamic,
5819 	    action, sco, vht_chan_width, cmdver);
5820 }
5821 
5822 int
5823 iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5824 {
5825 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5826 	struct iwx_tfh_tfd *desc;
5827 	struct iwx_tx_data *txdata;
5828 	struct iwx_device_cmd *cmd;
5829 	struct mbuf *m;
5830 	bus_addr_t paddr;
5831 	uint64_t addr;
5832 	int err = 0, i, paylen, off, s;
5833 	int idx, code, async, group_id;
5834 	size_t hdrlen, datasz;
5835 	uint8_t *data;
5836 	int generation = sc->sc_generation;
5837 
5838 	code = hcmd->id;
5839 	async = hcmd->flags & IWX_CMD_ASYNC;
5840 	idx = ring->cur;
5841 
5842 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
5843 		paylen += hcmd->len[i];
5844 	}
5845 
5846 	/* If this command waits for a response, allocate response buffer. */
5847 	hcmd->resp_pkt = NULL;
5848 	if (hcmd->flags & IWX_CMD_WANT_RESP) {
5849 		uint8_t *resp_buf;
5850 		KASSERT(!async);
5851 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet));
5852 		KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX);
5853 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
5854 			return ENOSPC;
5855 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
5856 		    M_NOWAIT | M_ZERO);
5857 		if (resp_buf == NULL)
5858 			return ENOMEM;
5859 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
5860 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
5861 	} else {
5862 		sc->sc_cmd_resp_pkt[idx] = NULL;
5863 	}
5864 
5865 	s = splnet();
5866 
5867 	desc = &ring->desc[idx];
5868 	txdata = &ring->data[idx];
5869 
5870 	/*
5871 	 * XXX Intel inside (tm)
5872 	 * Firmware API versions >= 50 reject old-style commands in
5873 	 * group 0 with a "BAD_COMMAND" firmware error. We must pretend
5874 	 * that such commands were in the LONG_GROUP instead in order
5875 	 * for firmware to accept them.
5876 	 */
5877 	if (iwx_cmd_groupid(code) == 0) {
5878 		code = IWX_WIDE_ID(IWX_LONG_GROUP, code);
5879 		txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW;
5880 	} else
5881 		txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW;
5882 
5883 	group_id = iwx_cmd_groupid(code);
5884 
5885 	hdrlen = sizeof(cmd->hdr_wide);
5886 	datasz = sizeof(cmd->data_wide);
5887 
5888 	if (paylen > datasz) {
5889 		/* Command is too large to fit in pre-allocated space. */
5890 		size_t totlen = hdrlen + paylen;
5891 		if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
5892 			printf("%s: firmware command too long (%zd bytes)\n",
5893 			    DEVNAME(sc), totlen);
5894 			err = EINVAL;
5895 			goto out;
5896 		}
5897 		m = MCLGETL(NULL, M_DONTWAIT, totlen);
5898 		if (m == NULL) {
5899 			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
5900 			    DEVNAME(sc), totlen);
5901 			err = ENOMEM;
5902 			goto out;
5903 		}
5904 		cmd = mtod(m, struct iwx_device_cmd *);
5905 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
5906 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5907 		if (err) {
5908 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
5909 			    DEVNAME(sc), totlen);
5910 			m_freem(m);
5911 			goto out;
5912 		}
5913 		txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
5914 		paddr = txdata->map->dm_segs[0].ds_addr;
5915 	} else {
5916 		cmd = &ring->cmd[idx];
5917 		paddr = txdata->cmd_paddr;
5918 	}
5919 
5920 	memset(cmd, 0, sizeof(*cmd));
5921 	cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
5922 	cmd->hdr_wide.group_id = group_id;
5923 	cmd->hdr_wide.qid = ring->qid;
5924 	cmd->hdr_wide.idx = idx;
5925 	cmd->hdr_wide.length = htole16(paylen);
5926 	cmd->hdr_wide.version = iwx_cmd_version(code);
5927 	data = cmd->data_wide;
5928 
5929 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
5930 		if (hcmd->len[i] == 0)
5931 			continue;
5932 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
5933 		off += hcmd->len[i];
5934 	}
5935 	KASSERT(off == paylen);
5936 
5937 	desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
5938 	addr = htole64(paddr);
5939 	memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
5940 	if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
5941 		desc->tbs[1].tb_len = htole16(hdrlen + paylen -
5942 		    IWX_FIRST_TB_SIZE);
5943 		addr = htole64(paddr + IWX_FIRST_TB_SIZE);
5944 		memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
5945 		desc->num_tbs = htole16(2);
5946 	} else
5947 		desc->num_tbs = htole16(1);
5948 
5949 	if (paylen > datasz) {
5950 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
5951 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5952 	} else {
5953 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5954 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5955 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5956 	}
5957 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5958 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5959 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
5960 	/* Kick command ring. */
5961 	DPRINTF(("%s: sending command 0x%x\n", __func__, code));
5962 	ring->queued++;
5963 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5964 	ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5965 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5966 
5967 	if (!async) {
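		/*
		 * Wait for iwx_cmd_done() to wake us up on this
		 * descriptor, giving up after one second.
		 */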
5968 		err = tsleep_nsec(desc, PCATCH, "iwxcmd", SEC_TO_NSEC(1));
5969 		if (err == 0) {
5970 			/* if hardware is no longer up, return error */
5971 			if (generation != sc->sc_generation) {
5972 				err = ENXIO;
5973 				goto out;
5974 			}
5975 
5976 			/* Response buffer will be freed in iwx_free_resp(). */
5977 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5978 			sc->sc_cmd_resp_pkt[idx] = NULL;
5979 		} else if (generation == sc->sc_generation) {
5980 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
5981 			    sc->sc_cmd_resp_len[idx]);
5982 			sc->sc_cmd_resp_pkt[idx] = NULL;
5983 		}
5984 	}
5985  out:
5986 	splx(s);
5987 
5988 	return err;
5989 }
5990 
5991 int
5992 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
5993     uint16_t len, const void *data)
5994 {
5995 	struct iwx_host_cmd cmd = {
5996 		.id = id,
5997 		.len = { len, },
5998 		.data = { data, },
5999 		.flags = flags,
6000 	};
6001 
6002 	return iwx_send_cmd(sc, &cmd);
6003 }
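
/*
 * A minimal usage sketch (mirroring calls elsewhere in this file):
 * passing flags == 0 sends the command synchronously and sleeps until
 * the firmware acknowledges it, e.g.
 *
 *	err = iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
 *
 * whereas IWX_CMD_ASYNC in flags makes iwx_send_cmd() return without
 * sleeping.
 */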
6004 
6005 int
6006 iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
6007     uint32_t *status)
6008 {
6009 	struct iwx_rx_packet *pkt;
6010 	struct iwx_cmd_response *resp;
6011 	int err, resp_len;
6012 
6013 	KASSERT((cmd->flags & IWX_CMD_WANT_RESP) == 0);
6014 	cmd->flags |= IWX_CMD_WANT_RESP;
6015 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
6016 
6017 	err = iwx_send_cmd(sc, cmd);
6018 	if (err)
6019 		return err;
6020 
6021 	pkt = cmd->resp_pkt;
6022 	if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
6023 		return EIO;
6024 
6025 	resp_len = iwx_rx_packet_payload_len(pkt);
6026 	if (resp_len != sizeof(*resp)) {
6027 		iwx_free_resp(sc, cmd);
6028 		return EIO;
6029 	}
6030 
6031 	resp = (void *)pkt->data;
6032 	*status = le32toh(resp->status);
6033 	iwx_free_resp(sc, cmd);
6034 	return err;
6035 }
6036 
6037 int
6038 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
6039     const void *data, uint32_t *status)
6040 {
6041 	struct iwx_host_cmd cmd = {
6042 		.id = id,
6043 		.len = { len, },
6044 		.data = { data, },
6045 	};
6046 
6047 	return iwx_send_cmd_status(sc, &cmd, status);
6048 }
6049 
6050 void
6051 iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
6052 {
6053 	KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP);
6054 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
6055 	hcmd->resp_pkt = NULL;
6056 }
6057 
6058 void
6059 iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
6060 {
6061 	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
6062 	struct iwx_tx_data *data;
6063 
6064 	if (qid != IWX_DQA_CMD_QUEUE) {
6065 		return;	/* Not a command ack. */
6066 	}
6067 
6068 	data = &ring->data[idx];
6069 
6070 	if (data->m != NULL) {
6071 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
6072 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6073 		bus_dmamap_unload(sc->sc_dmat, data->map);
6074 		m_freem(data->m);
6075 		data->m = NULL;
6076 	}
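	/* Wake up the matching tsleep_nsec() in iwx_send_cmd(). */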
6077 	wakeup(&ring->desc[idx]);
6078 
6079 	DPRINTF(("%s: command 0x%x done\n", __func__, code));
6080 	if (ring->queued == 0) {
6081 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
6082 			DEVNAME(sc), code));
6083 	} else if (ring->queued > 0)
6084 		ring->queued--;
6085 }
6086 
6087 uint32_t
6088 iwx_fw_rateidx_ofdm(uint8_t rval)
6089 {
6090 	/* Firmware expects indices which match our 11a rate set. */
6091 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11a;
6092 	int i;
6093 
6094 	for (i = 0; i < rs->rs_nrates; i++) {
6095 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
6096 			return i;
6097 	}
6098 
6099 	return 0;
6100 }
6101 
6102 uint32_t
6103 iwx_fw_rateidx_cck(uint8_t rval)
6104 {
6105 	/* Firmware expects indices which match our 11b rate set. */
6106 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11b;
6107 	int i;
6108 
6109 	for (i = 0; i < rs->rs_nrates; i++) {
6110 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
6111 			return i;
6112 	}
6113 
6114 	return 0;
6115 }
6116 
6117 /*
6118  * Determine the Tx command flags and Tx rate+flags to use.
6119  * Return the selected Tx rate.
6120  */
6121 const struct iwx_rate *
6122 iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
6123     struct ieee80211_frame *wh, uint16_t *flags, uint32_t *rate_n_flags)
6124 {
6125 	struct ieee80211com *ic = &sc->sc_ic;
6126 	struct ieee80211_node *ni = &in->in_ni;
6127 	struct ieee80211_rateset *rs = &ni->ni_rates;
6128 	const struct iwx_rate *rinfo;
6129 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6130 	int min_ridx = iwx_rval2ridx(ieee80211_min_basic_rate(ic));
6131 	int ridx, rate_flags;
6132 	uint8_t rval;
6133 
6134 	*flags = 0;
6135 
6136 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
6137 	    type != IEEE80211_FC0_TYPE_DATA) {
6138 		/* for non-data, use the lowest supported rate */
6139 		ridx = min_ridx;
6140 		*flags |= IWX_TX_FLAGS_CMD_RATE;
6141 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
6142 		ridx = iwx_mcs2ridx[ni->ni_txmcs];
6143 	} else {
6144 		rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
6145 		ridx = iwx_rval2ridx(rval);
6146 		if (ridx < min_ridx)
6147 			ridx = min_ridx;
6148 	}
6149 
6150 	if ((ic->ic_flags & IEEE80211_F_RSNON) &&
6151 	    ni->ni_rsn_supp_state == RSNA_SUPP_PTKNEGOTIATING)
6152 		*flags |= IWX_TX_FLAGS_HIGH_PRI;
6153 
6154 	rinfo = &iwx_rates[ridx];
6155 
6156 	/*
6157 	 * Do not fill rate_n_flags if firmware controls the Tx rate.
6158 	 * For data frames we rely on Tx rate scaling in firmware by default.
6159 	 */
6160 	if ((*flags & IWX_TX_FLAGS_CMD_RATE) == 0) {
6161 		*rate_n_flags = 0;
6162 		return rinfo;
6163 	}
6164 
6165 	/*
6166 	 * Forcing a CCK/OFDM legacy rate is important for management frames.
6167 	 * Association will only succeed if we do this correctly.
6168 	 */
6169 	rate_flags = IWX_RATE_MCS_ANT_A_MSK;
6170 	if (IWX_RIDX_IS_CCK(ridx)) {
6171 		if (sc->sc_rate_n_flags_version >= 2)
6172 			rate_flags |= IWX_RATE_MCS_CCK_MSK;
6173 		else
6174 			rate_flags |= IWX_RATE_MCS_CCK_MSK_V1;
6175 	} else if (sc->sc_rate_n_flags_version >= 2)
6176 		rate_flags |= IWX_RATE_MCS_LEGACY_OFDM_MSK;
6177 
6178 	if (sc->sc_rate_n_flags_version >= 2) {
6179 		if (rate_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK) {
6180 			rate_flags |= (iwx_fw_rateidx_ofdm(rinfo->rate) &
6181 			    IWX_RATE_LEGACY_RATE_MSK);
6182 		} else {
6183 			rate_flags |= (iwx_fw_rateidx_cck(rinfo->rate) &
6184 			    IWX_RATE_LEGACY_RATE_MSK);
6185 		}
6186 	} else
6187 		rate_flags |= rinfo->plcp;
6188 
6189 	*rate_n_flags = rate_flags;
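
	/*
	 * Worked example (illustrative, not part of the original source):
	 * a 1 Mbit/s CCK management frame with rate_n_flags version >= 2
	 * ends up as IWX_RATE_MCS_ANT_A_MSK | IWX_RATE_MCS_CCK_MSK |
	 * iwx_fw_rateidx_cck(2), where 2 is the rate in 500 kbit/s units
	 * and the resulting rate index is 0.
	 */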
6190 
6191 	return rinfo;
6192 }
6193 
6194 void
6195 iwx_tx_update_byte_tbl(struct iwx_softc *sc, struct iwx_tx_ring *txq,
6196     int idx, uint16_t byte_cnt, uint16_t num_tbs)
6197 {
6198 	uint8_t filled_tfd_size, num_fetch_chunks;
6199 	uint16_t len = byte_cnt;
6200 	uint16_t bc_ent;
6201 
6202 	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
6203 			  num_tbs * sizeof(struct iwx_tfh_tb);
6204 	/*
6205 	 * filled_tfd_size contains the number of filled bytes in the TFD.
6206 	 * Dividing it by 64 gives the number of chunks to fetch to SRAM:
6207 	 * 0 for one chunk, 1 for two, and so on.
6208 	 * If, for example, the TFD contains only 3 TBs, then 32 bytes of
6209 	 * the TFD are used, and only one chunk of 64 bytes should be
6210 	 * fetched.
6211 	 */
6212 	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;
6213 
6214 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
6215 		struct iwx_gen3_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.vaddr;
6216 		/* Starting from AX210, the HW expects bytes */
6217 		bc_ent = htole16(len | (num_fetch_chunks << 14));
6218 		scd_bc_tbl[idx].tfd_offset = bc_ent;
6219 	} else {
6220 		struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
6221 		/* Before AX210, the HW expects DW */
6222 		len = howmany(len, 4);
6223 		bc_ent = htole16(len | (num_fetch_chunks << 12));
6224 		scd_bc_tbl->tfd_offset[idx] = bc_ent;
6225 	}
6226 
6227 	bus_dmamap_sync(sc->sc_dmat, txq->bc_tbl.map, 0,
6228 	    txq->bc_tbl.map->dm_mapsize, BUS_DMASYNC_PREWRITE);
6229 }
6230 
6231 int
6232 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
6233 {
6234 	struct ieee80211com *ic = &sc->sc_ic;
6235 	struct iwx_node *in = (void *)ni;
6236 	struct iwx_tx_ring *ring;
6237 	struct iwx_tx_data *data;
6238 	struct iwx_tfh_tfd *desc;
6239 	struct iwx_device_cmd *cmd;
6240 	struct ieee80211_frame *wh;
6241 	struct ieee80211_key *k = NULL;
6242 	const struct iwx_rate *rinfo;
6243 	uint64_t paddr;
6244 	u_int hdrlen;
6245 	bus_dma_segment_t *seg;
6246 	uint32_t rate_n_flags;
6247 	uint16_t num_tbs, flags, offload_assist = 0;
6248 	uint8_t type, subtype;
6249 	int i, totlen, err, pad, qid;
6250 	size_t txcmd_size;
6251 
6252 	wh = mtod(m, struct ieee80211_frame *);
6253 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6254 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
6255 	if (type == IEEE80211_FC0_TYPE_CTL)
6256 		hdrlen = sizeof(struct ieee80211_frame_min);
6257 	else
6258 		hdrlen = ieee80211_get_hdrlen(wh);
6259 
6260 	qid = sc->first_data_qid;
6261 
6262 	/* Put QoS frames on the data queue which maps to their TID. */
6263 	if (ieee80211_has_qos(wh)) {
6264 		struct ieee80211_tx_ba *ba;
6265 		uint16_t qos = ieee80211_get_qos(wh);
6266 		uint8_t tid = qos & IEEE80211_QOS_TID;
6267 
6268 		ba = &ni->ni_tx_ba[tid];
6269 		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6270 		    type == IEEE80211_FC0_TYPE_DATA &&
6271 		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
6272 		    sc->aggqid[tid] != 0 &&
6273 		    ba->ba_state == IEEE80211_BA_AGREED) {
6274 			qid = sc->aggqid[tid];
6275 		}
6276 	}
6277 
6278 	ring = &sc->txq[qid];
6279 	desc = &ring->desc[ring->cur];
6280 	memset(desc, 0, sizeof(*desc));
6281 	data = &ring->data[ring->cur];
6282 
6283 	cmd = &ring->cmd[ring->cur];
6284 	cmd->hdr.code = IWX_TX_CMD;
6285 	cmd->hdr.flags = 0;
6286 	cmd->hdr.qid = ring->qid;
6287 	cmd->hdr.idx = ring->cur;
6288 
6289 	rinfo = iwx_tx_fill_cmd(sc, in, wh, &flags, &rate_n_flags);
6290 
6291 #if NBPFILTER > 0
6292 	if (sc->sc_drvbpf != NULL) {
6293 		struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
6294 		uint16_t chan_flags;
6295 
6296 		tap->wt_flags = 0;
6297 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
6298 		chan_flags = ni->ni_chan->ic_flags;
6299 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
6300 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
6301 			chan_flags &= ~IEEE80211_CHAN_HT;
6302 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
6303 		}
6304 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
6305 			chan_flags &= ~IEEE80211_CHAN_VHT;
6306 		tap->wt_chan_flags = htole16(chan_flags);
6307 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6308 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6309 		    type == IEEE80211_FC0_TYPE_DATA &&
6310 		    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
6311 			tap->wt_rate = (0x80 | rinfo->ht_plcp);
6312 		} else
6313 			tap->wt_rate = rinfo->rate;
6314 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
6315 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
6316 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
6317 
6318 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
6319 		    m, BPF_DIRECTION_OUT);
6320 	}
6321 #endif
6322 
6323 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
6324 		k = ieee80211_get_txkey(ic, wh, ni);
6325 		if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
6326 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
6327 				return ENOBUFS;
6328 			/* 802.11 header may have moved. */
6329 			wh = mtod(m, struct ieee80211_frame *);
6330 			flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
6331 		} else {
6332 			k->k_tsc++;
6333 			/* Hardware increments PN internally and adds IV. */
6334 		}
6335 	} else
6336 		flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
6337 
6338 	totlen = m->m_pkthdr.len;
6339 
6340 	if (hdrlen & 3) {
6341 		/* First segment length must be a multiple of 4. */
6342 		pad = 4 - (hdrlen & 3);
6343 		offload_assist |= IWX_TX_CMD_OFFLD_PAD;
6344 	} else
6345 		pad = 0;
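
	/*
	 * Illustrative (not part of the original source): a QoS data header
	 * is 26 bytes, so hdrlen & 3 == 2 and pad == 2; the
	 * IWX_TX_CMD_OFFLD_PAD offload-assist flag announces this padding
	 * to the firmware.
	 */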
6346 
6347 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
6348 		struct iwx_tx_cmd_gen3 *tx = (void *)cmd->data;
6349 		memset(tx, 0, sizeof(*tx));
6350 		tx->len = htole16(totlen);
6351 		tx->offload_assist = htole32(offload_assist);
6352 		tx->flags = htole16(flags);
6353 		tx->rate_n_flags = htole32(rate_n_flags);
6354 		memcpy(tx->hdr, wh, hdrlen);
6355 		txcmd_size = sizeof(*tx);
6356 	} else {
6357 		struct iwx_tx_cmd_gen2 *tx = (void *)cmd->data;
6358 		memset(tx, 0, sizeof(*tx));
6359 		tx->len = htole16(totlen);
6360 		tx->offload_assist = htole16(offload_assist);
6361 		tx->flags = htole32(flags);
6362 		tx->rate_n_flags = htole32(rate_n_flags);
6363 		memcpy(tx->hdr, wh, hdrlen);
6364 		txcmd_size = sizeof(*tx);
6365 	}
6366 
6367 	/* Trim 802.11 header. */
6368 	m_adj(m, hdrlen);
6369 
6370 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6371 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6372 	if (err && err != EFBIG) {
6373 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
6374 		m_freem(m);
6375 		return err;
6376 	}
6377 	if (err) {
6378 		/* Too many DMA segments, linearize mbuf. */
6379 		if (m_defrag(m, M_DONTWAIT)) {
6380 			m_freem(m);
6381 			return ENOBUFS;
6382 		}
6383 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6384 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6385 		if (err) {
6386 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
6387 			    err);
6388 			m_freem(m);
6389 			return err;
6390 		}
6391 	}
6392 	data->m = m;
6393 	data->in = in;
6394 
6395 	/* Fill TX descriptor. */
6396 	num_tbs = 2 + data->map->dm_nsegs;
6397 	desc->num_tbs = htole16(num_tbs);
6398 
6399 	desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
6400 	paddr = htole64(data->cmd_paddr);
6401 	memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
6402 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le16toh(desc->tbs[0].tb_len)) >> 32)
6403 		DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
6404 	desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
6405 	    txcmd_size + hdrlen + pad - IWX_FIRST_TB_SIZE);
6406 	paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
6407 	memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
6408 
6409 	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le16toh(desc->tbs[1].tb_len)) >> 32)
6410 		DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
6411 
6412 	/* Other DMA segments are for data payload. */
6413 	seg = data->map->dm_segs;
6414 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
6415 		desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
6416 		paddr = htole64(seg->ds_addr);
6417 		memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
6418 		if (seg->ds_addr >> 32 != (seg->ds_addr + le16toh(desc->tbs[i + 2].tb_len)) >> 32)
6419 			DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
6420 	}
6421 
6422 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
6423 	    BUS_DMASYNC_PREWRITE);
6424 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6425 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6426 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
6427 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6428 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6429 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
6430 
6431 	iwx_tx_update_byte_tbl(sc, ring, ring->cur, totlen, num_tbs);
6432 
6433 	/* Kick TX ring. */
6434 	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
6435 	ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
6436 	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
6437 
6438 	/* Mark TX ring as full if we reach a certain threshold. */
6439 	if (++ring->queued > IWX_TX_RING_HIMARK) {
6440 		sc->qfullmsk |= 1 << ring->qid;
6441 	}
6442 
6443 	if (ic->ic_if.if_flags & IFF_UP)
6444 		sc->sc_tx_timer[ring->qid] = 15;
6445 
6446 	return 0;
6447 }
6448 
6449 int
6450 iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
6451 {
6452 	struct iwx_rx_packet *pkt;
6453 	struct iwx_tx_path_flush_cmd_rsp *resp;
6454 	struct iwx_tx_path_flush_cmd flush_cmd = {
6455 		.sta_id = htole32(sta_id),
6456 		.tid_mask = htole16(tids),
6457 	};
6458 	struct iwx_host_cmd hcmd = {
6459 		.id = IWX_TXPATH_FLUSH,
6460 		.len = { sizeof(flush_cmd), },
6461 		.data = { &flush_cmd, },
6462 		.flags = IWX_CMD_WANT_RESP,
6463 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
6464 	};
6465 	int err, resp_len, i, num_flushed_queues;
6466 
6467 	err = iwx_send_cmd(sc, &hcmd);
6468 	if (err)
6469 		return err;
6470 
6471 	pkt = hcmd.resp_pkt;
6472 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
6473 		err = EIO;
6474 		goto out;
6475 	}
6476 
6477 	resp_len = iwx_rx_packet_payload_len(pkt);
6478 	if (resp_len != sizeof(*resp)) {
6479 		err = EIO;
6480 		goto out;
6481 	}
6482 
6483 	resp = (void *)pkt->data;
6484 
6485 	if (le16toh(resp->sta_id) != sta_id) {
6486 		err = EIO;
6487 		goto out;
6488 	}
6489 
6490 	num_flushed_queues = le16toh(resp->num_flushed_queues);
6491 	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
6492 		err = EIO;
6493 		goto out;
6494 	}
6495 
6496 	for (i = 0; i < num_flushed_queues; i++) {
6497 		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
6498 		uint16_t tid = le16toh(queue_info->tid);
6499 		uint16_t read_after = le16toh(queue_info->read_after_flush);
6500 		uint16_t qid = le16toh(queue_info->queue_num);
6501 		struct iwx_tx_ring *txq;
6502 
6503 		if (qid >= nitems(sc->txq))
6504 			continue;
6505 
6506 		txq = &sc->txq[qid];
6507 		if (tid != txq->tid)
6508 			continue;
6509 
6510 		iwx_txq_advance(sc, txq, read_after);
6511 	}
6512 out:
6513 	iwx_free_resp(sc, &hcmd);
6514 	return err;
6515 }
6516 
6517 #define IWX_FLUSH_WAIT_MS	2000
6518 
6519 int
6520 iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
6521 {
6522 	struct iwx_add_sta_cmd cmd;
6523 	int err;
6524 	uint32_t status;
6525 
6526 	/* No need to drain with MLD firmware. */
6527 	if (sc->sc_use_mld_api)
6528 		return 0;
6529 
6530 	memset(&cmd, 0, sizeof(cmd));
6531 	cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6532 	    in->in_color));
6533 	cmd.sta_id = IWX_STATION_ID;
6534 	cmd.add_modify = IWX_STA_MODE_MODIFY;
6535 	cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
6536 	cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
6537 
6538 	status = IWX_ADD_STA_SUCCESS;
6539 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
6540 	    sizeof(cmd), &cmd, &status);
6541 	if (err) {
6542 		printf("%s: could not update sta (error %d)\n",
6543 		    DEVNAME(sc), err);
6544 		return err;
6545 	}
6546 
6547 	switch (status & IWX_ADD_STA_STATUS_MASK) {
6548 	case IWX_ADD_STA_SUCCESS:
6549 		break;
6550 	default:
6551 		err = EIO;
6552 		printf("%s: Couldn't %s draining for station\n",
6553 		    DEVNAME(sc), drain ? "enable" : "disable");
6554 		break;
6555 	}
6556 
6557 	return err;
6558 }
6559 
6560 int
6561 iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
6562 {
6563 	int err;
6564 
6565 	splassert(IPL_NET);
6566 
6567 	sc->sc_flags |= IWX_FLAG_TXFLUSH;
6568 
6569 	err = iwx_drain_sta(sc, in, 1);
6570 	if (err)
6571 		goto done;
6572 
6573 	err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
6574 	if (err) {
6575 		printf("%s: could not flush Tx path (error %d)\n",
6576 		    DEVNAME(sc), err);
6577 		goto done;
6578 	}
6579 
6580 	err = iwx_drain_sta(sc, in, 0);
6581 done:
6582 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
6583 	return err;
6584 }
6585 
6586 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC    25
6587 
6588 int
6589 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
6590     struct iwx_beacon_filter_cmd *cmd)
6591 {
6592 	return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
6593 	    0, sizeof(struct iwx_beacon_filter_cmd), cmd);
6594 }
6595 
6596 int
6597 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
6598 {
6599 	struct iwx_beacon_filter_cmd cmd = {
6600 		IWX_BF_CMD_CONFIG_DEFAULTS,
6601 		.bf_enable_beacon_filter = htole32(1),
6602 		.ba_enable_beacon_abort = htole32(enable),
6603 	};
6604 
6605 	if (!sc->sc_bf.bf_enabled)
6606 		return 0;
6607 
6608 	sc->sc_bf.ba_enabled = enable;
6609 	return iwx_beacon_filter_send_cmd(sc, &cmd);
6610 }
6611 
6612 void
6613 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
6614     struct iwx_mac_power_cmd *cmd)
6615 {
6616 	struct ieee80211com *ic = &sc->sc_ic;
6617 	struct ieee80211_node *ni = &in->in_ni;
6618 	int dtim_period, dtim_msec, keep_alive;
6619 
6620 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6621 	    in->in_color));
6622 	if (ni->ni_dtimperiod)
6623 		dtim_period = ni->ni_dtimperiod;
6624 	else
6625 		dtim_period = 1;
6626 
6627 	/*
6628 	 * Regardless of power management state the driver must set
6629 	 * keep alive period. FW will use it for sending keep alive NDPs
6630 	 * immediately after association. Check that keep alive period
6631 	 * is at least 3 * DTIM.
6632 	 */
6633 	dtim_msec = dtim_period * ni->ni_intval;
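	/* Enforce the 25 second minimum, then convert to whole seconds. */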
6634 	keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
6635 	keep_alive = roundup(keep_alive, 1000) / 1000;
6636 	cmd->keep_alive_seconds = htole16(keep_alive);
6637 
6638 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6639 		cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6640 }
6641 
6642 int
6643 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
6644 {
6645 	int err;
6646 	int ba_enable;
6647 	struct iwx_mac_power_cmd cmd;
6648 
6649 	memset(&cmd, 0, sizeof(cmd));
6650 
6651 	iwx_power_build_cmd(sc, in, &cmd);
6652 
6653 	err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
6654 	    sizeof(cmd), &cmd);
6655 	if (err != 0)
6656 		return err;
6657 
6658 	ba_enable = !!(cmd.flags &
6659 	    htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
6660 	return iwx_update_beacon_abort(sc, in, ba_enable);
6661 }
6662 
6663 int
6664 iwx_power_update_device(struct iwx_softc *sc)
6665 {
6666 	struct iwx_device_power_cmd cmd = { };
6667 	struct ieee80211com *ic = &sc->sc_ic;
6668 
6669 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6670 		cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6671 
6672 	return iwx_send_cmd_pdu(sc,
6673 	    IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
6674 }
6675 
6676 int
6677 iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
6678 {
6679 	struct iwx_beacon_filter_cmd cmd = {
6680 		IWX_BF_CMD_CONFIG_DEFAULTS,
6681 		.bf_enable_beacon_filter = htole32(1),
6682 		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
6683 	};
6684 	int err;
6685 
6686 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
6687 	if (err == 0)
6688 		sc->sc_bf.bf_enabled = 1;
6689 
6690 	return err;
6691 }
6692 
6693 int
6694 iwx_disable_beacon_filter(struct iwx_softc *sc)
6695 {
6696 	struct iwx_beacon_filter_cmd cmd;
6697 	int err;
6698 
6699 	memset(&cmd, 0, sizeof(cmd));
6700 
6701 	err = iwx_beacon_filter_send_cmd(sc, &cmd);
6702 	if (err == 0)
6703 		sc->sc_bf.bf_enabled = 0;
6704 
6705 	return err;
6706 }
6707 
6708 int
6709 iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
6710 {
6711 	struct iwx_add_sta_cmd add_sta_cmd;
6712 	int err;
6713 	uint32_t status, aggsize;
6714 	const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >>
6715 		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT);
6716 	struct ieee80211com *ic = &sc->sc_ic;
6717 
6718 	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
6719 		panic("STA already added");
6720 
6721 	if (sc->sc_use_mld_api)
6722 		return iwx_mld_add_sta_cmd(sc, in, update);
6723 
6724 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
6725 
6726 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6727 		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
6728 		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
6729 	} else {
6730 		add_sta_cmd.sta_id = IWX_STATION_ID;
6731 		add_sta_cmd.station_type = IWX_STA_LINK;
6732 	}
6733 	add_sta_cmd.mac_id_n_color
6734 	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
6735 	if (!update) {
6736 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
6737 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
6738 			    etheranyaddr);
6739 		else
6740 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
6741 			    in->in_macaddr);
6742 	}
6743 	add_sta_cmd.add_modify = update ? 1 : 0;
6744 	add_sta_cmd.station_flags_msk
6745 	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);
6746 
6747 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
6748 		add_sta_cmd.station_flags_msk
6749 		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
6750 		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);
6751 
6752 		if (iwx_mimo_enabled(sc)) {
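			/*
			 * Advertise MIMO only if the peer supports Rx rates
			 * on a second (or third) spatial stream.
			 */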
6753 			if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
6754 				uint16_t rx_mcs = (in->in_ni.ni_vht_rxmcs &
6755 				    IEEE80211_VHT_MCS_FOR_SS_MASK(2)) >>
6756 				    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2);
6757 				if (rx_mcs != IEEE80211_VHT_MCS_SS_NOT_SUPP) {
6758 					add_sta_cmd.station_flags |=
6759 					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
6760 				}
6761 			} else {
6762 				if (in->in_ni.ni_rxmcs[1] != 0) {
6763 					add_sta_cmd.station_flags |=
6764 					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
6765 				}
6766 				if (in->in_ni.ni_rxmcs[2] != 0) {
6767 					add_sta_cmd.station_flags |=
6768 					    htole32(IWX_STA_FLG_MIMO_EN_MIMO3);
6769 				}
6770 			}
6771 		}
6772 
6773 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
6774 		    ieee80211_node_supports_ht_chan40(&in->in_ni)) {
6775 			add_sta_cmd.station_flags |= htole32(
6776 			    IWX_STA_FLG_FAT_EN_40MHZ);
6777 		}
6778 
6779 		if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
6780 			if (IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
6781 			    ieee80211_node_supports_vht_chan80(&in->in_ni)) {
6782 				add_sta_cmd.station_flags |= htole32(
6783 				    IWX_STA_FLG_FAT_EN_80MHZ);
6784 			}
6785 			aggsize = (in->in_ni.ni_vhtcaps &
6786 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_MASK) >>
6787 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT;
6788 		} else {
6789 			aggsize = (in->in_ni.ni_ampdu_param &
6790 			    IEEE80211_AMPDU_PARAM_LE);
6791 		}
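		/* Cap the A-MPDU length exponent at the 64k maximum. */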
6792 		if (aggsize > max_aggsize)
6793 			aggsize = max_aggsize;
6794 		add_sta_cmd.station_flags |= htole32((aggsize <<
6795 		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT) &
6796 		    IWX_STA_FLG_MAX_AGG_SIZE_MSK);
6797 
6798 		switch (in->in_ni.ni_ampdu_param & IEEE80211_AMPDU_PARAM_SS) {
6799 		case IEEE80211_AMPDU_PARAM_SS_2:
6800 			add_sta_cmd.station_flags
6801 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
6802 			break;
6803 		case IEEE80211_AMPDU_PARAM_SS_4:
6804 			add_sta_cmd.station_flags
6805 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
6806 			break;
6807 		case IEEE80211_AMPDU_PARAM_SS_8:
6808 			add_sta_cmd.station_flags
6809 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
6810 			break;
6811 		case IEEE80211_AMPDU_PARAM_SS_16:
6812 			add_sta_cmd.station_flags
6813 			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
6814 			break;
6815 		default:
6816 			break;
6817 		}
6818 	}
6819 
6820 	status = IWX_ADD_STA_SUCCESS;
6821 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
6822 	    &add_sta_cmd, &status);
6823 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
6824 		err = EIO;
6825 
6826 	return err;
6827 }
6828 
6829 void
6830 iwx_mld_modify_link_fill(struct iwx_softc *sc, struct iwx_node *in,
6831     struct iwx_link_config_cmd *cmd, int changes, int active)
6832 {
6833 #define IWX_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6834 	struct ieee80211com *ic = &sc->sc_ic;
6835 	struct ieee80211_node *ni = &in->in_ni;
6836 	int cck_ack_rates, ofdm_ack_rates;
6837 	int i;
6838 
6839 	cmd->link_id = htole32(0);
6840 	cmd->mac_id = htole32(in->in_id);
6841 	KASSERT(in->in_phyctxt);
6842 	cmd->phy_id = htole32(in->in_phyctxt->id);
6843 	IEEE80211_ADDR_COPY(cmd->local_link_addr, ic->ic_myaddr);
6844 	cmd->active = htole32(active);
6845 
6846 	iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
6847 	cmd->cck_rates = htole32(cck_ack_rates);
6848 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
6849 	cmd->cck_short_preamble
6850 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE) ? 1 : 0);
6851 	cmd->short_slot
6852 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT) ? 1 : 0);
6853 
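	/* Program EDCA parameters into each access category's Tx FIFO. */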
6854 	for (i = 0; i < EDCA_NUM_AC; i++) {
6855 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
6856 		int txf = iwx_ac_to_tx_fifo[i];
6857 
6858 		cmd->ac[txf].cw_min = htole16(IWX_EXP2(ac->ac_ecwmin));
6859 		cmd->ac[txf].cw_max = htole16(IWX_EXP2(ac->ac_ecwmax));
6860 		cmd->ac[txf].aifsn = ac->ac_aifsn;
6861 		cmd->ac[txf].fifos_mask = (1 << txf);
6862 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
6863 	}
6864 	if (ni->ni_flags & IEEE80211_NODE_QOS)
6865 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
6866 
6867 	if (ni->ni_flags & IEEE80211_NODE_HT) {
6868 		enum ieee80211_htprot htprot =
6869 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
6870 		switch (htprot) {
6871 		case IEEE80211_HTPROT_NONE:
6872 			break;
6873 		case IEEE80211_HTPROT_NONMEMBER:
6874 		case IEEE80211_HTPROT_NONHT_MIXED:
6875 			cmd->protection_flags |=
6876 			    htole32(IWX_LINK_PROT_FLG_HT_PROT |
6877 			        IWX_LINK_PROT_FLG_FAT_PROT);
6878 			break;
6879 		case IEEE80211_HTPROT_20MHZ:
6880 			if (in->in_phyctxt &&
6881 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
6882 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
6883 				cmd->protection_flags |=
6884 				    htole32(IWX_LINK_PROT_FLG_HT_PROT |
6885 				        IWX_LINK_PROT_FLG_FAT_PROT);
6886 			}
6887 			break;
6888 		default:
6889 			break;
6890 		}
6891 
6892 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
6893 	}
6894 	if (ic->ic_flags & IEEE80211_F_USEPROT)
6895 		cmd->protection_flags |= htole32(IWX_LINK_PROT_FLG_TGG_PROTECT);
6896 
6897 	cmd->bi = htole32(ni->ni_intval);
6898 	cmd->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
6899 
6900 	cmd->modify_mask = htole32(changes);
6901 	cmd->flags = 0;
6902 	cmd->flags_mask = 0;
6903 	cmd->spec_link_id = 0;
6904 	cmd->listen_lmac = 0;
6905 	cmd->action = IWX_FW_CTXT_ACTION_MODIFY;
6906 #undef IWX_EXP2
6907 }
6908 
6909 int
6910 iwx_mld_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
6911 {
6912 	struct ieee80211com *ic = &sc->sc_ic;
6913 	struct iwx_link_config_cmd link_cmd;
6914 	struct iwx_mvm_sta_cfg_cmd sta_cmd;
6915 	uint32_t aggsize;
6916 	const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >>
6917 		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT);
6918 	int err, changes;
6919 
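	/*
	 * With MLD firmware, adding a station takes three steps: create
	 * the link, activate it with current parameters, and finally
	 * configure the station entry itself.
	 */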
6920 	if (!update) {
6921 		memset(&link_cmd, 0, sizeof(link_cmd));
6922 		link_cmd.link_id = htole32(0);
6923 		link_cmd.mac_id = htole32(in->in_id);
6924 		link_cmd.spec_link_id = 0;
6925 		if (in->in_phyctxt)
6926 			link_cmd.phy_id = htole32(in->in_phyctxt->id);
6927 		else
6928 			link_cmd.phy_id = htole32(IWX_FW_CTXT_INVALID);
6929 		IEEE80211_ADDR_COPY(link_cmd.local_link_addr, ic->ic_myaddr);
6930 		link_cmd.listen_lmac = 0;
6931 		link_cmd.action = IWX_FW_CTXT_ACTION_ADD;
6932 
6933 		err = iwx_send_cmd_pdu(sc,
6934 		    IWX_WIDE_ID(IWX_MAC_CONF_GROUP, IWX_LINK_CONFIG_CMD),
6935 		    0, sizeof(link_cmd), &link_cmd);
6936 		if (err)
6937 			return err;
6938 	}
6939 
6940 	changes = IWX_LINK_CONTEXT_MODIFY_ACTIVE;
6941 	changes |= IWX_LINK_CONTEXT_MODIFY_RATES_INFO;
6942 	if (update) {
6943 		changes |= IWX_LINK_CONTEXT_MODIFY_PROTECT_FLAGS;
6944 		changes |= IWX_LINK_CONTEXT_MODIFY_QOS_PARAMS;
6945 		changes |= IWX_LINK_CONTEXT_MODIFY_BEACON_TIMING;
6946 	}
6947 
6948 	memset(&link_cmd, 0, sizeof(link_cmd));
6949 	iwx_mld_modify_link_fill(sc, in, &link_cmd, changes, 1);
6950 	err = iwx_send_cmd_pdu(sc,
6951 	    IWX_WIDE_ID(IWX_MAC_CONF_GROUP, IWX_LINK_CONFIG_CMD),
6952 	    0, sizeof(link_cmd), &link_cmd);
6953 	if (err)
6954 		return err;
6955 
6956 	memset(&sta_cmd, 0, sizeof(sta_cmd));
6957 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6958 		sta_cmd.sta_id = htole32(IWX_MONITOR_STA_ID);
6959 		sta_cmd.station_type = htole32(IWX_STA_GENERAL_PURPOSE);
6960 	} else {
6961 		sta_cmd.sta_id = htole32(IWX_STATION_ID);
6962 		sta_cmd.station_type = htole32(IWX_STA_LINK);
6963 	}
6964 	sta_cmd.link_id = htole32(0);
6965 	IEEE80211_ADDR_COPY(sta_cmd.peer_mld_address, in->in_macaddr);
6966 	IEEE80211_ADDR_COPY(sta_cmd.peer_link_address, in->in_macaddr);
6967 	sta_cmd.assoc_id = htole32(in->in_ni.ni_associd);
6968 
6969 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
6970 		if (iwx_mimo_enabled(sc))
6971 			sta_cmd.mimo = htole32(1);
6972 
6973 		if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
6974 			aggsize = (in->in_ni.ni_vhtcaps &
6975 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_MASK) >>
6976 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT;
6977 		} else {
6978 			aggsize = (in->in_ni.ni_ampdu_param &
6979 			    IEEE80211_AMPDU_PARAM_LE);
6980 		}
6981 		if (aggsize > max_aggsize)
6982 			aggsize = max_aggsize;
6983 
6984 		sta_cmd.tx_ampdu_spacing = htole32(0);
6985 		sta_cmd.tx_ampdu_max_size = aggsize;
6986 	}
6987 
6988 	return iwx_send_cmd_pdu(sc,
6989 	    IWX_WIDE_ID(IWX_MAC_CONF_GROUP, IWX_STA_CONFIG_CMD),
6990 	    0, sizeof(sta_cmd), &sta_cmd);
6991 }
6992 
6993 int
6994 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
6995 {
6996 	struct ieee80211com *ic = &sc->sc_ic;
6997 	struct iwx_rm_sta_cmd rm_sta_cmd;
6998 	int err;
6999 
7000 	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
7001 		panic("sta already removed");
7002 
7003 	if (sc->sc_use_mld_api)
7004 		return iwx_mld_rm_sta_cmd(sc, in);
7005 
7006 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
7007 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7008 		rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
7009 	else
7010 		rm_sta_cmd.sta_id = IWX_STATION_ID;
7011 
7012 	err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
7013 	    &rm_sta_cmd);
7014 
7015 	return err;
7016 }
7017 
7018 int
7019 iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
7020 {
7021 	struct ieee80211com *ic = &sc->sc_ic;
7022 	struct ieee80211_node *ni = &in->in_ni;
7023 	int err, i, cmd_ver;
7024 
7025 	err = iwx_flush_sta(sc, in);
7026 	if (err) {
7027 		printf("%s: could not flush Tx path (error %d)\n",
7028 		    DEVNAME(sc), err);
7029 		return err;
7030 	}
7031 
7032 	/*
7033 	 * New SCD_QUEUE_CONFIG API requires explicit queue removal
7034 	 * before a station gets removed.
7035 	 */
7036 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7037 	    IWX_SCD_QUEUE_CONFIG_CMD);
7038 	if (cmd_ver != 0 && cmd_ver != IWX_FW_CMD_VER_UNKNOWN) {
7039 		err = iwx_disable_mgmt_queue(sc);
7040 		if (err)
7041 			return err;
7042 		for (i = IWX_FIRST_AGG_TX_QUEUE;
7043 		    i < IWX_LAST_AGG_TX_QUEUE; i++) {
7044 			struct iwx_tx_ring *ring = &sc->txq[i];
7045 			if ((sc->qenablemsk & (1 << i)) == 0)
7046 				continue;
7047 			err = iwx_disable_txq(sc, IWX_STATION_ID,
7048 			    ring->qid, ring->tid);
7049 			if (err) {
7050 				printf("%s: could not disable Tx queue %d "
7051 				    "(error %d)\n", DEVNAME(sc), ring->qid,
7052 				    err);
7053 				return err;
7054 			}
7055 		}
7056 	}
7057 
7058 	err = iwx_rm_sta_cmd(sc, in);
7059 	if (err) {
7060 		printf("%s: could not remove STA (error %d)\n",
7061 		    DEVNAME(sc), err);
7062 		return err;
7063 	}
7064 
7065 	in->in_flags = 0;
7066 
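	/* Forget all Rx/Tx aggregation state now that the station is gone. */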
7067 	sc->sc_rx_ba_sessions = 0;
7068 	sc->ba_rx.start_tidmask = 0;
7069 	sc->ba_rx.stop_tidmask = 0;
7070 	memset(sc->aggqid, 0, sizeof(sc->aggqid));
7071 	sc->ba_tx.start_tidmask = 0;
7072 	sc->ba_tx.stop_tidmask = 0;
7073 	for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
7074 		sc->qenablemsk &= ~(1 << i);
7075 	for (i = 0; i < IEEE80211_NUM_TID; i++) {
7076 		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
7077 		if (ba->ba_state != IEEE80211_BA_AGREED)
7078 			continue;
7079 		ieee80211_delba_request(ic, ni, 0, 1, i);
7080 	}
7081 
7082 	return 0;
7083 }
7084 
7085 int
7086 iwx_mld_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
7087 {
7088 	struct iwx_mvm_remove_sta_cmd sta_cmd;
7089 	struct iwx_link_config_cmd link_cmd;
7090 	int err;
7091 
7092 	memset(&sta_cmd, 0, sizeof(sta_cmd));
7093 	sta_cmd.sta_id = htole32(IWX_STATION_ID);
7094 
7095 	err = iwx_send_cmd_pdu(sc,
7096 	    IWX_WIDE_ID(IWX_MAC_CONF_GROUP, IWX_STA_REMOVE_CMD),
7097 	    0, sizeof(sta_cmd), &sta_cmd);
7098 	if (err)
7099 		return err;
7100 
7101 	memset(&link_cmd, 0, sizeof(link_cmd));
7102 	iwx_mld_modify_link_fill(sc, in, &link_cmd,
7103 	    IWX_LINK_CONTEXT_MODIFY_ACTIVE, 0);
7104 	err = iwx_send_cmd_pdu(sc,
7105 	    IWX_WIDE_ID(IWX_MAC_CONF_GROUP, IWX_LINK_CONFIG_CMD),
7106 	    0, sizeof(link_cmd), &link_cmd);
7107 	if (err)
7108 		return err;
7109 
7110 	memset(&link_cmd, 0, sizeof(link_cmd));
7111 	link_cmd.link_id = htole32(0);
7112 	link_cmd.spec_link_id = 0;
7113 	link_cmd.action = IWX_FW_CTXT_ACTION_REMOVE;
7114 
7115 	return iwx_send_cmd_pdu(sc,
7116 	    IWX_WIDE_ID(IWX_MAC_CONF_GROUP, IWX_LINK_CONFIG_CMD),
7117 	    0, sizeof(link_cmd), &link_cmd);
7118 }
7119 
7120 uint8_t
7121 iwx_umac_scan_fill_channels(struct iwx_softc *sc,
7122     struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
7123     int n_ssids, uint32_t channel_cfg_flags)
7124 {
7125 	struct ieee80211com *ic = &sc->sc_ic;
7126 	struct ieee80211_channel *c;
7127 	uint8_t nchan;
7128 
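	/*
	 * Entry 0 of the channel table is unused; walk the rest and skip
	 * channels the stack has not configured (ic_flags == 0).
	 */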
7129 	for (nchan = 0, c = &ic->ic_channels[1];
7130 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7131 	    nchan < chan_nitems &&
7132 	    nchan < sc->sc_capa_n_scan_channels;
7133 	    c++) {
7134 		uint8_t channel_num;
7135 
7136 		if (c->ic_flags == 0)
7137 			continue;
7138 
7139 		channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
7140 		if (isset(sc->sc_ucode_api,
7141 		    IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
7142 			chan->v2.channel_num = channel_num;
7143 			if (IEEE80211_IS_CHAN_2GHZ(c))
7144 				chan->v2.band = IWX_PHY_BAND_24;
7145 			else
7146 				chan->v2.band = IWX_PHY_BAND_5;
7147 			chan->v2.iter_count = 1;
7148 			chan->v2.iter_interval = 0;
7149 		} else {
7150 			chan->v1.channel_num = channel_num;
7151 			chan->v1.iter_count = 1;
7152 			chan->v1.iter_interval = htole16(0);
7153 		}
7154 
7155 		chan->flags = htole32(channel_cfg_flags);
7156 		chan++;
7157 		nchan++;
7158 	}
7159 
7160 	return nchan;
7161 }
7162 
7163 int
7164 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
7165 {
7166 	struct ieee80211com *ic = &sc->sc_ic;
7167 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
7168 	struct ieee80211_rateset *rs;
7169 	size_t remain = sizeof(preq->buf);
7170 	uint8_t *frm, *pos;
7171 
7172 	memset(preq, 0, sizeof(*preq));
7173 
7174 	if (remain < sizeof(*wh) + 2)
7175 		return ENOBUFS;
7176 
7177 	/*
7178 	 * Build a probe request frame.  Most of the following code is a
7179 	 * copy & paste of what is done in net80211.
7180 	 */
7181 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
7182 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
7183 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
7184 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
7185 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
7186 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
7187 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
7188 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
7189 
7190 	frm = (uint8_t *)(wh + 1);
7191 	*frm++ = IEEE80211_ELEMID_SSID;
7192 	*frm++ = 0;
7193 	/* hardware inserts SSID */
7194 
7195 	/* Tell the firmware where the MAC header is. */
7196 	preq->mac_header.offset = 0;
7197 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
7198 	remain -= frm - (uint8_t *)wh;
7199 
7200 	/* Fill in 2GHz IEs and tell firmware where they are. */
7201 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
7202 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
7203 		if (remain < 4 + rs->rs_nrates)
7204 			return ENOBUFS;
7205 	} else if (remain < 2 + rs->rs_nrates)
7206 		return ENOBUFS;
7207 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
7208 	pos = frm;
7209 	frm = ieee80211_add_rates(frm, rs);
7210 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
7211 		frm = ieee80211_add_xrates(frm, rs);
7212 	remain -= frm - pos;
7213 
7214 	if (isset(sc->sc_enabled_capa,
7215 	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
7216 		if (remain < 3)
7217 			return ENOBUFS;
7218 		*frm++ = IEEE80211_ELEMID_DSPARMS;
7219 		*frm++ = 1;
7220 		*frm++ = 0;
7221 		remain -= 3;
7222 	}
7223 	preq->band_data[0].len = htole16(frm - pos);
7224 
7225 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
7226 		/* Fill in 5GHz IEs. */
7227 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
7228 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
7229 			if (remain < 4 + rs->rs_nrates)
7230 				return ENOBUFS;
7231 		} else if (remain < 2 + rs->rs_nrates)
7232 			return ENOBUFS;
7233 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
7234 		pos = frm;
7235 		frm = ieee80211_add_rates(frm, rs);
7236 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
7237 			frm = ieee80211_add_xrates(frm, rs);
7238 		preq->band_data[1].len = htole16(frm - pos);
7239 		remain -= frm - pos;
7240 		if (ic->ic_flags & IEEE80211_F_VHTON) {
7241 			if (remain < 14)
7242 				return ENOBUFS;
7243 			frm = ieee80211_add_vhtcaps(frm, ic);
7244 			remain -= frm - pos;
7245 			preq->band_data[1].len = htole16(frm - pos);
7246 		}
7247 	}
7248 
7249 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
7250 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
7251 	pos = frm;
7252 	if (ic->ic_flags & IEEE80211_F_HTON) {
7253 		if (remain < 28)
7254 			return ENOBUFS;
7255 		frm = ieee80211_add_htcaps(frm, ic);
7256 		/* XXX add WME info? */
7257 		remain -= frm - pos;
7258 	}
7259 
7260 	preq->common_data.len = htole16(frm - pos);
7261 
7262 	return 0;
7263 }
7264 
7265 int
7266 iwx_config_umac_scan_reduced(struct iwx_softc *sc)
7267 {
7268 	struct iwx_scan_config scan_cfg;
7269 	struct iwx_host_cmd hcmd = {
7270 		.id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
7271 		.len[0] = sizeof(scan_cfg),
7272 		.data[0] = &scan_cfg,
7273 		.flags = 0,
7274 	};
7275 	int cmdver;
7276 
7277 	if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
7278 		printf("%s: firmware does not support reduced scan config\n",
7279 		    DEVNAME(sc));
7280 		return ENOTSUP;
7281 	}
7282 
7283 	memset(&scan_cfg, 0, sizeof(scan_cfg));
7284 
7285 	/*
7286 	 * SCAN_CFG version >= 5 implies that the broadcast
7287 	 * STA ID field is deprecated.
7288 	 */
7289 	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
7290 	if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
7291 		scan_cfg.bcast_sta_id = 0xff;
7292 
7293 	scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
7294 	scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
7295 
7296 	return iwx_send_cmd(sc, &hcmd);
7297 }
7298 
7299 uint16_t
7300 iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
7301 {
7302 	struct ieee80211com *ic = &sc->sc_ic;
7303 	uint16_t flags = 0;
7304 
7305 	if (ic->ic_des_esslen == 0)
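	/* Without a desired SSID we can only scan passively. */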
7306 		flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
7307 
7308 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
7309 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
7310 	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
7311 
7312 	return flags;
7313 }
7314 
7315 #define IWX_SCAN_DWELL_ACTIVE		10
7316 #define IWX_SCAN_DWELL_PASSIVE		110
7317 
7318 /* adaptive dwell max budget time [TU] for full scan */
7319 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
7320 /* adaptive dwell max budget time [TU] for directed scan */
7321 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
7322 /* adaptive dwell default high band APs number */
7323 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
7324 /* adaptive dwell default low band APs number */
7325 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
7326 /* adaptive dwell default APs number in social channels (1, 6, 11) */
7327 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
7328 /* adaptive dwell number of APs override for p2p friendly GO channels */
7329 #define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
7330 /* adaptive dwell number of APs override for social channels */
7331 #define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
7332 
7333 void
7334 iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
7335     struct iwx_scan_general_params_v10 *general_params, int bgscan)
7336 {
7337 	uint32_t suspend_time, max_out_time;
7338 	uint8_t active_dwell, passive_dwell;
7339 
7340 	active_dwell = IWX_SCAN_DWELL_ACTIVE;
7341 	passive_dwell = IWX_SCAN_DWELL_PASSIVE;
7342 
7343 	general_params->adwell_default_social_chn =
7344 		IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
7345 	general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
7346 	general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
7347 
7348 	if (bgscan)
7349 		general_params->adwell_max_budget =
7350 			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
7351 	else
7352 		general_params->adwell_max_budget =
7353 			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
7354 
7355 	general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
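	/*
	 * During a background scan, limit the time spent away from the
	 * operating channel so the association is not disturbed.
	 */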
7356 	if (bgscan) {
7357 		max_out_time = 120;
7358 		suspend_time = 120;
7359 	} else {
7360 		max_out_time = 0;
7361 		suspend_time = 0;
7362 	}
7363 	general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
7364 		htole32(max_out_time);
7365 	general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
7366 		htole32(suspend_time);
7367 	general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
7368 		htole32(max_out_time);
7369 	general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
7370 		htole32(suspend_time);
7371 
7372 	general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
7373 	general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
7374 	general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
7375 	general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
7376 }
7377 
7378 void
7379 iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
7380     struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
7381 {
7382 	iwx_scan_umac_dwell_v10(sc, gp, bgscan);
7383 
7384 	gp->flags = htole16(gen_flags);
7385 
7386 	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
7387 		gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
7388 	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
7389 		gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;
7390 
7391 	gp->scan_start_mac_id = 0;
7392 }
7393 
7394 void
7395 iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
7396     struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
7397     int n_ssid)
7398 {
7399 	cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
7400 
7401 	cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
7402 	    nitems(cp->channel_config), n_ssid, channel_cfg_flags);
7403 
7404 	cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
7405 	cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
7406 }
7407 
7408 int
7409 iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
7410 {
7411 	struct ieee80211com *ic = &sc->sc_ic;
7412 	struct iwx_host_cmd hcmd = {
7413 		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
7414 		.len = { 0, },
7415 		.data = { NULL, },
7416 		.flags = 0,
7417 	};
7418 	struct iwx_scan_req_umac_v14 *cmd;
7419 	struct iwx_scan_req_params_v14 *scan_p;
7420 	int err, async = bgscan, n_ssid = 0;
7421 	uint16_t gen_flags;
7422 	uint32_t bitmap_ssid = 0;
7423 
7424 	cmd = malloc(sizeof(*cmd), M_DEVBUF,
7425 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
7426 	if (cmd == NULL)
7427 		return ENOMEM;
7428 
7429 	scan_p = &cmd->scan_params;
7430 
7431 	cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
7432 	cmd->uid = htole32(0);
7433 
7434 	gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
7435 	iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
7436 	    gen_flags, bgscan);
7437 
7438 	scan_p->periodic_params.schedule[0].interval = htole16(0);
7439 	scan_p->periodic_params.schedule[0].iter_count = 1;
7440 
7441 	err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
7442 	if (err) {
7443 		free(cmd, M_DEVBUF, sizeof(*cmd));
7444 		return err;
7445 	}
7446 
7447 	if (ic->ic_des_esslen != 0) {
7448 		scan_p->probe_params.direct_scan[0].id = IEEE80211_ELEMID_SSID;
7449 		scan_p->probe_params.direct_scan[0].len = ic->ic_des_esslen;
7450 		memcpy(scan_p->probe_params.direct_scan[0].ssid,
7451 		    ic->ic_des_essid, ic->ic_des_esslen);
7452 		bitmap_ssid |= (1 << 0);
7453 		n_ssid = 1;
7454 	}
7455 
7456 	iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
7457 	    n_ssid);
7458 
7459 	hcmd.len[0] = sizeof(*cmd);
7460 	hcmd.data[0] = (void *)cmd;
7461 	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;
7462 
7463 	err = iwx_send_cmd(sc, &hcmd);
7464 	free(cmd, M_DEVBUF, sizeof(*cmd));
7465 	return err;
7466 }
7467 
7468 void
7469 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
7470 {
7471 	struct ieee80211com *ic = &sc->sc_ic;
7472 	struct ifnet *ifp = IC2IFP(ic);
7473 	char alpha2[3];
7474 
7475 	snprintf(alpha2, sizeof(alpha2), "%c%c",
7476 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
7477 
7478 	if (ifp->if_flags & IFF_DEBUG) {
7479 		printf("%s: firmware has detected regulatory domain '%s' "
7480 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
7481 	}
7482 
7483 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
7484 }
7485 
7486 uint8_t
7487 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
7488 {
7489 	int i;
7490 	uint8_t rval;
7491 
7492 	for (i = 0; i < rs->rs_nrates; i++) {
7493 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
7494 		if (rval == iwx_rates[ridx].rate)
7495 			return rs->rs_rates[i];
7496 	}
7497 
7498 	return 0;
7499 }
7500 
7501 int
7502 iwx_rval2ridx(int rval)
7503 {
7504 	int ridx;
7505 
7506 	for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
7507 		if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
7508 			continue;
7509 		if (rval == iwx_rates[ridx].rate)
7510 			break;
7511 	}
7512 
7513 	return ridx;
7514 }
7515 
7516 void
7517 iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
7518     int *ofdm_rates)
7519 {
7520 	struct ieee80211_node *ni = &in->in_ni;
7521 	struct ieee80211_rateset *rs = &ni->ni_rates;
7522 	int lowest_present_ofdm = -1;
7523 	int lowest_present_cck = -1;
7524 	uint8_t cck = 0;
7525 	uint8_t ofdm = 0;
7526 	int i;
7527 
7528 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
7529 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
7530 		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
7531 			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
7532 				continue;
7533 			cck |= (1 << i);
7534 			if (lowest_present_cck == -1 || lowest_present_cck > i)
7535 				lowest_present_cck = i;
7536 		}
7537 	}
7538 	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
7539 		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
7540 			continue;
7541 		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
7542 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
7543 			lowest_present_ofdm = i;
7544 	}
7545 
7546 	/*
7547 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
7548 	 * variables. This isn't sufficient though, as there might not
7549 	 * be all the right rates in the bitmap. E.g. if the only basic
7550 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
7551 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
7552 	 *
7553 	 *    [...] a STA responding to a received frame shall transmit
7554 	 *    its Control Response frame [...] at the highest rate in the
7555 	 *    BSSBasicRateSet parameter that is less than or equal to the
7556 	 *    rate of the immediately previous frame in the frame exchange
7557 	 *    sequence ([...]) and that is of the same modulation class
7558 	 *    ([...]) as the received frame. If no rate contained in the
7559 	 *    BSSBasicRateSet parameter meets these conditions, then the
7560 	 *    control frame sent in response to a received frame shall be
7561 	 *    transmitted at the highest mandatory rate of the PHY that is
7562 	 *    less than or equal to the rate of the received frame, and
7563 	 *    that is of the same modulation class as the received frame.
7564 	 *
7565 	 * As a consequence, we need to add all mandatory rates that are
7566 	 * lower than all of the basic rates to these bitmaps.
7567 	 */
7568 
7569 	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
7570 		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
7571 	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
7572 		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
7573 	/* 6M already there or needed so always add */
7574 	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;
7575 
7576 	/*
7577 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
7578 	 * Note, however:
7579 	 *  - if no CCK rates are basic, it must be ERP since there must
7580 	 *    be some basic rates at all, so they're OFDM => ERP PHY
7581 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
7582 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
7583 	 *  - if 5.5M is basic, 1M and 2M are mandatory
7584 	 *  - if 2M is basic, 1M is mandatory
7585 	 *  - if 1M is basic, that's the only valid ACK rate.
7586 	 * As a consequence, it's not as complicated as it sounds, just add
7587 	 * any lower rates to the ACK rate bitmap.
7588 	 */
7589 	if (IWX_RATE_11M_INDEX < lowest_present_cck)
7590 		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
7591 	if (IWX_RATE_5M_INDEX < lowest_present_cck)
7592 		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
7593 	if (IWX_RATE_2M_INDEX < lowest_present_cck)
7594 		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
7595 	/* 1M already there or needed so always add */
7596 	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;
7597 
7598 	*cck_rates = cck;
7599 	*ofdm_rates = ofdm;
7600 }
7601 
7602 void
7603 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
7604     struct iwx_mac_ctx_cmd *cmd, uint32_t action)
7605 {
7606 #define IWX_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
7607 	struct ieee80211com *ic = &sc->sc_ic;
7608 	struct ieee80211_node *ni = ic->ic_bss;
7609 	int cck_ack_rates, ofdm_ack_rates;
7610 	int i;
7611 
7612 	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
7613 	    in->in_color));
7614 	cmd->action = htole32(action);
7615 
7616 	if (action == IWX_FW_CTXT_ACTION_REMOVE)
7617 		return;
7618 
7619 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7620 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
7621 	else if (ic->ic_opmode == IEEE80211_M_STA)
7622 		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
7623 	else
7624 		panic("unsupported operating mode %d", ic->ic_opmode);
7625 	cmd->tsf_id = htole32(IWX_TSF_ID_A);
7626 
7627 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
7628 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7629 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
7630 		return;
7631 	}
7632 
7633 	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
7634 	iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
7635 	cmd->cck_rates = htole32(cck_ack_rates);
7636 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
7637 
7638 	cmd->cck_short_preamble
7639 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
7640 	      ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
7641 	cmd->short_slot
7642 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
7643 	      ? IWX_MAC_FLG_SHORT_SLOT : 0);
7644 
7645 	for (i = 0; i < EDCA_NUM_AC; i++) {
7646 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
7647 		int txf = iwx_ac_to_tx_fifo[i];
7648 
7649 		cmd->ac[txf].cw_min = htole16(IWX_EXP2(ac->ac_ecwmin));
7650 		cmd->ac[txf].cw_max = htole16(IWX_EXP2(ac->ac_ecwmax));
7651 		cmd->ac[txf].aifsn = ac->ac_aifsn;
7652 		cmd->ac[txf].fifos_mask = (1 << txf);
7653 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
7654 	}
7655 	if (ni->ni_flags & IEEE80211_NODE_QOS)
7656 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
7657 
7658 	if (ni->ni_flags & IEEE80211_NODE_HT) {
7659 		enum ieee80211_htprot htprot =
7660 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
7661 		switch (htprot) {
7662 		case IEEE80211_HTPROT_NONE:
7663 			break;
7664 		case IEEE80211_HTPROT_NONMEMBER:
7665 		case IEEE80211_HTPROT_NONHT_MIXED:
7666 			cmd->protection_flags |=
7667 			    htole32(IWX_MAC_PROT_FLG_HT_PROT |
7668 			    IWX_MAC_PROT_FLG_FAT_PROT);
7669 			break;
7670 		case IEEE80211_HTPROT_20MHZ:
7671 			if (in->in_phyctxt &&
7672 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
7673 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
7674 				cmd->protection_flags |=
7675 				    htole32(IWX_MAC_PROT_FLG_HT_PROT |
7676 				    IWX_MAC_PROT_FLG_FAT_PROT);
7677 			}
7678 			break;
7679 		default:
7680 			break;
7681 		}
7682 
7683 		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
7684 	}
7685 	if (ic->ic_flags & IEEE80211_F_USEPROT)
7686 		cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
7687 
7688 	cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
7689 #undef IWX_EXP2
7690 }
7691 
7692 void
7693 iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
7694     struct iwx_mac_data_sta *sta, int assoc)
7695 {
7696 	struct ieee80211_node *ni = &in->in_ni;
7697 	uint32_t dtim_off;
7698 	uint64_t tsf;
7699 
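	/* Offset from the last received beacon to the next DTIM beacon. */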
7700 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
7701 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
7702 	tsf = letoh64(tsf);
7703 
7704 	sta->is_assoc = htole32(assoc);
7705 	if (assoc) {
7706 		sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
7707 		sta->dtim_tsf = htole64(tsf + dtim_off);
7708 		sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
7709 	}
7710 	sta->bi = htole32(ni->ni_intval);
7711 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
7712 	sta->data_policy = htole32(0);
7713 	sta->listen_interval = htole32(10);
7714 	sta->assoc_id = htole32(ni->ni_associd);
7715 }
7716 
7717 int
7718 iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
7719     int assoc)
7720 {
7721 	struct ieee80211com *ic = &sc->sc_ic;
7722 	struct ieee80211_node *ni = &in->in_ni;
7723 	struct iwx_mac_ctx_cmd cmd;
7724 	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
7725 
7726 	if (action == IWX_FW_CTXT_ACTION_ADD && active)
7727 		panic("MAC already added");
7728 	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
7729 		panic("MAC already removed");
7730 
7731 	if (sc->sc_use_mld_api)
7732 		return iwx_mld_mac_ctxt_cmd(sc, in, action, assoc);
7733 
7734 	memset(&cmd, 0, sizeof(cmd));
7735 
7736 	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
7737 
7738 	if (action == IWX_FW_CTXT_ACTION_REMOVE) {
7739 		return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
7740 		    sizeof(cmd), &cmd);
7741 	}
7742 
7743 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7744 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
7745 		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
7746 		    IWX_MAC_FILTER_ACCEPT_GRP |
7747 		    IWX_MAC_FILTER_IN_BEACON |
7748 		    IWX_MAC_FILTER_IN_PROBE_REQUEST |
7749 		    IWX_MAC_FILTER_IN_CRC32);
7750 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod) {
7751 		/*
7752 		 * Allow beacons to pass through as long as we are not
7753 		 * associated or we do not have dtim period information.
7754 		 */
7755 		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
7756 	}
7757 	iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
7758 	return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
7759 }
7760 
7761 int
7762 iwx_mld_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in,
7763     uint32_t action, int assoc)
7764 {
7765 	struct ieee80211com *ic = &sc->sc_ic;
7766 	struct ieee80211_node *ni = &in->in_ni;
7767 	struct iwx_mac_config_cmd cmd;
7768 
7769 	memset(&cmd, 0, sizeof(cmd));
7770 	cmd.id_and_color = htole32(in->in_id);
7771 	cmd.action = htole32(action);
7772 
7773 	if (action == IWX_FW_CTXT_ACTION_REMOVE) {
7774 		return iwx_send_cmd_pdu(sc,
7775 		    IWX_WIDE_ID(IWX_MAC_CONF_GROUP, IWX_MAC_CONFIG_CMD),
7776 		    0, sizeof(cmd), &cmd);
7777 	}
7778 
7779 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7780 		cmd.mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
7781 	else if (ic->ic_opmode == IEEE80211_M_STA)
7782 		cmd.mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
7783 	else
7784 		panic("unsupported operating mode %d", ic->ic_opmode);
7785 	IEEE80211_ADDR_COPY(cmd.local_mld_addr, ic->ic_myaddr);
7786 	cmd.client.assoc_id = htole32(ni->ni_associd);
7787 
7788 	cmd.filter_flags = htole32(IWX_MAC_CFG_FILTER_ACCEPT_GRP);
7789 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7790 		cmd.filter_flags |= htole32(IWX_MAC_CFG_FILTER_PROMISC |
7791 		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
7792 		    IWX_MAC_CFG_FILTER_ACCEPT_BEACON |
7793 		    IWX_MAC_CFG_FILTER_ACCEPT_PROBE_REQ |
7794 		    IWX_MAC_CFG_FILTER_ACCEPT_GRP);
7795 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod) {
7796 		/*
7797 		 * Allow beacons to pass through as long as we are not
7798 		 * associated or we do not have dtim period information.
7799 		 */
7800 		cmd.filter_flags |= htole32(IWX_MAC_CFG_FILTER_ACCEPT_BEACON);
7801 	}
7802 
7803 	return iwx_send_cmd_pdu(sc,
7804 	    IWX_WIDE_ID(IWX_MAC_CONF_GROUP, IWX_MAC_CONFIG_CMD),
7805 	    0, sizeof(cmd), &cmd);
7806 }
7807 
7808 int
7809 iwx_clear_statistics(struct iwx_softc *sc)
7810 {
7811 	struct iwx_statistics_cmd scmd = {
7812 		.flags = htole32(IWX_STATISTICS_FLG_CLEAR)
7813 	};
7814 	struct iwx_host_cmd cmd = {
7815 		.id = IWX_STATISTICS_CMD,
7816 		.len[0] = sizeof(scmd),
7817 		.data[0] = &scmd,
7818 		.flags = IWX_CMD_WANT_RESP,
7819 		.resp_pkt_len = sizeof(struct iwx_notif_statistics),
7820 	};
7821 	int err;
7822 
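	/* The statistics in the response are ignored; we only want the clear. */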
7823 	err = iwx_send_cmd(sc, &cmd);
7824 	if (err)
7825 		return err;
7826 
7827 	iwx_free_resp(sc, &cmd);
7828 	return 0;
7829 }
7830 
7831 void
7832 iwx_add_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
7833 {
7834 	int s = splnet();
7835 
7836 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
7837 		splx(s);
7838 		return;
7839 	}
7840 
7841 	refcnt_take(&sc->task_refs);
7842 	if (!task_add(taskq, task))
7843 		refcnt_rele_wake(&sc->task_refs);
7844 	splx(s);
7845 }
7846 
7847 void
7848 iwx_del_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
7849 {
7850 	if (task_del(taskq, task))
7851 		refcnt_rele(&sc->task_refs);
7852 }
7853 
7854 int
7855 iwx_scan(struct iwx_softc *sc)
7856 {
7857 	struct ieee80211com *ic = &sc->sc_ic;
7858 	struct ifnet *ifp = IC2IFP(ic);
7859 	int err;
7860 
7861 	if (sc->sc_flags & IWX_FLAG_BGSCAN) {
7862 		err = iwx_scan_abort(sc);
7863 		if (err) {
7864 			printf("%s: could not abort background scan\n",
7865 			    DEVNAME(sc));
7866 			return err;
7867 		}
7868 	}
7869 
7870 	err = iwx_umac_scan_v14(sc, 0);
7871 	if (err) {
7872 		printf("%s: could not initiate scan\n", DEVNAME(sc));
7873 		return err;
7874 	}
7875 
7876 	/*
7877 	 * The current mode might have been fixed during association.
7878 	 * Ensure all channels get scanned.
7879 	 */
7880 	if (IFM_SUBTYPE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
7881 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
7882 
7883 	sc->sc_flags |= IWX_FLAG_SCANNING;
7884 	if (ifp->if_flags & IFF_DEBUG)
7885 		printf("%s: %s -> %s\n", ifp->if_xname,
7886 		    ieee80211_state_name[ic->ic_state],
7887 		    ieee80211_state_name[IEEE80211_S_SCAN]);
7888 	if ((sc->sc_flags & IWX_FLAG_BGSCAN) == 0) {
7889 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
7890 		ieee80211_node_cleanup(ic, ic->ic_bss);
7891 	}
7892 	ic->ic_state = IEEE80211_S_SCAN;
7893 	wakeup(&ic->ic_state); /* wake iwx_init() */
7894 
7895 	return 0;
7896 }
7897 
7898 int
7899 iwx_bgscan(struct ieee80211com *ic)
7900 {
7901 	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
7902 	int err;
7903 
7904 	if (sc->sc_flags & IWX_FLAG_SCANNING)
7905 		return 0;
7906 
7907 	err = iwx_umac_scan_v14(sc, 1);
7908 	if (err) {
7909 		printf("%s: could not initiate scan\n", DEVNAME(sc));
7910 		return err;
7911 	}
7912 
7913 	sc->sc_flags |= IWX_FLAG_BGSCAN;
7914 	return 0;
7915 }
7916 
7917 void
7918 iwx_bgscan_done(struct ieee80211com *ic,
7919     struct ieee80211_node_switch_bss_arg *arg, size_t arg_size)
7920 {
7921 	struct iwx_softc *sc = ic->ic_softc;
7922 
7923 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
7924 	sc->bgscan_unref_arg = arg;
7925 	sc->bgscan_unref_arg_size = arg_size;
7926 	iwx_add_task(sc, systq, &sc->bgscan_done_task);
7927 }
7928 
7929 void
7930 iwx_bgscan_done_task(void *arg)
7931 {
7932 	struct iwx_softc *sc = arg;
7933 	struct ieee80211com *ic = &sc->sc_ic;
7934 	struct iwx_node *in = (void *)ic->ic_bss;
7935 	struct ieee80211_node *ni = &in->in_ni;
7936 	int tid, err = 0, s = splnet();
7937 
7938 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
7939 	    (ic->ic_flags & IEEE80211_F_BGSCAN) == 0 ||
7940 	    ic->ic_state != IEEE80211_S_RUN) {
7941 		err = ENXIO;
7942 		goto done;
7943 	}
7944 
7945 	err = iwx_flush_sta(sc, in);
7946 	if (err)
7947 		goto done;
7948 
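	/* Tear down all active Tx aggregation sessions before roaming. */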
7949 	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
7950 		int qid = IWX_FIRST_AGG_TX_QUEUE + tid;
7951 
7952 		if (sc->aggqid[tid] == 0)
7953 			continue;
7954 
7955 		err = iwx_disable_txq(sc, IWX_STATION_ID, qid, tid);
7956 		if (err)
7957 			goto done;
7958 #if 0 /* disabled for now; we are going to DEAUTH soon anyway */
7959 		IEEE80211_SEND_ACTION(ic, ni, IEEE80211_CATEG_BA,
7960 		    IEEE80211_ACTION_DELBA,
7961 		    IEEE80211_REASON_AUTH_LEAVE << 16 |
7962 		    IEEE80211_FC1_DIR_TODS << 8 | tid);
7963 #endif
7964 		ieee80211_node_tx_ba_clear(ni, tid);
7965 		sc->aggqid[tid] = 0;
7966 	}
7967 
7968 	/*
7969 	 * Tx queues have been flushed and Tx agg has been stopped.
7970 	 * Allow roaming to proceed.
7971 	 */
7972 	ni->ni_unref_arg = sc->bgscan_unref_arg;
7973 	ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
7974 	sc->bgscan_unref_arg = NULL;
7975 	sc->bgscan_unref_arg_size = 0;
7976 	ieee80211_node_tx_stopped(ic, &in->in_ni);
7977 done:
7978 	if (err) {
7979 		free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
7980 		sc->bgscan_unref_arg = NULL;
7981 		sc->bgscan_unref_arg_size = 0;
7982 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
7983 			task_add(systq, &sc->init_task);
7984 	}
7985 	refcnt_rele_wake(&sc->task_refs);
7986 	splx(s);
7987 }
7988 
7989 int
7990 iwx_umac_scan_abort(struct iwx_softc *sc)
7991 {
7992 	struct iwx_umac_scan_abort cmd = { 0 };
7993 
7994 	return iwx_send_cmd_pdu(sc,
7995 	    IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC),
7996 	    0, sizeof(cmd), &cmd);
7997 }
7998 
7999 int
8000 iwx_scan_abort(struct iwx_softc *sc)
8001 {
8002 	int err;
8003 
8004 	err = iwx_umac_scan_abort(sc);
8005 	if (err == 0)
8006 		sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8007 	return err;
8008 }
8009 
8010 int
8011 iwx_enable_mgmt_queue(struct iwx_softc *sc)
8012 {
8013 	int err;
8014 
8015 	sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
8016 
8017 	/*
8018 	 * Non-QoS frames use the "MGMT" TID and queue.
8019 	 * Other TIDs and data queues are reserved for QoS data frames.
8020 	 */
8021 	err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
8022 	    IWX_MGMT_TID, IWX_TX_RING_COUNT);
8023 	if (err) {
8024 		printf("%s: could not enable Tx queue %d (error %d)\n",
8025 		    DEVNAME(sc), sc->first_data_qid, err);
8026 		return err;
8027 	}
8028 
8029 	return 0;
8030 }
8031 
8032 int
8033 iwx_disable_mgmt_queue(struct iwx_softc *sc)
8034 {
8035 	int err, cmd_ver;
8036 
8037 	/* Explicit removal is only required with old SCD_QUEUE_CFG command. */
8038 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
8039 	    IWX_SCD_QUEUE_CONFIG_CMD);
8040 	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN)
8041 		return 0;
8042 
8043 	sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
8044 
8045 	err = iwx_disable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
8046 	    IWX_MGMT_TID);
8047 	if (err) {
8048 		printf("%s: could not disable Tx queue %d (error %d)\n",
8049 		    DEVNAME(sc), sc->first_data_qid, err);
8050 		return err;
8051 	}
8052 
8053 	return 0;
8054 }
8055 
8056 int
8057 iwx_rs_rval2idx(uint8_t rval)
8058 {
8059 	/* Firmware expects indices which match our 11g rate set. */
8060 	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
8061 	int i;
8062 
8063 	for (i = 0; i < rs->rs_nrates; i++) {
8064 		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
8065 			return i;
8066 	}
8067 
8068 	return -1;
8069 }
8070 
8071 uint16_t
8072 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
8073 {
8074 	struct ieee80211com *ic = &sc->sc_ic;
8075 	const struct ieee80211_ht_rateset *rs;
8076 	uint16_t htrates = 0;
8077 	int mcs;
8078 
8079 	rs = &ieee80211_std_ratesets_11n[rsidx];
8080 	for (mcs = rs->min_mcs; mcs <= rs->max_mcs; mcs++) {
8081 		if (!isset(ni->ni_rxmcs, mcs) ||
8082 		    !isset(ic->ic_sup_mcs, mcs))
8083 			continue;
8084 		htrates |= (1 << (mcs - rs->min_mcs));
8085 	}
8086 
8087 	return htrates;
8088 }
8089 
8090 uint16_t
8091 iwx_rs_vht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int num_ss)
8092 {
8093 	uint16_t rx_mcs;
8094 	int max_mcs = -1;
8095 
8096 	rx_mcs = (ni->ni_vht_rxmcs & IEEE80211_VHT_MCS_FOR_SS_MASK(num_ss)) >>
8097 	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(num_ss);
8098 	switch (rx_mcs) {
8099 	case IEEE80211_VHT_MCS_SS_NOT_SUPP:
8100 		break;
8101 	case IEEE80211_VHT_MCS_0_7:
8102 		max_mcs = 7;
8103 		break;
8104 	case IEEE80211_VHT_MCS_0_8:
8105 		max_mcs = 8;
8106 		break;
8107 	case IEEE80211_VHT_MCS_0_9:
8108 		/* Disable VHT MCS 9 for 20MHz-only stations. */
8109 		if (!ieee80211_node_supports_ht_chan40(ni))
8110 			max_mcs = 8;
8111 		else
8112 			max_mcs = 9;
8113 		break;
8114 	default:
8115 		/* Should not happen; values above cover the possible range. */
8116 		panic("invalid VHT Rx MCS value %u", rx_mcs);
8117 	}
8118 
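	/* Bitmap of supported MCS 0 through max_mcs; empty if unsupported. */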
8119 	return ((1 << (max_mcs + 1)) - 1);
8120 }
8121 
8122 int
8123 iwx_rs_init_v3(struct iwx_softc *sc, struct iwx_node *in)
8124 {
8125 	struct ieee80211_node *ni = &in->in_ni;
8126 	struct ieee80211_rateset *rs = &ni->ni_rates;
8127 	struct iwx_tlc_config_cmd_v3 cfg_cmd;
8128 	uint32_t cmd_id;
8129 	int i;
8130 	size_t cmd_size = sizeof(cfg_cmd);
8131 
8132 	memset(&cfg_cmd, 0, sizeof(cfg_cmd));
8133 
8134 	for (i = 0; i < rs->rs_nrates; i++) {
8135 		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
8136 		int idx = iwx_rs_rval2idx(rval);
8137 		if (idx == -1)
8138 			return EINVAL;
8139 		cfg_cmd.non_ht_rates |= (1 << idx);
8140 	}
8141 
8142 	if (ni->ni_flags & IEEE80211_NODE_VHT) {
8143 		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
8144 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
8145 		    htole16(iwx_rs_vht_rates(sc, ni, 1));
8146 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
8147 		    htole16(iwx_rs_vht_rates(sc, ni, 2));
8148 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
8149 		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
8150 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
8151 		    htole16(iwx_rs_ht_rates(sc, ni,
8152 		    IEEE80211_HT_RATESET_SISO));
8153 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
8154 		    htole16(iwx_rs_ht_rates(sc, ni,
8155 		    IEEE80211_HT_RATESET_MIMO2));
8156 	} else
8157 		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
8158 
8159 	cfg_cmd.sta_id = IWX_STATION_ID;
8160 	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
8161 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
8162 	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
8163 	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
8164 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
8165 	else
8166 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
8167 	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
8168 	if (ni->ni_flags & IEEE80211_NODE_VHT)
8169 		cfg_cmd.max_mpdu_len = htole16(3895);
8170 	else
8171 		cfg_cmd.max_mpdu_len = htole16(3839);
8172 	if (ni->ni_flags & IEEE80211_NODE_HT) {
8173 		if (ieee80211_node_supports_ht_sgi20(ni)) {
8174 			cfg_cmd.sgi_ch_width_supp |= (1 <<
8175 			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
8176 		}
8177 		if (ieee80211_node_supports_ht_sgi40(ni)) {
8178 			cfg_cmd.sgi_ch_width_supp |= (1 <<
8179 			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
8180 		}
8181 	}
8182 	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
8183 	    ieee80211_node_supports_vht_sgi80(ni))
8184 		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
8185 
8186 	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
8187 	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
8188 }
8189 
8190 int
8191 iwx_rs_init_v4(struct iwx_softc *sc, struct iwx_node *in)
8192 {
8193 	struct ieee80211_node *ni = &in->in_ni;
8194 	struct ieee80211_rateset *rs = &ni->ni_rates;
8195 	struct iwx_tlc_config_cmd_v4 cfg_cmd;
8196 	uint32_t cmd_id;
8197 	int i;
8198 	size_t cmd_size = sizeof(cfg_cmd);
8199 
8200 	memset(&cfg_cmd, 0, sizeof(cfg_cmd));
8201 
8202 	for (i = 0; i < rs->rs_nrates; i++) {
8203 		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
8204 		int idx = iwx_rs_rval2idx(rval);
8205 		if (idx == -1)
8206 			return EINVAL;
8207 		cfg_cmd.non_ht_rates |= (1 << idx);
8208 	}
8209 
8210 	if (ni->ni_flags & IEEE80211_NODE_VHT) {
8211 		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
8212 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
8213 		    htole16(iwx_rs_vht_rates(sc, ni, 1));
8214 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
8215 		    htole16(iwx_rs_vht_rates(sc, ni, 2));
8216 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
8217 		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
8218 		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
8219 		    htole16(iwx_rs_ht_rates(sc, ni,
8220 		    IEEE80211_HT_RATESET_SISO));
8221 		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
8222 		    htole16(iwx_rs_ht_rates(sc, ni,
8223 		    IEEE80211_HT_RATESET_MIMO2));
8224 	} else
8225 		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
8226 
8227 	cfg_cmd.sta_id = IWX_STATION_ID;
8228 	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
8229 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
8230 	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
8231 	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
8232 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
8233 	else
8234 		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
8235 	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
8236 	if (ni->ni_flags & IEEE80211_NODE_VHT)
8237 		cfg_cmd.max_mpdu_len = htole16(3895);
8238 	else
8239 		cfg_cmd.max_mpdu_len = htole16(3839);
8240 	if (ni->ni_flags & IEEE80211_NODE_HT) {
8241 		if (ieee80211_node_supports_ht_sgi20(ni)) {
8242 			cfg_cmd.sgi_ch_width_supp |= (1 <<
8243 			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
8244 		}
8245 		if (ieee80211_node_supports_ht_sgi40(ni)) {
8246 			cfg_cmd.sgi_ch_width_supp |= (1 <<
8247 			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
8248 		}
8249 	}
8250 	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
8251 	    ieee80211_node_supports_vht_sgi80(ni))
8252 		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
8253 
8254 	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
8255 	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
8256 }
8257 
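/*
 * Initialize firmware-based Tx rate scaling (TLC). The config command
 * layout depends on the firmware's advertised command version;
 * iwx_rs_init_v3() and iwx_rs_init_v4() are identical apart from the
 * command struct they send.
 */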
8258 int
8259 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
8260 {
8261 	int cmd_ver;
8262 
8263 	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
8264 	    IWX_TLC_MNG_CONFIG_CMD);
8265 	if (cmd_ver == 4)
8266 		return iwx_rs_init_v4(sc, in);
8267 	return iwx_rs_init_v3(sc, in);
8268 }
8269 
8270 void
8271 iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
8272 {
8273 	struct ieee80211com *ic = &sc->sc_ic;
8274 	struct ieee80211_node *ni = ic->ic_bss;
8275 	struct ieee80211_rateset *rs = &ni->ni_rates;
8276 	uint32_t rate_n_flags;
8277 	uint8_t plcp, rval;
8278 	int i, cmd_ver, rate_n_flags_ver2 = 0;
8279 
8280 	if (notif->sta_id != IWX_STATION_ID ||
8281 	    (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
8282 		return;
8283 
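	/*
	 * Firmware has chosen a new Tx rate. Decode rate_n_flags
	 * (layout depends on the notification version) and mirror the
	 * result into net80211's ni_txmcs/ni_vht_ss/ni_txrate.
	 */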
8284 	rate_n_flags = le32toh(notif->rate);
8285 
8286 	cmd_ver = iwx_lookup_notif_ver(sc, IWX_DATA_PATH_GROUP,
8287 	    IWX_TLC_MNG_UPDATE_NOTIF);
8288 	if (cmd_ver != IWX_FW_CMD_VER_UNKNOWN && cmd_ver >= 3)
8289 		rate_n_flags_ver2 = 1;
8290 	if (rate_n_flags_ver2) {
8291 		uint32_t mod_type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
8292 		if (mod_type == IWX_RATE_MCS_VHT_MSK) {
8293 			ni->ni_txmcs = (rate_n_flags &
8294 			    IWX_RATE_HT_MCS_CODE_MSK);
8295 			ni->ni_vht_ss = ((rate_n_flags &
8296 			    IWX_RATE_MCS_NSS_MSK) >>
8297 			    IWX_RATE_MCS_NSS_POS) + 1;
8298 			return;
8299 		} else if (mod_type == IWX_RATE_MCS_HT_MSK) {
8300 			ni->ni_txmcs = IWX_RATE_HT_MCS_INDEX(rate_n_flags);
8301 			return;
8302 		}
8303 	} else {
8304 		if (rate_n_flags & IWX_RATE_MCS_VHT_MSK_V1) {
8305 			ni->ni_txmcs = (rate_n_flags &
8306 			    IWX_RATE_VHT_MCS_RATE_CODE_MSK);
8307 			ni->ni_vht_ss = ((rate_n_flags &
8308 			    IWX_RATE_VHT_MCS_NSS_MSK) >>
8309 			    IWX_RATE_VHT_MCS_NSS_POS) + 1;
8310 			return;
8311 		} else if (rate_n_flags & IWX_RATE_MCS_HT_MSK_V1) {
8312 			ni->ni_txmcs = (rate_n_flags &
8313 			    (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
8314 			    IWX_RATE_HT_MCS_NSS_MSK_V1));
8315 			return;
8316 		}
8317 	}
8318 
8319 	if (rate_n_flags_ver2) {
8320 		const struct ieee80211_rateset *rs;
8321 		uint32_t ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
8322 		if (rate_n_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK)
8323 			rs = &ieee80211_std_rateset_11a;
8324 		else
8325 			rs = &ieee80211_std_rateset_11b;
8326 		if (ridx < rs->rs_nrates)
8327 			rval = (rs->rs_rates[ridx] & IEEE80211_RATE_VAL);
8328 		else
8329 			rval = 0;
8330 	} else {
8331 		plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
8332 
8333 		rval = 0;
8334 		for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
8335 			if (iwx_rates[i].plcp == plcp) {
8336 				rval = iwx_rates[i].rate;
8337 				break;
8338 			}
8339 		}
8340 	}
8341 
8342 	if (rval) {
8343 		uint8_t rv;
8344 		for (i = 0; i < rs->rs_nrates; i++) {
8345 			rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
8346 			if (rv == rval) {
8347 				ni->ni_txrate = i;
8348 				break;
8349 			}
8350 		}
8351 	}
8352 }
8353 
8354 int
8355 iwx_phy_send_rlc(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
8356     uint8_t chains_static, uint8_t chains_dynamic)
8357 {
8358 	struct iwx_rlc_config_cmd cmd;
8359 	uint32_t cmd_id;
8360 	uint8_t active_cnt, idle_cnt;
8361 
8362 	memset(&cmd, 0, sizeof(cmd));
8363 
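	/*
	 * Map the static/dynamic chain counts into the firmware's
	 * rx_chain_info field: static chains are used while idle,
	 * dynamic chains while actively receiving (MIMO).
	 */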
8364 	idle_cnt = chains_static;
8365 	active_cnt = chains_dynamic;
8366 
8367 	cmd.phy_id = htole32(phyctxt->id);
8368 	cmd.rlc.rx_chain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
8369 	    IWX_PHY_RX_CHAIN_VALID_POS);
8370 	cmd.rlc.rx_chain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
8371 	cmd.rlc.rx_chain_info |= htole32(active_cnt <<
8372 	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
8373 
8374 	cmd_id = iwx_cmd_id(IWX_RLC_CONFIG_CMD, IWX_DATA_PATH_GROUP, 2);
8375 	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
8376 }
8377 
8378 int
8379 iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
8380     struct ieee80211_channel *chan, uint8_t chains_static,
8381     uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
8382     uint8_t vht_chan_width)
8383 {
8384 	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
8385 	int err;
8386 
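	/*
	 * Firmware with CDB (concurrent dual band) support cannot
	 * modify a PHY context onto a channel in a different band;
	 * such a context must be removed and re-added instead.
	 */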
8387 	if (isset(sc->sc_enabled_capa,
8388 	    IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
8389 	    (phyctxt->channel->ic_flags & band_flags) !=
8390 	    (chan->ic_flags & band_flags)) {
8391 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
8392 		    chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco,
8393 		    vht_chan_width);
8394 		if (err) {
8395 			printf("%s: could not remove PHY context "
8396 			    "(error %d)\n", DEVNAME(sc), err);
8397 			return err;
8398 		}
8399 		phyctxt->channel = chan;
8400 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
8401 		    chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco,
8402 		    vht_chan_width);
8403 		if (err) {
8404 			printf("%s: could not add PHY context "
8405 			    "(error %d)\n", DEVNAME(sc), err);
8406 			return err;
8407 		}
8408 	} else {
8409 		phyctxt->channel = chan;
8410 		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
8411 		    chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco,
8412 		    vht_chan_width);
8413 		if (err) {
8414 			printf("%s: could not update PHY context (error %d)\n",
8415 			    DEVNAME(sc), err);
8416 			return err;
8417 		}
8418 	}
8419 
8420 	phyctxt->sco = sco;
8421 	phyctxt->vht_chan_width = vht_chan_width;
8422 
8423 	if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
8424 	    IWX_RLC_CONFIG_CMD) == 2)
8425 		return iwx_phy_send_rlc(sc, phyctxt,
8426 		    chains_static, chains_dynamic);
8427 
8428 	return 0;
8429 }
8430 
8431 int
8432 iwx_auth(struct iwx_softc *sc)
8433 {
8434 	struct ieee80211com *ic = &sc->sc_ic;
8435 	struct iwx_node *in = (void *)ic->ic_bss;
8436 	uint32_t duration;
8437 	int generation = sc->sc_generation, err;
8438 
8439 	splassert(IPL_NET);
8440 
8441 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8442 		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8443 		    ic->ic_ibss_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8444 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8445 		if (err)
8446 			return err;
8447 	} else {
8448 		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8449 		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8450 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8451 		if (err)
8452 			return err;
8453 	}
8454 	in->in_phyctxt = &sc->sc_phyctxt[0];
8455 	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
8456 
8457 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
8458 	if (err) {
8459 		printf("%s: could not add MAC context (error %d)\n",
8460 		    DEVNAME(sc), err);
8461 		return err;
8462 	}
8463 	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;
8464 
8465 	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
8466 	if (err) {
8467 		printf("%s: could not add binding (error %d)\n",
8468 		    DEVNAME(sc), err);
8469 		goto rm_mac_ctxt;
8470 	}
8471 	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;
8472 
8473 	err = iwx_add_sta_cmd(sc, in, 0);
8474 	if (err) {
8475 		printf("%s: could not add sta (error %d)\n",
8476 		    DEVNAME(sc), err);
8477 		goto rm_binding;
8478 	}
8479 	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;
8480 
8481 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8482 		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
8483 		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
8484 		    IWX_TX_RING_COUNT);
8485 		if (err)
8486 			goto rm_sta;
8487 		return 0;
8488 	}
8489 
8490 	err = iwx_enable_mgmt_queue(sc);
8491 	if (err)
8492 		goto rm_sta;
8493 
8494 	err = iwx_clear_statistics(sc);
8495 	if (err)
8496 		goto rm_mgmt_queue;
8497 
8498 	/*
8499 	 * Prevent the FW from wandering off channel during association
8500 	 * by "protecting" the session with a time event.
8501 	 */
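	/*
	 * ni_intval is the beacon interval in TU; nine intervals
	 * (or 900 TU when the interval is not yet known) should be
	 * ample for the auth/assoc exchange.
	 */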
8502 	if (in->in_ni.ni_intval)
8503 		duration = in->in_ni.ni_intval * 9;
8504 	else
8505 		duration = 900;
8506 	return iwx_schedule_session_protection(sc, in, duration);
8507 rm_mgmt_queue:
8508 	if (generation == sc->sc_generation)
8509 		iwx_disable_mgmt_queue(sc);
8510 rm_sta:
8511 	if (generation == sc->sc_generation) {
8512 		iwx_rm_sta_cmd(sc, in);
8513 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8514 	}
8515 rm_binding:
8516 	if (generation == sc->sc_generation) {
8517 		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
8518 		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8519 	}
8520 rm_mac_ctxt:
8521 	if (generation == sc->sc_generation) {
8522 		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
8523 		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8524 	}
8525 	return err;
8526 }
8527 
8528 int
8529 iwx_deauth(struct iwx_softc *sc)
8530 {
8531 	struct ieee80211com *ic = &sc->sc_ic;
8532 	struct iwx_node *in = (void *)ic->ic_bss;
8533 	int err;
8534 
8535 	splassert(IPL_NET);
8536 
8537 	iwx_unprotect_session(sc, in);
8538 
8539 	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
8540 		err = iwx_rm_sta(sc, in);
8541 		if (err)
8542 			return err;
8543 		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8544 	}
8545 
8546 	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
8547 		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
8548 		if (err) {
8549 			printf("%s: could not remove binding (error %d)\n",
8550 			    DEVNAME(sc), err);
8551 			return err;
8552 		}
8553 		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8554 	}
8555 
8556 	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
8557 		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
8558 		if (err) {
8559 			printf("%s: could not remove MAC context (error %d)\n",
8560 			    DEVNAME(sc), err);
8561 			return err;
8562 		}
8563 		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8564 	}
8565 
8566 	/* Move unused PHY context to a default channel. */
8567 	err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8568 	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8569 	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8570 	if (err)
8571 		return err;
8572 
8573 	return 0;
8574 }
8575 
8576 int
8577 iwx_run(struct iwx_softc *sc)
8578 {
8579 	struct ieee80211com *ic = &sc->sc_ic;
8580 	struct iwx_node *in = (void *)ic->ic_bss;
8581 	struct ieee80211_node *ni = &in->in_ni;
8582 	int err;
8583 
8584 	splassert(IPL_NET);
8585 
8586 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8587 		/* Add a MAC context and a sniffing STA. */
8588 		err = iwx_auth(sc);
8589 		if (err)
8590 			return err;
8591 	}
8592 
8593 	/* Configure Rx chains for MIMO and configure 40 MHz channel. */
8594 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8595 		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
8596 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
8597 		    in->in_phyctxt->channel, chains, chains,
8598 		    0, IEEE80211_HTOP0_SCO_SCN,
8599 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8600 		if (err) {
8601 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8602 			return err;
8603 		}
8604 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
8605 		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
8606 		uint8_t sco, vht_chan_width;
8607 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
8608 		    ieee80211_node_supports_ht_chan40(ni))
8609 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
8610 		else
8611 			sco = IEEE80211_HTOP0_SCO_SCN;
8612 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
8613 		    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
8614 		    ieee80211_node_supports_vht_chan80(ni))
8615 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
8616 		else
8617 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
8618 		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
8619 		    in->in_phyctxt->channel, chains, chains,
8620 		    0, sco, vht_chan_width);
8621 		if (err) {
8622 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8623 			return err;
8624 		}
8625 	}
8626 
8627 	/* Update STA again to apply HT and VHT settings. */
8628 	err = iwx_add_sta_cmd(sc, in, 1);
8629 	if (err) {
8630 		printf("%s: could not update STA (error %d)\n",
8631 		    DEVNAME(sc), err);
8632 		return err;
8633 	}
8634 
8635 	/* We have now been assigned an associd by the AP. */
8636 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
8637 	if (err) {
8638 		printf("%s: failed to update MAC\n", DEVNAME(sc));
8639 		return err;
8640 	}
8641 
8642 	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
8643 	if (err) {
8644 		printf("%s: could not set sf full on (error %d)\n",
8645 		    DEVNAME(sc), err);
8646 		return err;
8647 	}
8648 
8649 	err = iwx_allow_mcast(sc);
8650 	if (err) {
8651 		printf("%s: could not allow mcast (error %d)\n",
8652 		    DEVNAME(sc), err);
8653 		return err;
8654 	}
8655 
8656 	err = iwx_power_update_device(sc);
8657 	if (err) {
8658 		printf("%s: could not send power command (error %d)\n",
8659 		    DEVNAME(sc), err);
8660 		return err;
8661 	}
8662 #ifdef notyet
8663 	/*
8664 	 * Disabled for now. Default beacon filter settings
8665 	 * prevent net80211 from getting ERP and HT protection
8666 	 * updates from beacons.
8667 	 */
8668 	err = iwx_enable_beacon_filter(sc, in);
8669 	if (err) {
8670 		printf("%s: could not enable beacon filter\n",
8671 		    DEVNAME(sc));
8672 		return err;
8673 	}
8674 #endif
8675 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
8676 		return 0;
8677 
8678 	err = iwx_power_mac_update_mode(sc, in);
8679 	if (err) {
8680 		printf("%s: could not update MAC power (error %d)\n",
8681 		    DEVNAME(sc), err);
8682 		return err;
8683 	}
8684 
8685 	/* Start at lowest available bit-rate. Firmware will raise. */
8686 	in->in_ni.ni_txrate = 0;
8687 	in->in_ni.ni_txmcs = 0;
8688 
8689 	err = iwx_rs_init(sc, in);
8690 	if (err) {
8691 		printf("%s: could not init rate scaling (error %d)\n",
8692 		    DEVNAME(sc), err);
8693 		return err;
8694 	}
8695 
8696 	return 0;
8697 }
8698 
8699 int
8700 iwx_run_stop(struct iwx_softc *sc)
8701 {
8702 	struct ieee80211com *ic = &sc->sc_ic;
8703 	struct iwx_node *in = (void *)ic->ic_bss;
8704 	struct ieee80211_node *ni = &in->in_ni;
8705 	int err, i;
8706 
8707 	splassert(IPL_NET);
8708 
8709 	err = iwx_flush_sta(sc, in);
8710 	if (err) {
8711 		printf("%s: could not flush Tx path (error %d)\n",
8712 		    DEVNAME(sc), err);
8713 		return err;
8714 	}
8715 
8716 	/*
8717 	 * Stop Rx BA sessions now. We cannot rely on the BA task
8718 	 * for this when moving out of RUN state since it runs in a
8719 	 * separate thread.
8720 	 * Note that in->in_ni (struct ieee80211_node) already represents
8721 	 * our new access point in case we are roaming between APs.
8722 	 * This means we cannot rely on struct ieee80211_node to tell
8723 	 * us which BA sessions exist.
8724 	 */
8725 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
8726 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
8727 		if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
8728 			continue;
8729 		iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
8730 	}
8731 
8732 	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
8733 	if (err)
8734 		return err;
8735 
8736 	err = iwx_disable_beacon_filter(sc);
8737 	if (err) {
8738 		printf("%s: could not disable beacon filter (error %d)\n",
8739 		    DEVNAME(sc), err);
8740 		return err;
8741 	}
8742 
8743 	/* Mark station as disassociated. */
8744 	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
8745 	if (err) {
8746 		printf("%s: failed to update MAC\n", DEVNAME(sc));
8747 		return err;
8748 	}
8749 
8750 	return 0;
8751 }
8752 
8753 struct ieee80211_node *
8754 iwx_node_alloc(struct ieee80211com *ic)
8755 {
8756 	return malloc(sizeof (struct iwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
8757 }
8758 
8759 int
8760 iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
8761     struct ieee80211_key *k)
8762 {
8763 	struct iwx_softc *sc = ic->ic_softc;
8764 	struct iwx_node *in = (void *)ni;
8765 	struct iwx_setkey_task_arg *a;
8766 	int err;
8767 
8768 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
8769 		/* Fall back to software crypto for other ciphers. */
8770 		err = ieee80211_set_key(ic, ni, k);
8771 		if (!err && in != NULL && (k->k_flags & IEEE80211_KEY_GROUP))
8772 			in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
8773 		return err;
8774 	}
8775 
8776 	if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
8777 		return ENOSPC;
8778 
8779 	a = &sc->setkey_arg[sc->setkey_cur];
8780 	a->sta_id = IWX_STATION_ID;
8781 	a->ni = ni;
8782 	a->k = k;
8783 	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
8784 	sc->setkey_nkeys++;
8785 	iwx_add_task(sc, systq, &sc->setkey_task);
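	/*
	 * The setkey task installs the key asynchronously. Returning
	 * EBUSY signals net80211 that installation is in progress.
	 */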
8786 	return EBUSY;
8787 }
8788 
8789 int
8790 iwx_mld_add_sta_key_cmd(struct iwx_softc *sc, int sta_id,
8791     struct ieee80211_node *ni, struct ieee80211_key *k)
8792 {
8793 	struct ieee80211com *ic = &sc->sc_ic;
8794 	struct iwx_sec_key_cmd cmd;
8795 	uint32_t flags = IWX_SEC_KEY_FLAG_CIPHER_CCMP;
8796 	int err;
8797 
8798 	if (k->k_flags & IEEE80211_KEY_GROUP)
8799 		flags |= IWX_SEC_KEY_FLAG_MCAST_KEY;
8800 
8801 	memset(&cmd, 0, sizeof(cmd));
8802 	cmd.u.add.sta_mask = htole32(1 << sta_id);
8803 	cmd.u.add.key_id = htole32(k->k_id);
8804 	cmd.u.add.key_flags = htole32(flags);
8805 	cmd.u.add.tx_seq = htole64(k->k_tsc);
8806 	memcpy(cmd.u.add.key, k->k_key, k->k_len);
8807 	cmd.action = IWX_FW_CTXT_ACTION_ADD;
8808 
8809 	err = iwx_send_cmd_pdu(sc,
8810 	    IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_SEC_KEY_CMD),
8811 	    0, sizeof(cmd), &cmd);
8812 	if (err) {
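		/*
		 * Without the key installed we cannot communicate with
		 * this AP; deauthenticate and go look for another one.
		 */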
8813 		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
8814 		    IEEE80211_REASON_AUTH_LEAVE);
8815 		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
8816 		return err;
8817 	}
8818 
8819 	return 0;
8820 }
8821 
8822 int
8823 iwx_add_sta_key_cmd(struct iwx_softc *sc, int sta_id,
8824     struct ieee80211_node *ni, struct ieee80211_key *k)
8825 {
8826 	struct ieee80211com *ic = &sc->sc_ic;
8827 	struct iwx_add_sta_key_cmd cmd;
8828 	uint32_t status;
8829 	int err;
8830 
8831 	memset(&cmd, 0, sizeof(cmd));
8832 
8833 	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
8834 	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
8835 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
8836 	    IWX_STA_KEY_FLG_KEYID_MSK));
8837 	if (k->k_flags & IEEE80211_KEY_GROUP) {
8838 		cmd.common.key_offset = 1;
8839 		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
8840 	} else
8841 		cmd.common.key_offset = 0;
8842 
8843 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
8844 	cmd.common.sta_id = sta_id;
8845 
8846 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
8847 
8848 	status = IWX_ADD_STA_SUCCESS;
8849 	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
8850 	    &status);
8851 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
8852 		return ECANCELED;
8853 	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
8854 		err = EIO;
8855 	if (err) {
8856 		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
8857 		    IEEE80211_REASON_AUTH_LEAVE);
8858 		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
8859 		return err;
8860 	}
8861 
8862 	return 0;
8863 }
8864 
8865 int
8866 iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
8867     struct ieee80211_key *k)
8868 {
8869 	struct ieee80211com *ic = &sc->sc_ic;
8870 	struct iwx_node *in = (void *)ni;
8871 	const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
8872 	    IWX_NODE_FLAG_HAVE_GROUP_KEY);
8873 	uint8_t sec_key_ver;
8874 	int err;
8875 
8876 	/*
8877 	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
8878 	 * Currently we only implement station mode where 'ni' is always
8879 	 * ic->ic_bss so there is no need to validate arguments beyond this:
8880 	 */
8881 	KASSERT(ni == ic->ic_bss);
8882 
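	/*
	 * Newer (MLD) firmware implements IWX_SEC_KEY_CMD in the data
	 * path group; otherwise fall back to the legacy
	 * IWX_ADD_STA_KEY command.
	 */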
8883 	sec_key_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
8884 	    IWX_SEC_KEY_CMD);
8885 	if (sec_key_ver != 0 && sec_key_ver != IWX_FW_CMD_VER_UNKNOWN)
8886 		err = iwx_mld_add_sta_key_cmd(sc, sta_id, ni, k);
8887 	else
8888 		err = iwx_add_sta_key_cmd(sc, sta_id, ni, k);
8889 	if (err)
8890 		return err;
8891 
8892 	if (k->k_flags & IEEE80211_KEY_GROUP)
8893 		in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
8894 	else
8895 		in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;
8896 
8897 	if ((in->in_flags & want_keymask) == want_keymask) {
8898 		DPRINTF(("marking port %s valid\n",
8899 		    ether_sprintf(ni->ni_macaddr)));
8900 		ni->ni_port_valid = 1;
8901 		ieee80211_set_link_state(ic, LINK_STATE_UP);
8902 	}
8903 
8904 	return 0;
8905 }
8906 
8907 void
8908 iwx_setkey_task(void *arg)
8909 {
8910 	struct iwx_softc *sc = arg;
8911 	struct iwx_setkey_task_arg *a;
8912 	int err = 0, s = splnet();
8913 
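	/*
	 * Drain the ring of pending keys queued by iwx_set_key();
	 * setkey_tail chases setkey_cur. Stop early on error or when
	 * the interface is being shut down.
	 */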
8914 	while (sc->setkey_nkeys > 0) {
8915 		if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
8916 			break;
8917 		a = &sc->setkey_arg[sc->setkey_tail];
8918 		err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
8919 		a->sta_id = 0;
8920 		a->ni = NULL;
8921 		a->k = NULL;
8922 		sc->setkey_tail = (sc->setkey_tail + 1) %
8923 		    nitems(sc->setkey_arg);
8924 		sc->setkey_nkeys--;
8925 	}
8926 
8927 	refcnt_rele_wake(&sc->task_refs);
8928 	splx(s);
8929 }
8930 
8931 void
8932 iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
8933     struct ieee80211_key *k)
8934 {
8935 	struct iwx_softc *sc = ic->ic_softc;
8936 	struct iwx_add_sta_key_cmd cmd;
8937 
8938 	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
8939 		/* Fall back to software crypto for other ciphers. */
8940 		ieee80211_delete_key(ic, ni, k);
8941 		return;
8942 	}
8943 
8944 	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
8945 		return;
8946 
8947 	memset(&cmd, 0, sizeof(cmd));
8948 
8949 	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
8950 	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
8951 	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
8952 	    IWX_STA_KEY_FLG_KEYID_MSK));
8953 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
8954 	if (k->k_flags & IEEE80211_KEY_GROUP)
8955 		cmd.common.key_offset = 1;
8956 	else
8957 		cmd.common.key_offset = 0;
8958 	cmd.common.sta_id = IWX_STATION_ID;
8959 
8960 	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
8961 }
8962 
8963 int
8964 iwx_media_change(struct ifnet *ifp)
8965 {
8966 	int err;
8967 
8968 	err = ieee80211_media_change(ifp);
8969 	if (err != ENETRESET)
8970 		return err;
8971 
8972 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
8973 	    (IFF_UP | IFF_RUNNING)) {
8974 		iwx_stop(ifp);
8975 		err = iwx_init(ifp);
8976 	}
8977 	return err;
8978 }
8979 
8980 void
8981 iwx_newstate_task(void *psc)
8982 {
8983 	struct iwx_softc *sc = (struct iwx_softc *)psc;
8984 	struct ieee80211com *ic = &sc->sc_ic;
8985 	enum ieee80211_state nstate = sc->ns_nstate;
8986 	enum ieee80211_state ostate = ic->ic_state;
8987 	int arg = sc->ns_arg;
8988 	int err = 0, s = splnet();
8989 
8990 	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
8991 		/* iwx_stop() is waiting for us. */
8992 		refcnt_rele_wake(&sc->task_refs);
8993 		splx(s);
8994 		return;
8995 	}
8996 
8997 	if (ostate == IEEE80211_S_SCAN) {
8998 		if (nstate == ostate) {
8999 			if (sc->sc_flags & IWX_FLAG_SCANNING) {
9000 				refcnt_rele_wake(&sc->task_refs);
9001 				splx(s);
9002 				return;
9003 			}
9004 			/* Firmware is no longer scanning. Do another scan. */
9005 			goto next_scan;
9006 		}
9007 	}
9008 
9009 	if (nstate <= ostate) {
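	/*
	 * When moving to a lower state, tear down the higher states
	 * first; the switch cases below fall through deliberately.
	 */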
9010 		switch (ostate) {
9011 		case IEEE80211_S_RUN:
9012 			err = iwx_run_stop(sc);
9013 			if (err)
9014 				goto out;
9015 			/* FALLTHROUGH */
9016 		case IEEE80211_S_ASSOC:
9017 		case IEEE80211_S_AUTH:
9018 			if (nstate <= IEEE80211_S_AUTH) {
9019 				err = iwx_deauth(sc);
9020 				if (err)
9021 					goto out;
9022 			}
9023 			/* FALLTHROUGH */
9024 		case IEEE80211_S_SCAN:
9025 		case IEEE80211_S_INIT:
9026 			break;
9027 		}
9028 
9029 		/* Die now if iwx_stop() was called while we were sleeping. */
9030 		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
9031 			refcnt_rele_wake(&sc->task_refs);
9032 			splx(s);
9033 			return;
9034 		}
9035 	}
9036 
9037 	switch (nstate) {
9038 	case IEEE80211_S_INIT:
9039 		break;
9040 
9041 	case IEEE80211_S_SCAN:
9042 next_scan:
9043 		err = iwx_scan(sc);
9044 		if (err)
9045 			break;
9046 		refcnt_rele_wake(&sc->task_refs);
9047 		splx(s);
9048 		return;
9049 
9050 	case IEEE80211_S_AUTH:
9051 		err = iwx_auth(sc);
9052 		break;
9053 
9054 	case IEEE80211_S_ASSOC:
9055 		break;
9056 
9057 	case IEEE80211_S_RUN:
9058 		err = iwx_run(sc);
9059 		break;
9060 	}
9061 
9062 out:
9063 	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
9064 		if (err)
9065 			task_add(systq, &sc->init_task);
9066 		else
9067 			sc->sc_newstate(ic, nstate, arg);
9068 	}
9069 	refcnt_rele_wake(&sc->task_refs);
9070 	splx(s);
9071 }
9072 
9073 int
9074 iwx_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
9075 {
9076 	struct ifnet *ifp = IC2IFP(ic);
9077 	struct iwx_softc *sc = ifp->if_softc;
9078 
9079 	/*
9080 	 * Prevent attempts to transition towards the same state, unless
9081 	 * we are scanning in which case a SCAN -> SCAN transition
9082 	 * triggers another scan iteration. And AUTH -> AUTH is needed
9083 	 * to support band-steering.
9084 	 */
9085 	if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
9086 	    nstate != IEEE80211_S_AUTH)
9087 		return 0;
9088 
9089 	if (ic->ic_state == IEEE80211_S_RUN) {
9090 		iwx_del_task(sc, systq, &sc->ba_task);
9091 		iwx_del_task(sc, systq, &sc->setkey_task);
9092 		memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
9093 		sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
9094 		iwx_del_task(sc, systq, &sc->mac_ctxt_task);
9095 		iwx_del_task(sc, systq, &sc->phy_ctxt_task);
9096 		iwx_del_task(sc, systq, &sc->bgscan_done_task);
9097 	}
9098 
9099 	sc->ns_nstate = nstate;
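	/*
	 * Defer the state change to a task since the work involved
	 * may sleep while waiting for firmware responses.
	 */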
9100 	sc->ns_arg = arg;
9101 
9102 	iwx_add_task(sc, sc->sc_nswq, &sc->newstate_task);
9103 
9104 	return 0;
9105 }
9106 
9107 void
9108 iwx_endscan(struct iwx_softc *sc)
9109 {
9110 	struct ieee80211com *ic = &sc->sc_ic;
9111 
9112 	if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
9113 		return;
9114 
9115 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
9116 	ieee80211_end_scan(&ic->ic_if);
9117 }
9118 
9119 /*
9120  * Aging and idle timeouts for the different possible scenarios
9121  * in default configuration
9122  * in default configuration.
9123 static const uint32_t
9124 iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
9125 	{
9126 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
9127 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
9128 	},
9129 	{
9130 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
9131 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
9132 	},
9133 	{
9134 		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
9135 		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
9136 	},
9137 	{
9138 		htole32(IWX_SF_BA_AGING_TIMER_DEF),
9139 		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
9140 	},
9141 	{
9142 		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
9143 		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
9144 	},
9145 };
9146 
9147 /*
9148  * Aging and idle timeouts for the different possible scenarios
9149  * in single BSS MAC configuration.
9150  */
9151 static const uint32_t
9152 iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
9153 	{
9154 		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
9155 		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
9156 	},
9157 	{
9158 		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
9159 		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
9160 	},
9161 	{
9162 		htole32(IWX_SF_MCAST_AGING_TIMER),
9163 		htole32(IWX_SF_MCAST_IDLE_TIMER)
9164 	},
9165 	{
9166 		htole32(IWX_SF_BA_AGING_TIMER),
9167 		htole32(IWX_SF_BA_IDLE_TIMER)
9168 	},
9169 	{
9170 		htole32(IWX_SF_TX_RE_AGING_TIMER),
9171 		htole32(IWX_SF_TX_RE_IDLE_TIMER)
9172 	},
9173 };
9174 
9175 void
9176 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
9177     struct ieee80211_node *ni)
9178 {
9179 	int i, j, watermark;
9180 
9181 	sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
9182 
9183 	/*
9184 	 * If we are in the association flow, check the antenna configuration
9185 	 * capabilities of the AP station and choose the watermark accordingly.
9186 	 */
9187 	if (ni) {
9188 		if (ni->ni_flags & IEEE80211_NODE_HT) {
9189 			if (ni->ni_rxmcs[1] != 0)
9190 				watermark = IWX_SF_W_MARK_MIMO2;
9191 			else
9192 				watermark = IWX_SF_W_MARK_SISO;
9193 		} else {
9194 			watermark = IWX_SF_W_MARK_LEGACY;
9195 		}
9196 	/* default watermark value for unassociated mode. */
9197 	} else {
9198 		watermark = IWX_SF_W_MARK_MIMO2;
9199 	}
9200 	sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
9201 
9202 	for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
9203 		for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
9204 			sf_cmd->long_delay_timeouts[i][j] =
9205 					htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
9206 		}
9207 	}
9208 
9209 	if (ni) {
9210 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
9211 		       sizeof(iwx_sf_full_timeout));
9212 	} else {
9213 		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
9214 		       sizeof(iwx_sf_full_timeout_def));
9215 	}
9216 
9217 }
9218 
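/*
 * Configure the "smart FIFO" (SF), which programs Rx FIFO aging and
 * idle timeouts plus a fill watermark; IWX_SF_FULL_ON is used while
 * associated, IWX_SF_INIT_OFF otherwise.
 */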
9219 int
9220 iwx_sf_config(struct iwx_softc *sc, int new_state)
9221 {
9222 	struct ieee80211com *ic = &sc->sc_ic;
9223 	struct iwx_sf_cfg_cmd sf_cmd = {
9224 		.state = htole32(new_state),
9225 	};
9226 	int err = 0;
9227 
9228 	switch (new_state) {
9229 	case IWX_SF_UNINIT:
9230 	case IWX_SF_INIT_OFF:
9231 		iwx_fill_sf_command(sc, &sf_cmd, NULL);
9232 		break;
9233 	case IWX_SF_FULL_ON:
9234 		iwx_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
9235 		break;
9236 	default:
9237 		return EINVAL;
9238 	}
9239 
9240 	err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
9241 				   sizeof(sf_cmd), &sf_cmd);
9242 	return err;
9243 }
9244 
9245 int
9246 iwx_send_bt_init_conf(struct iwx_softc *sc)
9247 {
9248 	struct iwx_bt_coex_cmd bt_cmd;
9249 
9250 	bt_cmd.mode = htole32(IWX_BT_COEX_WIFI);
9251 	bt_cmd.enabled_modules = 0;
9252 
9253 	return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
9254 	    &bt_cmd);
9255 }
9256 
9257 int
9258 iwx_send_soc_conf(struct iwx_softc *sc)
9259 {
9260 	struct iwx_soc_configuration_cmd cmd;
9261 	int err;
9262 	uint32_t cmd_id, flags = 0;
9263 
9264 	memset(&cmd, 0, sizeof(cmd));
9265 
9266 	/*
9267 	 * In VER_1 of this command, the discrete value is considered
9268 	 * an integer; in VER_2, it's a bitmask.  Since we have only 2
9269 	 * values in VER_1, this is backwards-compatible with VER_2,
9270 	 * as long as we don't set any other flag bits.
9271 	 */
9272 	if (!sc->sc_integrated) { /* VER_1 */
9273 		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
9274 	} else { /* VER_2 */
9275 		uint8_t scan_cmd_ver;
9276 		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
9277 			flags |= (sc->sc_ltr_delay &
9278 			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
9279 		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
9280 		    IWX_SCAN_REQ_UMAC);
9281 		if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
9282 		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
9283 			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
9284 	}
9285 	cmd.flags = htole32(flags);
9286 
9287 	cmd.latency = htole32(sc->sc_xtal_latency);
9288 
9289 	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
9290 	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
9291 	if (err)
9292 		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
9293 	return err;
9294 }
9295 
9296 int
9297 iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
9298 {
9299 	struct iwx_mcc_update_cmd mcc_cmd;
9300 	struct iwx_host_cmd hcmd = {
9301 		.id = IWX_MCC_UPDATE_CMD,
9302 		.flags = IWX_CMD_WANT_RESP,
9303 		.data = { &mcc_cmd },
9304 	};
9305 	struct iwx_rx_packet *pkt;
9306 	struct iwx_mcc_update_resp *resp;
9307 	size_t resp_len;
9308 	int err;
9309 
9310 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
9311 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
9312 	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
9313 	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
9314 		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
9315 	else
9316 		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
9317 
9318 	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
9319 	hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
9320 
9321 	err = iwx_send_cmd(sc, &hcmd);
9322 	if (err)
9323 		return err;
9324 
9325 	pkt = hcmd.resp_pkt;
9326 	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
9327 		err = EIO;
9328 		goto out;
9329 	}
9330 
9331 	resp_len = iwx_rx_packet_payload_len(pkt);
9332 	if (resp_len < sizeof(*resp)) {
9333 		err = EIO;
9334 		goto out;
9335 	}
9336 
9337 	resp = (void *)pkt->data;
9338 	if (resp_len != sizeof(*resp) +
9339 	    resp->n_channels * sizeof(resp->channels[0])) {
9340 		err = EIO;
9341 		goto out;
9342 	}
9343 
9344 	DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%x n_channels=%u\n",
9345 	    resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));
9346 
9347 	/* Update channel map for net80211 and our scan configuration. */
9348 	iwx_init_channel_map(sc, NULL, resp->channels, resp->n_channels);
9349 
9350 out:
9351 	iwx_free_resp(sc, &hcmd);
9352 
9353 	return err;
9354 }
9355 
9356 int
9357 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
9358 {
9359 	struct iwx_temp_report_ths_cmd cmd;
9360 	int err;
9361 
9362 	/*
9363 	 * In order to give responsibility for critical-temperature-kill
9364 	 * and TX backoff to FW we need to send an empty temperature
9365 	 * reporting command at init time.
9366 	 */
9367 	memset(&cmd, 0, sizeof(cmd));
9368 
9369 	err = iwx_send_cmd_pdu(sc,
9370 	    IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
9371 	    0, sizeof(cmd), &cmd);
9372 	if (err)
9373 		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
9374 		    DEVNAME(sc), err);
9375 
9376 	return err;
9377 }
9378 
9379 int
9380 iwx_init_hw(struct iwx_softc *sc)
9381 {
9382 	struct ieee80211com *ic = &sc->sc_ic;
9383 	int err, i;
9384 
9385 	err = iwx_run_init_mvm_ucode(sc, 0);
9386 	if (err)
9387 		return err;
9388 
9389 	if (!iwx_nic_lock(sc))
9390 		return EBUSY;
9391 
9392 	err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
9393 	if (err) {
9394 		printf("%s: could not init tx ant config (error %d)\n",
9395 		    DEVNAME(sc), err);
9396 		goto err;
9397 	}
9398 
9399 	if (sc->sc_tx_with_siso_diversity) {
9400 		err = iwx_send_phy_cfg_cmd(sc);
9401 		if (err) {
9402 			printf("%s: could not send phy config (error %d)\n",
9403 			    DEVNAME(sc), err);
9404 			goto err;
9405 		}
9406 	}
9407 
9408 	err = iwx_send_bt_init_conf(sc);
9409 	if (err) {
9410 		printf("%s: could not init bt coex (error %d)\n",
9411 		    DEVNAME(sc), err);
9412 		return err;
9413 	}
9414 
9415 	err = iwx_send_soc_conf(sc);
9416 	if (err)
9417 		return err;
9418 
9419 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
9420 		err = iwx_send_dqa_cmd(sc);
9421 		if (err)
9422 			return err;
9423 	}
9424 
9425 	for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
9426 		/*
9427 		 * The channel used here isn't relevant as it's
9428 		 * going to be overwritten in the other flows.
9429 		 * For now use the first channel we have.
9430 		 */
9431 		sc->sc_phyctxt[i].id = i;
9432 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
9433 		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
9434 		    IWX_FW_CTXT_ACTION_ADD, 0, IEEE80211_HTOP0_SCO_SCN,
9435 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
9436 		if (err) {
9437 			printf("%s: could not add phy context %d (error %d)\n",
9438 			    DEVNAME(sc), i, err);
9439 			goto err;
9440 		}
9441 		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
9442 		    IWX_RLC_CONFIG_CMD) == 2) {
9443 			err = iwx_phy_send_rlc(sc, &sc->sc_phyctxt[i], 1, 1);
9444 			if (err) {
9445 				printf("%s: could not configure RLC for PHY "
9446 				    "%d (error %d)\n", DEVNAME(sc), i, err);
9447 				goto err;
9448 			}
9449 		}
9450 	}
9451 
9452 	err = iwx_config_ltr(sc);
9453 	if (err) {
9454 		printf("%s: PCIe LTR configuration failed (error %d)\n",
9455 		    DEVNAME(sc), err);
9456 	}
9457 
9458 	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
9459 		err = iwx_send_temp_report_ths_cmd(sc);
9460 		if (err)
9461 			goto err;
9462 	}
9463 
9464 	err = iwx_power_update_device(sc);
9465 	if (err) {
9466 		printf("%s: could not send power command (error %d)\n",
9467 		    DEVNAME(sc), err);
9468 		goto err;
9469 	}
9470 
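	/*
	 * With LAR (location-aware regulatory) the firmware tracks the
	 * regulatory domain; "ZZ" is the world-wide wildcard country
	 * code used to query the current MCC and channel map.
	 */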
9471 	if (sc->sc_nvm.lar_enabled) {
9472 		err = iwx_send_update_mcc_cmd(sc, "ZZ");
9473 		if (err) {
9474 			printf("%s: could not init LAR (error %d)\n",
9475 			    DEVNAME(sc), err);
9476 			goto err;
9477 		}
9478 	}
9479 
9480 	err = iwx_config_umac_scan_reduced(sc);
9481 	if (err) {
9482 		printf("%s: could not configure scan (error %d)\n",
9483 		    DEVNAME(sc), err);
9484 		goto err;
9485 	}
9486 
9487 	err = iwx_disable_beacon_filter(sc);
9488 	if (err) {
9489 		printf("%s: could not disable beacon filter (error %d)\n",
9490 		    DEVNAME(sc), err);
9491 		goto err;
9492 	}
9493 
9494 err:
9495 	iwx_nic_unlock(sc);
9496 	return err;
9497 }
9498 
9499 /* Allow multicast from our BSSID. */
9500 int
9501 iwx_allow_mcast(struct iwx_softc *sc)
9502 {
9503 	struct ieee80211com *ic = &sc->sc_ic;
9504 	struct iwx_node *in = (void *)ic->ic_bss;
9505 	struct iwx_mcast_filter_cmd *cmd;
9506 	size_t size;
9507 	int err;
9508 
9509 	size = roundup(sizeof(*cmd), 4);
9510 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
9511 	if (cmd == NULL)
9512 		return ENOMEM;
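	/*
	 * pass_all with an empty address list accepts all multicast
	 * frames; filter_own presumably filters our own multicast
	 * frames echoed back by the AP.
	 */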
9513 	cmd->filter_own = 1;
9514 	cmd->port_id = 0;
9515 	cmd->count = 0;
9516 	cmd->pass_all = 1;
9517 	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
9518 
9519 	err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
9520 	    0, size, cmd);
9521 	free(cmd, M_DEVBUF, size);
9522 	return err;
9523 }
9524 
9525 int
9526 iwx_init(struct ifnet *ifp)
9527 {
9528 	struct iwx_softc *sc = ifp->if_softc;
9529 	struct ieee80211com *ic = &sc->sc_ic;
9530 	int err, generation;
9531 
9532 	rw_assert_wrlock(&sc->ioctl_rwl);
9533 
9534 	generation = ++sc->sc_generation;
9535 
9536 	err = iwx_preinit(sc);
9537 	if (err)
9538 		return err;
9539 
9540 	err = iwx_start_hw(sc);
9541 	if (err) {
9542 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9543 		return err;
9544 	}
9545 
9546 	err = iwx_init_hw(sc);
9547 	if (err) {
9548 		if (generation == sc->sc_generation)
9549 			iwx_stop_device(sc);
9550 		return err;
9551 	}
9552 
9553 	if (sc->sc_nvm.sku_cap_11n_enable)
9554 		iwx_setup_ht_rates(sc);
9555 	if (sc->sc_nvm.sku_cap_11ac_enable)
9556 		iwx_setup_vht_rates(sc);
9557 
9558 	KASSERT(sc->task_refs.r_refs == 0);
9559 	refcnt_init(&sc->task_refs);
9560 	ifq_clr_oactive(&ifp->if_snd);
9561 	ifp->if_flags |= IFF_RUNNING;
9562 
9563 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
9564 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
9565 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
9566 		return 0;
9567 	}
9568 
9569 	ieee80211_begin_scan(ifp);
9570 
9571 	/*
9572 	 * ieee80211_begin_scan() ends up scheduling iwx_newstate_task().
9573 	 * Wait until the transition to SCAN state has completed.
9574 	 */
9575 	do {
9576 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwxinit",
9577 		    SEC_TO_NSEC(1));
9578 		if (generation != sc->sc_generation)
9579 			return ENXIO;
9580 		if (err) {
9581 			iwx_stop(ifp);
9582 			return err;
9583 		}
9584 	} while (ic->ic_state != IEEE80211_S_SCAN);
9585 
9586 	return 0;
9587 }
9588 
9589 void
9590 iwx_start(struct ifnet *ifp)
9591 {
9592 	struct iwx_softc *sc = ifp->if_softc;
9593 	struct ieee80211com *ic = &sc->sc_ic;
9594 	struct ieee80211_node *ni;
9595 	struct ether_header *eh;
9596 	struct mbuf *m;
9597 
9598 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
9599 		return;
9600 
9601 	for (;;) {
9602 		/* why isn't this done per-queue? */
9603 		if (sc->qfullmsk != 0) {
9604 			ifq_set_oactive(&ifp->if_snd);
9605 			break;
9606 		}
9607 
9608 		/* Don't queue additional frames while flushing Tx queues. */
9609 		if (sc->sc_flags & IWX_FLAG_TXFLUSH)
9610 			break;
9611 
9612 		/* need to send management frames even if we're not RUNning */
9613 		m = mq_dequeue(&ic->ic_mgtq);
9614 		if (m) {
9615 			ni = m->m_pkthdr.ph_cookie;
9616 			goto sendit;
9617 		}
9618 
9619 		if (ic->ic_state != IEEE80211_S_RUN ||
9620 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
9621 			break;
9622 
9623 		m = ifq_dequeue(&ifp->if_snd);
9624 		if (!m)
9625 			break;
9626 		if (m->m_len < sizeof (*eh) &&
9627 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
9628 			ifp->if_oerrors++;
9629 			continue;
9630 		}
9631 #if NBPFILTER > 0
9632 		if (ifp->if_bpf != NULL)
9633 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
9634 #endif
9635 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
9636 			ifp->if_oerrors++;
9637 			continue;
9638 		}
9639 
9640  sendit:
9641 #if NBPFILTER > 0
9642 		if (ic->ic_rawbpf != NULL)
9643 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
9644 #endif
9645 		if (iwx_tx(sc, m, ni) != 0) {
9646 			ieee80211_release_node(ic, ni);
9647 			ifp->if_oerrors++;
9648 			continue;
9649 		}
9650 
9651 		if (ifp->if_flags & IFF_UP)
9652 			ifp->if_timer = 1;
9653 	}
9654 
9655 	return;
9656 }
9657 
9658 void
9659 iwx_stop(struct ifnet *ifp)
9660 {
9661 	struct iwx_softc *sc = ifp->if_softc;
9662 	struct ieee80211com *ic = &sc->sc_ic;
9663 	struct iwx_node *in = (void *)ic->ic_bss;
9664 	int i, s = splnet();
9665 
9666 	rw_assert_wrlock(&sc->ioctl_rwl);
9667 
9668 	sc->sc_flags |= IWX_FLAG_SHUTDOWN; /* Disallow new tasks. */
9669 
9670 	/* Cancel scheduled tasks and let any stale tasks finish up. */
9671 	task_del(systq, &sc->init_task);
9672 	iwx_del_task(sc, sc->sc_nswq, &sc->newstate_task);
9673 	iwx_del_task(sc, systq, &sc->ba_task);
9674 	iwx_del_task(sc, systq, &sc->setkey_task);
9675 	memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
9676 	sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
9677 	iwx_del_task(sc, systq, &sc->mac_ctxt_task);
9678 	iwx_del_task(sc, systq, &sc->phy_ctxt_task);
9679 	iwx_del_task(sc, systq, &sc->bgscan_done_task);
9680 	KASSERT(sc->task_refs.r_refs >= 1);
9681 	refcnt_finalize(&sc->task_refs, "iwxstop");
9682 
9683 	iwx_stop_device(sc);
9684 
9685 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
9686 	sc->bgscan_unref_arg = NULL;
9687 	sc->bgscan_unref_arg_size = 0;
9688 
9689 	/* Reset soft state. */
9690 
9691 	sc->sc_generation++;
9692 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
9693 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
9694 		sc->sc_cmd_resp_pkt[i] = NULL;
9695 		sc->sc_cmd_resp_len[i] = 0;
9696 	}
9697 	ifp->if_flags &= ~IFF_RUNNING;
9698 	ifq_clr_oactive(&ifp->if_snd);
9699 
9700 	in->in_phyctxt = NULL;
9701 	in->in_flags = 0;
9702 	IEEE80211_ADDR_COPY(in->in_macaddr, etheranyaddr);
9703 
9704 	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
9705 	sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
9706 	sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
9707 	sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
9708 	sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
9709 	sc->sc_flags &= ~IWX_FLAG_HW_ERR;
9710 	sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
9711 	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
9712 
9713 	sc->sc_rx_ba_sessions = 0;
9714 	sc->ba_rx.start_tidmask = 0;
9715 	sc->ba_rx.stop_tidmask = 0;
9716 	memset(sc->aggqid, 0, sizeof(sc->aggqid));
9717 	sc->ba_tx.start_tidmask = 0;
9718 	sc->ba_tx.stop_tidmask = 0;
9719 
9720 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
9721 	sc->ns_nstate = IEEE80211_S_INIT;
9722 
9723 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
9724 		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
9725 		iwx_clear_reorder_buffer(sc, rxba);
9726 	}
9727 	memset(sc->sc_tx_timer, 0, sizeof(sc->sc_tx_timer));
9728 	ifp->if_timer = 0;
9729 
9730 	splx(s);
9731 }
9732 
9733 void
9734 iwx_watchdog(struct ifnet *ifp)
9735 {
9736 	struct iwx_softc *sc = ifp->if_softc;
9737 	int i;
9738 
9739 	ifp->if_timer = 0;
9740 
9741 	/*
9742 	 * We maintain a separate timer for each Tx queue because
9743 	 * Tx aggregation queues can get "stuck" while other queues
9744 	 * keep working. The Linux driver uses a similar workaround.
9745 	 */
9746 	for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
9747 		if (sc->sc_tx_timer[i] > 0) {
9748 			if (--sc->sc_tx_timer[i] == 0) {
9749 				printf("%s: device timeout\n", DEVNAME(sc));
9750 				if (ifp->if_flags & IFF_DEBUG) {
9751 					iwx_nic_error(sc);
9752 					iwx_dump_driver_status(sc);
9753 				}
9754 				if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
9755 					task_add(systq, &sc->init_task);
9756 				ifp->if_oerrors++;
9757 				return;
9758 			}
9759 			ifp->if_timer = 1;
9760 		}
9761 	}
9762 
9763 	ieee80211_watchdog(ifp);
9764 }
9765 
9766 int
9767 iwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
9768 {
9769 	struct iwx_softc *sc = ifp->if_softc;
9770 	int s, err = 0, generation = sc->sc_generation;
9771 
9772 	/*
9773 	 * Prevent processes from entering this function while another
9774 	 * process is tsleep'ing in it.
9775 	 */
9776 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
9777 	if (err == 0 && generation != sc->sc_generation) {
9778 		rw_exit(&sc->ioctl_rwl);
9779 		return ENXIO;
9780 	}
9781 	if (err)
9782 		return err;
9783 	s = splnet();
9784 
9785 	switch (cmd) {
9786 	case SIOCSIFADDR:
9787 		ifp->if_flags |= IFF_UP;
9788 		/* FALLTHROUGH */
9789 	case SIOCSIFFLAGS:
9790 		if (ifp->if_flags & IFF_UP) {
9791 			if (!(ifp->if_flags & IFF_RUNNING)) {
9792 				/* Force reload of firmware image from disk. */
9793 				sc->sc_fw.fw_status = IWX_FW_STATUS_NONE;
9794 				err = iwx_init(ifp);
9795 			}
9796 		} else {
9797 			if (ifp->if_flags & IFF_RUNNING)
9798 				iwx_stop(ifp);
9799 		}
9800 		break;
9801 
9802 	default:
9803 		err = ieee80211_ioctl(ifp, cmd, data);
9804 	}
9805 
9806 	if (err == ENETRESET) {
9807 		err = 0;
9808 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
9809 		    (IFF_UP | IFF_RUNNING)) {
9810 			iwx_stop(ifp);
9811 			err = iwx_init(ifp);
9812 		}
9813 	}
9814 
9815 	splx(s);
9816 	rw_exit(&sc->ioctl_rwl);
9817 
9818 	return err;
9819 }
9820 
9821 /*
9822  * Note: This structure is read from the device with IO accesses,
9823  * and the reading already does the endian conversion. As it is
9824  * read with uint32_t-sized accesses, any members with a different size
9825  * need to be ordered correctly though!
9826  */
9827 struct iwx_error_event_table {
9828 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
9829 	uint32_t error_id;		/* type of error */
9830 	uint32_t trm_hw_status0;	/* TRM HW status */
9831 	uint32_t trm_hw_status1;	/* TRM HW status */
9832 	uint32_t blink2;		/* branch link */
9833 	uint32_t ilink1;		/* interrupt link */
9834 	uint32_t ilink2;		/* interrupt link */
9835 	uint32_t data1;		/* error-specific data */
9836 	uint32_t data2;		/* error-specific data */
9837 	uint32_t data3;		/* error-specific data */
9838 	uint32_t bcon_time;		/* beacon timer */
9839 	uint32_t tsf_low;		/* network timestamp function timer */
9840 	uint32_t tsf_hi;		/* network timestamp function timer */
9841 	uint32_t gp1;		/* GP1 timer register */
9842 	uint32_t gp2;		/* GP2 timer register */
9843 	uint32_t fw_rev_type;	/* firmware revision type */
9844 	uint32_t major;		/* uCode version major */
9845 	uint32_t minor;		/* uCode version minor */
9846 	uint32_t hw_ver;		/* HW Silicon version */
9847 	uint32_t brd_ver;		/* HW board version */
9848 	uint32_t log_pc;		/* log program counter */
9849 	uint32_t frame_ptr;		/* frame pointer */
9850 	uint32_t stack_ptr;		/* stack pointer */
9851 	uint32_t hcmd;		/* last host command header */
9852 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
9853 				 * rxtx_flag */
9854 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
9855 				 * host_flag */
9856 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
9857 				 * enc_flag */
9858 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
9859 				 * time_flag */
9860 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
9861 				 * wico interrupt */
9862 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
9863 	uint32_t wait_event;		/* wait event() caller address */
9864 	uint32_t l2p_control;	/* L2pControlField */
9865 	uint32_t l2p_duration;	/* L2pDurationField */
9866 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
9867 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
9868 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
9869 				 * (LMPM_PMG_SEL) */
9870 	uint32_t u_timestamp;	/* timestamp of the firmware
9871 				 * compilation date and time */
9872 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
9873 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
9874 
9875 /*
9876  * UMAC error struct - relevant starting from family 8000 chip.
9877  * Note: This structure is read from the device with IO accesses,
9878  * and the reading already does the endian conversion. As it is
9879  * read with u32-sized accesses, any members with a different size
9880  * need to be ordered correctly though!
9881  */
9882 struct iwx_umac_error_event_table {
9883 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
9884 	uint32_t error_id;	/* type of error */
9885 	uint32_t blink1;	/* branch link */
9886 	uint32_t blink2;	/* branch link */
9887 	uint32_t ilink1;	/* interrupt link */
9888 	uint32_t ilink2;	/* interrupt link */
9889 	uint32_t data1;		/* error-specific data */
9890 	uint32_t data2;		/* error-specific data */
9891 	uint32_t data3;		/* error-specific data */
9892 	uint32_t umac_major;
9893 	uint32_t umac_minor;
9894 	uint32_t frame_pointer;	/* core register 27 */
9895 	uint32_t stack_pointer;	/* core register 28 */
9896 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
9897 	uint32_t nic_isr_pref;	/* ISR status register */
9898 } __packed;
9899 
9900 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
9901 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
9902 
9903 void
9904 iwx_nic_umac_error(struct iwx_softc *sc)
9905 {
9906 	struct iwx_umac_error_event_table table;
9907 	uint32_t base;
9908 
9909 	base = sc->sc_uc.uc_umac_error_event_table;
9910 
9911 	if (base < 0x400000) {
9912 		printf("%s: Invalid error log pointer 0x%08x\n",
9913 		    DEVNAME(sc), base);
9914 		return;
9915 	}
9916 
9917 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
9918 		printf("%s: reading errlog failed\n", DEVNAME(sc));
9919 		return;
9920 	}
9921 
9922 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
9923 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
9924 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
9925 			sc->sc_flags, table.valid);
9926 	}
9927 
9928 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
9929 		iwx_desc_lookup(table.error_id));
9930 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
9931 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
9932 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
9933 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
9934 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
9935 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
9936 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
9937 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
9938 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
9939 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
9940 	    table.frame_pointer);
9941 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
9942 	    table.stack_pointer);
9943 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
9944 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
9945 	    table.nic_isr_pref);
9946 }
9947 
9948 #define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
9949 static struct {
9950 	const char *name;
9951 	uint8_t num;
9952 } advanced_lookup[] = {
9953 	{ "NMI_INTERRUPT_WDG", 0x34 },
9954 	{ "SYSASSERT", 0x35 },
9955 	{ "UCODE_VERSION_MISMATCH", 0x37 },
9956 	{ "BAD_COMMAND", 0x38 },
9957 	{ "BAD_COMMAND", 0x39 },
9958 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
9959 	{ "FATAL_ERROR", 0x3D },
9960 	{ "NMI_TRM_HW_ERR", 0x46 },
9961 	{ "NMI_INTERRUPT_TRM", 0x4C },
9962 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
9963 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
9964 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
9965 	{ "NMI_INTERRUPT_HOST", 0x66 },
9966 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
9967 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
9968 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
9969 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
9970 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
9971 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
9972 	{ "ADVANCED_SYSASSERT", 0 },
9973 };
9974 
9975 const char *
9976 iwx_desc_lookup(uint32_t num)
9977 {
9978 	int i;
9979 
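	/*
	 * The upper nibble of the assert code identifies the reporting
	 * CPU (IWX_FW_SYSASSERT_CPU_MASK); ignore it when matching.
	 */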
9980 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
9981 		if (advanced_lookup[i].num ==
9982 		    (num & ~IWX_FW_SYSASSERT_CPU_MASK))
9983 			return advanced_lookup[i].name;
9984 
9985 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
9986 	return advanced_lookup[i].name;
9987 }
9988 
9989 /*
9990  * Support for dumping the error log seemed like a good idea ...
9991  * but it's mostly hex junk and the only sensible thing is the
9992  * hw/ucode revision (which we know anyway).  Since it's here,
9993  * I'll just leave it in, just in case e.g. the Intel guys want to
9994  * help us decipher some "ADVANCED_SYSASSERT" later.
9995  */
9996 void
9997 iwx_nic_error(struct iwx_softc *sc)
9998 {
9999 	struct iwx_error_event_table table;
10000 	uint32_t base;
10001 
10002 	printf("%s: dumping device error log\n", DEVNAME(sc));
10003 	base = sc->sc_uc.uc_lmac_error_event_table[0];
10004 	if (base < 0x400000) {
10005 		printf("%s: Invalid error log pointer 0x%08x\n",
10006 		    DEVNAME(sc), base);
10007 		return;
10008 	}
10009 
10010 	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10011 		printf("%s: reading errlog failed\n", DEVNAME(sc));
10012 		return;
10013 	}
10014 
10015 	if (!table.valid) {
10016 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
10017 		return;
10018 	}
10019 
10020 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
10021 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
10022 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10023 		    sc->sc_flags, table.valid);
10024 	}
10025 
10026 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
10027 	    iwx_desc_lookup(table.error_id));
10028 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
10029 	    table.trm_hw_status0);
10030 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
10031 	    table.trm_hw_status1);
10032 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
10033 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
10034 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
10035 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
10036 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
10037 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
10038 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
10039 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
10040 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
10041 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
10042 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
10043 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
10044 	    table.fw_rev_type);
10045 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
10046 	    table.major);
10047 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
10048 	    table.minor);
10049 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
10050 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
10051 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
10052 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
10053 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
10054 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
10055 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
10056 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
10057 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
10058 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
10059 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
10060 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
10061 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
10062 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
10063 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
10064 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
10065 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
10066 
10067 	if (sc->sc_uc.uc_umac_error_event_table)
10068 		iwx_nic_umac_error(sc);
10069 }
10070 
10071 void
10072 iwx_dump_driver_status(struct iwx_softc *sc)
10073 {
10074 	int i;
10075 
10076 	printf("driver status:\n");
10077 	for (i = 0; i < nitems(sc->txq); i++) {
10078 		struct iwx_tx_ring *ring = &sc->txq[i];
10079 		printf("  tx ring %2d: qid=%-2d cur=%-3d "
10080 		    "cur_hw=%-3d queued=%-3d\n",
10081 		    i, ring->qid, ring->cur, ring->cur_hw,
10082 		    ring->queued);
10083 	}
10084 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
10085 	printf("  802.11 state %s\n",
10086 	    ieee80211_state_name[sc->sc_ic.ic_state]);
10087 }
10088 
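/*
 * Sync the DMA map covering a command response and point the given
 * variable at the payload which follows the packet header.
 */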
10089 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
10090 do {									\
10091 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
10092 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
10093 	_var_ = (void *)((_pkt_)+1);					\
10094 } while (/*CONSTCOND*/0)
10095 
10096 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
10097 do {									\
10098 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
10099 	    (_len_), BUS_DMASYNC_POSTREAD);				\
10100 	_ptr_ = (void *)((_pkt_)+1);					\
10101 } while (/*CONSTCOND*/0)
10102 
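/*
 * A packet with an all-zero header (qid, idx, and code) or with the
 * "frame invalid" marker in len_n_flags denotes an unused slot in
 * the RX buffer and ends the scan for valid packets.
 */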
10103 int
10104 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
10105 {
10106 	int qid, idx, code;
10107 
10108 	qid = pkt->hdr.qid & ~0x80;
10109 	idx = pkt->hdr.idx;
10110 	code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
10111 
10112 	return (!(qid == 0 && idx == 0 && code == 0) &&
10113 	    pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
10114 }
10115 
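/*
 * Process all firmware notifications contained in an RX buffer.
 * Pre-AX210 hardware may batch several packets into one buffer, each
 * aligned to IWX_FH_RSCSR_FRAME_ALIGN; AX210 and newer ship only one
 * packet per buffer. Once a buffer carries an MPDU the mbuf is taken
 * off the ring and ownership passes to the net80211 stack.
 */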
10116 void
10117 iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf_list *ml)
10118 {
10119 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
10120 	struct iwx_rx_packet *pkt, *nextpkt;
10121 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
10122 	struct mbuf *m0, *m;
10123 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
10124 	int qid, idx, code, handled = 1;
10125 
10126 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
10127 	    BUS_DMASYNC_POSTREAD);
10128 
10129 	m0 = data->m;
10130 	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
10131 		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
10132 		qid = pkt->hdr.qid;
10133 		idx = pkt->hdr.idx;
10134 
10135 		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
10136 
10137 		if (!iwx_rx_pkt_valid(pkt))
10138 			break;
10139 
10140 		/*
10141 		 * XXX Intel inside (tm)
10142 		 * Any commands in the LONG_GROUP could actually be in the
10143 		 * LEGACY group. Firmware API versions >= 50 reject commands
10144 		 * in group 0, forcing us to use this hack.
10145 		 */
10146 		if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
10147 			struct iwx_tx_ring *ring = &sc->txq[qid];
10148 			struct iwx_tx_data *txdata = &ring->data[idx];
10149 			if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
10150 				code = iwx_cmd_opcode(code);
10151 		}
10152 
10153 		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
10154 		if (len < minsz || len > (IWX_RBUF_SIZE - offset))
10155 			break;
10156 
10157 		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
10158 			/* Take mbuf m0 off the RX ring. */
10159 			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
10160 				ifp->if_ierrors++;
10161 				break;
10162 			}
10163 			KASSERT(data->m != m0);
10164 		}
10165 
10166 		switch (code) {
10167 		case IWX_REPLY_RX_PHY_CMD:
10168 			iwx_rx_rx_phy_cmd(sc, pkt, data);
10169 			break;
10170 
10171 		case IWX_REPLY_RX_MPDU_CMD: {
10172 			size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
10173 			nextoff = offset +
10174 			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
10175 			nextpkt = (struct iwx_rx_packet *)
10176 			    (m0->m_data + nextoff);
10177 			/* AX210 devices ship only one packet per Rx buffer. */
10178 			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210 ||
10179 			    nextoff + minsz >= IWX_RBUF_SIZE ||
10180 			    !iwx_rx_pkt_valid(nextpkt)) {
10181 				/* No need to copy last frame in buffer. */
10182 				if (offset > 0)
10183 					m_adj(m0, offset);
10184 				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen, ml);
10185 				m0 = NULL; /* stack owns m0 now; abort loop */
10186 			} else {
10187 				/*
10188 				 * Create an mbuf which points to the current
10189 				 * packet. Always copy from offset zero to
10190 				 * preserve m_pkthdr.
10191 				 */
10192 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
10193 				if (m == NULL) {
10194 					ifp->if_ierrors++;
10195 					m_freem(m0);
10196 					m0 = NULL;
10197 					break;
10198 				}
10199 				m_adj(m, offset);
10200 				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen, ml);
10201 			}
10202 			break;
10203 		}
10204 
10205 		case IWX_BAR_FRAME_RELEASE:
10206 			iwx_rx_bar_frame_release(sc, pkt, ml);
10207 			break;
10208 
10209 		case IWX_TX_CMD:
10210 			iwx_rx_tx_cmd(sc, pkt, data);
10211 			break;
10212 
10213 		case IWX_BA_NOTIF:
10214 			iwx_rx_compressed_ba(sc, pkt);
10215 			break;
10216 
10217 		case IWX_MISSED_BEACONS_NOTIFICATION:
10218 			iwx_rx_bmiss(sc, pkt, data);
10219 			break;
10220 
10221 		case IWX_MFUART_LOAD_NOTIFICATION:
10222 			break;
10223 
10224 		case IWX_ALIVE: {
10225 			struct iwx_alive_resp_v4 *resp4;
10226 			struct iwx_alive_resp_v5 *resp5;
10227 			struct iwx_alive_resp_v6 *resp6;
10228 
10229 			DPRINTF(("%s: firmware alive\n", __func__));
10230 			sc->sc_uc.uc_ok = 0;
10231 
10232 			/*
10233 			 * For v5 and above, we can check the version, for older
10234 			 * versions we need to check the size.
10235 			 */
10236 			if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
10237 			    IWX_ALIVE) == 6) {
10238 				SYNC_RESP_STRUCT(resp6, pkt);
10239 				if (iwx_rx_packet_payload_len(pkt) !=
10240 				    sizeof(*resp6)) {
10241 					sc->sc_uc.uc_intr = 1;
10242 					wakeup(&sc->sc_uc);
10243 					break;
10244 				}
10245 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
10246 				    resp6->lmac_data[0].dbg_ptrs.error_event_table_ptr);
10247 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
10248 				    resp6->lmac_data[1].dbg_ptrs.error_event_table_ptr);
10249 				sc->sc_uc.uc_log_event_table = le32toh(
10250 				    resp6->lmac_data[0].dbg_ptrs.log_event_table_ptr);
10251 				sc->sc_uc.uc_umac_error_event_table = le32toh(
10252 				    resp6->umac_data.dbg_ptrs.error_info_addr);
10253 				sc->sc_sku_id[0] =
10254 				    le32toh(resp6->sku_id.data[0]);
10255 				sc->sc_sku_id[1] =
10256 				    le32toh(resp6->sku_id.data[1]);
10257 				sc->sc_sku_id[2] =
10258 				    le32toh(resp6->sku_id.data[2]);
10259 				if (resp6->status == IWX_ALIVE_STATUS_OK)
10260 					sc->sc_uc.uc_ok = 1;
10261 			} else if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
10262 			    IWX_ALIVE) == 5) {
10263 				SYNC_RESP_STRUCT(resp5, pkt);
10264 				if (iwx_rx_packet_payload_len(pkt) !=
10265 				    sizeof(*resp5)) {
10266 					sc->sc_uc.uc_intr = 1;
10267 					wakeup(&sc->sc_uc);
10268 					break;
10269 				}
10270 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
10271 				    resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
10272 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
10273 				    resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
10274 				sc->sc_uc.uc_log_event_table = le32toh(
10275 				    resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
10276 				sc->sc_uc.uc_umac_error_event_table = le32toh(
10277 				    resp5->umac_data.dbg_ptrs.error_info_addr);
10278 				sc->sc_sku_id[0] =
10279 				    le32toh(resp5->sku_id.data[0]);
10280 				sc->sc_sku_id[1] =
10281 				    le32toh(resp5->sku_id.data[1]);
10282 				sc->sc_sku_id[2] =
10283 				    le32toh(resp5->sku_id.data[2]);
10284 				if (resp5->status == IWX_ALIVE_STATUS_OK)
10285 					sc->sc_uc.uc_ok = 1;
10286 			} else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
10287 				SYNC_RESP_STRUCT(resp4, pkt);
10288 				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
10289 				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
10290 				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
10291 				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
10292 				sc->sc_uc.uc_log_event_table = le32toh(
10293 				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
10294 				sc->sc_uc.uc_umac_error_event_table = le32toh(
10295 				    resp4->umac_data.dbg_ptrs.error_info_addr);
10296 				if (resp4->status == IWX_ALIVE_STATUS_OK)
10297 					sc->sc_uc.uc_ok = 1;
10298 			}
10299 
10300 			sc->sc_uc.uc_intr = 1;
10301 			wakeup(&sc->sc_uc);
10302 			break;
10303 		}
10304 
10305 		case IWX_STATISTICS_NOTIFICATION: {
10306 			struct iwx_notif_statistics *stats;
10307 			SYNC_RESP_STRUCT(stats, pkt);
10308 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
10309 			sc->sc_noise = iwx_get_noise(&stats->rx.general);
10310 			break;
10311 		}
10312 
10313 		case IWX_DTS_MEASUREMENT_NOTIFICATION:
10314 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
10315 				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
10316 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
10317 				 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
10318 			break;
10319 
10320 		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
10321 		    IWX_CT_KILL_NOTIFICATION): {
10322 			struct iwx_ct_kill_notif *notif;
10323 			SYNC_RESP_STRUCT(notif, pkt);
10324 			printf("%s: device at critical temperature (%u degC), "
10325 			    "stopping device\n",
10326 			    DEVNAME(sc), le16toh(notif->temperature));
10327 			sc->sc_flags |= IWX_FLAG_HW_ERR;
10328 			task_add(systq, &sc->init_task);
10329 			break;
10330 		}
10331 
10332 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
10333 		    IWX_SCD_QUEUE_CONFIG_CMD):
10334 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
10335 		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD):
10336 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
10337 		    IWX_SEC_KEY_CMD):
10338 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
10339 		    IWX_SESSION_PROTECTION_CMD):
10340 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
10341 		    IWX_MAC_CONFIG_CMD):
10342 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
10343 		    IWX_LINK_CONFIG_CMD):
10344 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
10345 		    IWX_STA_CONFIG_CMD):
10346 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
10347 		    IWX_STA_REMOVE_CMD):
10348 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
10349 		    IWX_NVM_GET_INFO):
10350 		case IWX_ADD_STA_KEY:
10351 		case IWX_PHY_CONFIGURATION_CMD:
10352 		case IWX_TX_ANT_CONFIGURATION_CMD:
10353 		case IWX_ADD_STA:
10354 		case IWX_MAC_CONTEXT_CMD:
10355 		case IWX_REPLY_SF_CFG_CMD:
10356 		case IWX_POWER_TABLE_CMD:
10357 		case IWX_LTR_CONFIG:
10358 		case IWX_PHY_CONTEXT_CMD:
10359 		case IWX_BINDING_CONTEXT_CMD:
10360 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
10361 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
10362 		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
10363 		case IWX_REPLY_BEACON_FILTERING_CMD:
10364 		case IWX_MAC_PM_POWER_TABLE:
10365 		case IWX_TIME_QUOTA_CMD:
10366 		case IWX_REMOVE_STA:
10367 		case IWX_TXPATH_FLUSH:
10368 		case IWX_BT_CONFIG:
10369 		case IWX_MCC_UPDATE_CMD:
10370 		case IWX_TIME_EVENT_CMD:
10371 		case IWX_STATISTICS_CMD:
10372 		case IWX_SCD_QUEUE_CFG: {
10373 			size_t pkt_len;
10374 
10375 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
10376 				break;
10377 
10378 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
10379 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
10380 
10381 			pkt_len = sizeof(pkt->len_n_flags) +
10382 			    iwx_rx_packet_len(pkt);
10383 
10384 			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
10385 			    pkt_len < sizeof(*pkt) ||
10386 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
10387 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
10388 				    sc->sc_cmd_resp_len[idx]);
10389 				sc->sc_cmd_resp_pkt[idx] = NULL;
10390 				break;
10391 			}
10392 
10393 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
10394 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
10395 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
10396 			break;
10397 		}
10398 
10399 		case IWX_INIT_COMPLETE_NOTIF:
10400 			sc->sc_init_complete |= IWX_INIT_COMPLETE;
10401 			wakeup(&sc->sc_init_complete);
10402 			break;
10403 
10404 		case IWX_SCAN_COMPLETE_UMAC: {
10405 			struct iwx_umac_scan_complete *notif;
10406 			SYNC_RESP_STRUCT(notif, pkt);
10407 			iwx_endscan(sc);
10408 			break;
10409 		}
10410 
10411 		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
10412 			struct iwx_umac_scan_iter_complete_notif *notif;
10413 			SYNC_RESP_STRUCT(notif, pkt);
10414 			iwx_endscan(sc);
10415 			break;
10416 		}
10417 
10418 		case IWX_MCC_CHUB_UPDATE_CMD: {
10419 			struct iwx_mcc_chub_notif *notif;
10420 			SYNC_RESP_STRUCT(notif, pkt);
10421 			iwx_mcc_update(sc, notif);
10422 			break;
10423 		}
10424 
10425 		case IWX_REPLY_ERROR: {
10426 			struct iwx_error_resp *resp;
10427 			SYNC_RESP_STRUCT(resp, pkt);
10428 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
10429 				DEVNAME(sc), le32toh(resp->error_type),
10430 				resp->cmd_id);
10431 			break;
10432 		}
10433 
10434 		case IWX_TIME_EVENT_NOTIFICATION: {
10435 			struct iwx_time_event_notif *notif;
10436 			uint32_t action;
10437 			SYNC_RESP_STRUCT(notif, pkt);
10438 
10439 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
10440 				break;
10441 			action = le32toh(notif->action);
10442 			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
10443 				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
10444 			break;
10445 		}
10446 
10447 		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
10448 		    IWX_SESSION_PROTECTION_NOTIF): {
10449 			struct iwx_session_prot_notif *notif;
10450 			uint32_t status, start, conf_id;
10451 
10452 			SYNC_RESP_STRUCT(notif, pkt);
10453 
10454 			status = le32toh(notif->status);
10455 			start = le32toh(notif->start);
10456 			conf_id = le32toh(notif->conf_id);
10457 			/* Check for end of successful PROTECT_CONF_ASSOC. */
10458 			if (status == 1 && start == 0 &&
10459 			    conf_id == IWX_SESSION_PROTECT_CONF_ASSOC)
10460 				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
10461 			break;
10462 		}
10463 
10464 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
10465 		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
10466 			break;
10467 
10468 		/*
10469 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
10470 		 * messages. Just ignore them for now.
10471 		 */
10472 		case IWX_DEBUG_LOG_MSG:
10473 			break;
10474 
10475 		case IWX_MCAST_FILTER_CMD:
10476 			break;
10477 
10478 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
10479 			break;
10480 
10481 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
10482 			break;
10483 
10484 		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
10485 			break;
10486 
10487 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
10488 		    IWX_NVM_ACCESS_COMPLETE):
10489 			break;
10490 
10491 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
10492 			break; /* happens in monitor mode; ignore for now */
10493 
10494 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
10495 			break;
10496 
10497 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
10498 		    IWX_TLC_MNG_UPDATE_NOTIF): {
10499 			struct iwx_tlc_update_notif *notif;
10500 			SYNC_RESP_STRUCT(notif, pkt);
10501 			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
10502 				iwx_rs_update(sc, notif);
10503 			break;
10504 		}
10505 
10506 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RLC_CONFIG_CMD):
10507 			break;
10508 
10509 		/*
10510 		 * Ignore for now. The Linux driver only acts on this request
10511 		 * with 160 MHz channels in 11ax mode.
10512 		 */
10513 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
10514 		    IWX_THERMAL_DUAL_CHAIN_REQUEST):
10515 			DPRINTF(("%s: thermal dual-chain request received\n",
10516 			    DEVNAME(sc)));
10517 			break;
10518 
10519 		/* undocumented notification from iwx-ty-a0-gf-a0-77 image */
10520 		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 0xf8):
10521 			break;
10522 
10523 		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
10524 		    IWX_PNVM_INIT_COMPLETE):
10525 			sc->sc_init_complete |= IWX_PNVM_COMPLETE;
10526 			wakeup(&sc->sc_init_complete);
10527 			break;
10528 
10529 		default:
10530 			handled = 0;
10531 			printf("%s: unhandled firmware response 0x%x/0x%x "
10532 			    "rx ring %d[%d]\n",
10533 			    DEVNAME(sc), code, pkt->len_n_flags,
10534 			    (qid & ~0x80), idx);
10535 			break;
10536 		}
10537 
10538 		/*
10539 		 * uCode sets bit 0x80 when it originates the notification,
10540 		 * i.e. when the notification is not a direct response to a
10541 		 * command sent by the driver.
10542 		 * For example, uCode issues IWX_REPLY_RX when it sends a
10543 		 * received frame to the driver.
10544 		 */
10545 		if (handled && !(qid & (1 << 7))) {
10546 			iwx_cmd_done(sc, qid, idx, code);
10547 		}
10548 
10549 		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
10550 
10551 		/* AX210 devices ship only one packet per Rx buffer. */
10552 		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
10553 			break;
10554 	}
10555 
10556 	if (m0 && m0 != data->m)
10557 		m_freem(m0);
10558 }
10559 
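/*
 * Drain the RX ring up to the "closed" index maintained by the
 * firmware in the RX status area, then write the updated read
 * pointer back to the hardware.
 */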
10560 void
10561 iwx_notif_intr(struct iwx_softc *sc)
10562 {
10563 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
10564 	uint16_t hw;
10565 
10566 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
10567 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
10568 
10569 	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
10570 		uint16_t *status = sc->rxq.stat_dma.vaddr;
10571 		hw = le16toh(*status) & 0xfff;
10572 	} else
10573 		hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
10574 	hw &= (IWX_RX_MQ_RING_COUNT - 1);
10575 	while (sc->rxq.cur != hw) {
10576 		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];
10577 		iwx_rx_pkt(sc, data, &ml);
10578 		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
10579 	}
10580 	if_input(&sc->sc_ic.ic_if, &ml);
10581 
10582 	/*
10583 	 * Tell the firmware what we have processed.
10584 	 * Seems like the hardware gets upset unless we align the write by 8??
10585 	 */
10586 	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
10587 	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
10588 }
10589 
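/*
 * Legacy (INTx/MSI) interrupt handler. Interrupt causes are gathered
 * either from the ICT table in host memory, when in use, or straight
 * from the CSR interrupt registers.
 */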
10590 int
10591 iwx_intr(void *arg)
10592 {
10593 	struct iwx_softc *sc = arg;
10594 	struct ieee80211com *ic = &sc->sc_ic;
10595 	struct ifnet *ifp = IC2IFP(ic);
10596 	int handled = 0;
10597 	int r1, r2, rv = 0;
10598 
10599 	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
10600 
10601 	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
10602 		uint32_t *ict = sc->ict_dma.vaddr;
10603 		int tmp;
10604 
10605 		tmp = htole32(ict[sc->ict_cur]);
10606 		if (!tmp)
10607 			goto out_ena;
10608 
10609 		/*
10610 		 * ok, there was something.  keep plowing until we have all.
10611 		 */
10612 		r1 = r2 = 0;
10613 		while (tmp) {
10614 			r1 |= tmp;
10615 			ict[sc->ict_cur] = 0;
10616 			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
10617 			tmp = htole32(ict[sc->ict_cur]);
10618 		}
10619 
10620 		/* this is where the fun begins.  don't ask */
10621 		if (r1 == 0xffffffff)
10622 			r1 = 0;
10623 
10624 		/* i am not expected to understand this */
10625 		if (r1 & 0xc0000)
10626 			r1 |= 0x8000;
10627 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
10628 	} else {
10629 		r1 = IWX_READ(sc, IWX_CSR_INT);
10630 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
10631 			goto out;
10632 		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
10633 	}
10634 	if (r1 == 0 && r2 == 0) {
10635 		goto out_ena;
10636 	}
10637 
10638 	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);
10639 
10640 	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
10641 		int i;
10642 
10643 		/* Firmware has now configured the RFH. */
10644 		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
10645 			iwx_update_rx_desc(sc, &sc->rxq, i);
10646 		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
10647 	}
10648 
10649 	handled |= (r1 & (IWX_CSR_INT_BIT_ALIVE /*| IWX_CSR_INT_BIT_SCD*/));
10650 
10651 	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
10652 		handled |= IWX_CSR_INT_BIT_RF_KILL;
10653 		iwx_check_rfkill(sc);
10654 		task_add(systq, &sc->init_task);
10655 		rv = 1;
10656 		goto out_ena;
10657 	}
10658 
10659 	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
10660 		if (ifp->if_flags & IFF_DEBUG) {
10661 			iwx_nic_error(sc);
10662 			iwx_dump_driver_status(sc);
10663 		}
10664 		printf("%s: fatal firmware error\n", DEVNAME(sc));
10665 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
10666 			task_add(systq, &sc->init_task);
10667 		rv = 1;
10668 		goto out;
10669 
10670 	}
10671 
10672 	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
10673 		handled |= IWX_CSR_INT_BIT_HW_ERR;
10674 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
10675 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
10676 			sc->sc_flags |= IWX_FLAG_HW_ERR;
10677 			task_add(systq, &sc->init_task);
10678 		}
10679 		rv = 1;
10680 		goto out;
10681 	}
10682 
10683 	/* firmware chunk loaded */
10684 	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
10685 		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);
10686 		handled |= IWX_CSR_INT_BIT_FH_TX;
10687 
10688 		sc->sc_fw_chunk_done = 1;
10689 		wakeup(&sc->sc_fw);
10690 	}
10691 
10692 	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
10693 	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
10694 		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
10695 			handled |= (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX);
10696 			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
10697 		}
10698 		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
10699 			handled |= IWX_CSR_INT_BIT_RX_PERIODIC;
10700 			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
10701 		}
10702 
10703 		/* Disable periodic interrupt; we use it as just a one-shot. */
10704 		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);
10705 
10706 		/*
10707 		 * Enable periodic interrupt in 8 msec only if we received
10708 		 * real RX interrupt (instead of just periodic int), to catch
10709 		 * any dangling Rx interrupt.  If it was just the periodic
10710 		 * interrupt, there was no dangling Rx activity, and no need
10711 		 * to extend the periodic interrupt; one-shot is enough.
10712 		 */
10713 		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
10714 			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
10715 			    IWX_CSR_INT_PERIODIC_ENA);
10716 
10717 		iwx_notif_intr(sc);
10718 	}
10719 
10720 	rv = 1;
10721 
10722  out_ena:
10723 	iwx_restore_interrupts(sc);
10724  out:
10725 	return rv;
10726 }
10727 
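/*
 * MSI-X interrupt handler. Causes are read from the FH and HW cause
 * registers and acknowledged by writing the same values back.
 */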
10728 int
10729 iwx_intr_msix(void *arg)
10730 {
10731 	struct iwx_softc *sc = arg;
10732 	struct ieee80211com *ic = &sc->sc_ic;
10733 	struct ifnet *ifp = IC2IFP(ic);
10734 	uint32_t inta_fh, inta_hw;
10735 	int vector = 0;
10736 
10737 	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
10738 	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
10739 	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
10740 	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
10741 	inta_fh &= sc->sc_fh_mask;
10742 	inta_hw &= sc->sc_hw_mask;
10743 
10744 	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
10745 	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
10746 		iwx_notif_intr(sc);
10747 	}
10748 
10749 	/* firmware chunk loaded */
10750 	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
10751 		sc->sc_fw_chunk_done = 1;
10752 		wakeup(&sc->sc_fw);
10753 	}
10754 
10755 	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
10756 	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
10757 	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
10758 		if (ifp->if_flags & IFF_DEBUG) {
10759 			iwx_nic_error(sc);
10760 			iwx_dump_driver_status(sc);
10761 		}
10762 		printf("%s: fatal firmware error\n", DEVNAME(sc));
10763 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
10764 			task_add(systq, &sc->init_task);
10765 		return 1;
10766 	}
10767 
10768 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
10769 		iwx_check_rfkill(sc);
10770 		task_add(systq, &sc->init_task);
10771 	}
10772 
10773 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
10774 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
10775 		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
10776 			sc->sc_flags |= IWX_FLAG_HW_ERR;
10777 			task_add(systq, &sc->init_task);
10778 		}
10779 		return 1;
10780 	}
10781 
10782 	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
10783 		int i;
10784 
10785 		/* Firmware has now configured the RFH. */
10786 		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
10787 			iwx_update_rx_desc(sc, &sc->rxq, i);
10788 		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
10789 	}
10790 
10791 	/*
10792 	 * Before sending the interrupt the HW disables it to prevent
10793 	 * a nested interrupt. This is done by writing 1 to the corresponding
10794 	 * bit in the mask register. After handling the interrupt, it should be
10795 	 * re-enabled by clearing this bit. This register is defined as
10796 	 * write 1 clear (W1C) register, meaning that it's being clear
10797 	 * by writing 1 to the bit.
10798 	 */
10799 	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
10800 	return 1;
10801 }
10802 
10803 typedef void *iwx_match_t;
10804 
10805 static const struct pci_matchid iwx_devices[] = {
10806 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_1 },
10807 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_2 },
10808 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_3 },
10809 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_4 },
10810 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_5 },
10811 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_6 },
10812 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_7 },
10813 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_8 },
10814 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_9 },
10815 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_10 },
10816 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_11 },
10817 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_12 },
10818 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_13 },
10819 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_14 },
10820 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_15 },
10821 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_16 },
10822 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_17 },
10823 };
10824 
10825 
10826 int
10827 iwx_match(struct device *parent, iwx_match_t match __unused, void *aux)
10828 {
10829 	struct pci_attach_args *pa = aux;
10830 	return pci_matchbyid(pa, iwx_devices, nitems(iwx_devices));
10831 }
10832 
10833 /*
10834  * The device info table below contains device-specific config overrides.
10835  * The most important parameter derived from this table is the name of the
10836  * firmware image to load.
10837  *
10838  * The Linux iwlwifi driver uses an "old" and a "new" device info table.
10839  * The "old" table matches devices based on PCI vendor/product IDs only.
10840  * The "new" table extends this with various device parameters derived
10841  * from MAC type, and RF type.
10842  *
10843  * In iwlwifi "old" and "new" tables share the same array, where "old"
10844  * entries contain dummy values for data defined only for "new" entries.
10845  * As of 2022, Linux developers are still in the process of moving entries
10846  * from "old" to "new" style and it looks like this effort has stalled
10847  * in some work-in-progress state for quite a while. Linux commits moving
10848  * entries from "old" to "new" have at times been reverted due to regressions.
10849  * Part of this complexity comes from iwlwifi supporting both iwm(4) and iwx(4)
10850  * devices in the same driver.
10851  *
10852  * Our table below contains mostly "new" entries declared in iwlwifi
10853  * with the _IWL_DEV_INFO() macro (with a leading underscore).
10854  * Other devices are matched based on PCI vendor/product ID as usual,
10855  * unless matching specific PCI subsystem vendor/product IDs is required.
10856  *
10857  * Some "old"-style entries are required to identify the firmware image to use.
10858  * Others might be used to print a specific marketing name into Linux dmesg,
10859  * but we can't be sure whether the corresponding devices would be matched
10860  * correctly in the absence of their entries. So we include them just in case.
10861  */
10862 
10863 struct iwx_dev_info {
10864 	uint16_t device;
10865 	uint16_t subdevice;
10866 	uint16_t mac_type;
10867 	uint16_t rf_type;
10868 	uint8_t mac_step;
10869 	uint8_t rf_id;
10870 	uint8_t no_160;
10871 	uint8_t cores;
10872 	uint8_t cdb;
10873 	uint8_t jacket;
10874 	const struct iwx_device_cfg *cfg;
10875 };
10876 
10877 #define _IWX_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
10878 		      _rf_id, _no_160, _cores, _cdb, _jacket, _cfg) \
10879 	{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg),  \
10880 	  .mac_type = _mac_type, .rf_type = _rf_type,	   \
10881 	  .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id,		   \
10882 	  .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }
10883 
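/* Wildcard variant: match on PCI device/subdevice IDs only. */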
10884 #define IWX_DEV_INFO(_device, _subdevice, _cfg) \
10885 	_IWX_DEV_INFO(_device, _subdevice, IWX_CFG_ANY, IWX_CFG_ANY,	   \
10886 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY,  \
10887 		      IWX_CFG_ANY, IWX_CFG_ANY, _cfg)
10888 
10889 /*
10890  * When adding entries to this table keep in mind that entries must
10891  * be listed in the same order as in the Linux driver. Code walks this
10892  * table backwards and uses the first matching entry it finds.
10893  * Device firmware must be available in fw_update(8).
10894  */
10895 static const struct iwx_dev_info iwx_dev_info_table[] = {
10896 	/* So with HR */
10897 	IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0),
10898 	IWX_DEV_INFO(0x2725, 0x0020, iwx_2ax_cfg_ty_gf_a0),
10899 	IWX_DEV_INFO(0x2725, 0x2020, iwx_2ax_cfg_ty_gf_a0),
10900 	IWX_DEV_INFO(0x2725, 0x0024, iwx_2ax_cfg_ty_gf_a0),
10901 	IWX_DEV_INFO(0x2725, 0x0310, iwx_2ax_cfg_ty_gf_a0),
10902 	IWX_DEV_INFO(0x2725, 0x0510, iwx_2ax_cfg_ty_gf_a0),
10903 	IWX_DEV_INFO(0x2725, 0x0A10, iwx_2ax_cfg_ty_gf_a0),
10904 	IWX_DEV_INFO(0x2725, 0xE020, iwx_2ax_cfg_ty_gf_a0),
10905 	IWX_DEV_INFO(0x2725, 0xE024, iwx_2ax_cfg_ty_gf_a0),
10906 	IWX_DEV_INFO(0x2725, 0x4020, iwx_2ax_cfg_ty_gf_a0),
10907 	IWX_DEV_INFO(0x2725, 0x6020, iwx_2ax_cfg_ty_gf_a0),
10908 	IWX_DEV_INFO(0x2725, 0x6024, iwx_2ax_cfg_ty_gf_a0),
10909 	IWX_DEV_INFO(0x2725, 0x1673, iwx_2ax_cfg_ty_gf_a0), /* killer_1675w */
10910 	IWX_DEV_INFO(0x2725, 0x1674, iwx_2ax_cfg_ty_gf_a0), /* killer_1675x */
10911 	IWX_DEV_INFO(0x51f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10912 	IWX_DEV_INFO(0x51f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10913 	IWX_DEV_INFO(0x51f1, 0x1691, iwx_2ax_cfg_so_gf4_a0),
10914 	IWX_DEV_INFO(0x51f1, 0x1692, iwx_2ax_cfg_so_gf4_a0),
10915 	IWX_DEV_INFO(0x54f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10916 	IWX_DEV_INFO(0x54f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10917 	IWX_DEV_INFO(0x7a70, 0x0090, iwx_2ax_cfg_so_gf_a0_long),
10918 	IWX_DEV_INFO(0x7a70, 0x0098, iwx_2ax_cfg_so_gf_a0_long),
10919 	IWX_DEV_INFO(0x7a70, 0x00b0, iwx_2ax_cfg_so_gf4_a0_long),
10920 	IWX_DEV_INFO(0x7a70, 0x0310, iwx_2ax_cfg_so_gf_a0_long),
10921 	IWX_DEV_INFO(0x7a70, 0x0510, iwx_2ax_cfg_so_gf_a0_long),
10922 	IWX_DEV_INFO(0x7a70, 0x0a10, iwx_2ax_cfg_so_gf_a0_long),
10923 	IWX_DEV_INFO(0x7af0, 0x0090, iwx_2ax_cfg_so_gf_a0),
10924 	IWX_DEV_INFO(0x7af0, 0x0098, iwx_2ax_cfg_so_gf_a0),
10925 	IWX_DEV_INFO(0x7af0, 0x00b0, iwx_2ax_cfg_so_gf4_a0),
10926 	IWX_DEV_INFO(0x7a70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10927 	IWX_DEV_INFO(0x7a70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10928 	IWX_DEV_INFO(0x7af0, 0x0310, iwx_2ax_cfg_so_gf_a0),
10929 	IWX_DEV_INFO(0x7af0, 0x0510, iwx_2ax_cfg_so_gf_a0),
10930 	IWX_DEV_INFO(0x7af0, 0x0a10, iwx_2ax_cfg_so_gf_a0),
10931 	IWX_DEV_INFO(0x7f70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10932 	IWX_DEV_INFO(0x7f70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10933 
10934 	/* So with GF2 */
10935 	IWX_DEV_INFO(0x2726, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10936 	IWX_DEV_INFO(0x2726, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10937 	IWX_DEV_INFO(0x51f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10938 	IWX_DEV_INFO(0x51f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10939 	IWX_DEV_INFO(0x54f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10940 	IWX_DEV_INFO(0x54f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10941 	IWX_DEV_INFO(0x7a70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10942 	IWX_DEV_INFO(0x7a70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10943 	IWX_DEV_INFO(0x7af0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10944 	IWX_DEV_INFO(0x7af0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10945 	IWX_DEV_INFO(0x7f70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10946 	IWX_DEV_INFO(0x7f70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10947 
10948 	/* MA with GF2 */
10949 	IWX_DEV_INFO(0x7e40, 0x1671, iwx_cfg_ma_b0_gf_a0), /* killer_1675s */
10950 	IWX_DEV_INFO(0x7e40, 0x1672, iwx_cfg_ma_b0_gf_a0), /* killer_1675i */
10951 
10952 	/* Qu with Jf, C step */
10953 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10954 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10955 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10956 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10957 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461_160 */
10958 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10959 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10960 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10961 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10962 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* iwl9461 */
10963 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10964 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10965 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10966 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10967 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462_160 */
10968 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10969 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10970 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10971 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10972 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462 */
10973 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10974 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10975 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10976 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10977 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560_160 */
10978 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10979 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10980 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10981 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10982 		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560 */
10983 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
10984 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10985 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10986 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10987 		      IWX_CFG_ANY,
10988 		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550s */
10989 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
10990 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10991 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10992 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10993 		      IWX_CFG_ANY,
10994 		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550i */
10995 
10996 	/* QuZ with Jf */
10997 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10998 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10999 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
11000 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
11001 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461_160 */
11002 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11003 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
11004 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
11005 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
11006 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461 */
11007 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11008 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
11009 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
11010 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
11011 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462_160 */
11012 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11013 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
11014 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
11015 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
11016 		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462 */
11017 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
11018 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
11019 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
11020 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
11021 		      IWX_CFG_ANY,
11022 		      iwx_9560_quz_a0_jf_b0_cfg), /* killer_1550s */
11023 	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
11024 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
11025 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
11026 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
11027 		      IWX_CFG_ANY,
11028 		      iwx_9560_quz_a0_jf_b0_cfg), /* 9560_killer_1550i */
11029 
11030 	/* Qu with Hr, B step */
11031 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11032 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
11033 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
11034 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
11035 		      iwx_qu_b0_hr1_b0), /* AX101 */
11036 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11037 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
11038 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
11039 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
11040 		      iwx_qu_b0_hr_b0), /* AX203 */
11041 
11042 	/* Qu with Hr, C step */
11043 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11044 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
11045 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
11046 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
11047 		      iwx_qu_c0_hr1_b0), /* AX101 */
11048 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11049 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
11050 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
11051 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
11052 		      iwx_qu_c0_hr_b0), /* AX203 */
11053 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11054 		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
11055 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
11056 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
11057 		      iwx_qu_c0_hr_b0), /* AX201 */
11058 
11059 	/* QuZ with Hr */
11060 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11061 		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
11062 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
11063 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
11064 		      iwx_quz_a0_hr1_b0), /* AX101 */
11065 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11066 		      IWX_CFG_MAC_TYPE_QUZ, IWX_SILICON_B_STEP,
11067 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
11068 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
11069 		      iwx_cfg_quz_a0_hr_b0), /* AX203 */
11070 
11071 	/* SoF with JF2 */
11072 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11073 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
11074 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
11075 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
11076 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
11077 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11078 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
11079 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
11080 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
11081 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
11082 
11083 	/* SoF with JF */
11084 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11085 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
11086 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
11087 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
11088 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
11089 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11090 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
11091 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
11092 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
11093 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
11094 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11095 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
11096 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
11097 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
11098 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_name */
11099 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11100 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
11101 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
11102 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
11103 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
11104 
11105 	/* So with Hr */
11106 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11107 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
11108 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
11109 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
11110 		      iwx_cfg_so_a0_hr_b0), /* AX203 */
11111 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11112 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
11113 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
11114 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
11115 		      iwx_cfg_so_a0_hr_b0), /* ax101 */
11116 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11117 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
11118 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
11119 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
11120 		      iwx_cfg_so_a0_hr_b0), /* ax201 */
11121 
11122 	/* So-F with Hr */
11123 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11124 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
11125 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
11126 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
11127 		      iwx_cfg_so_a0_hr_b0), /* AX203 */
11128 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11129 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
11130 		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
11131 		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
11132 		      iwx_cfg_so_a0_hr_b0), /* AX101 */
11133 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11134 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
11135 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
11136 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
11137 		      iwx_cfg_so_a0_hr_b0), /* AX201 */
11138 
11139 	/* So-F with GF */
11140 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11141 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
11142 		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
11143 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
11144 		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
11145 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11146 		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
11147 		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
11148 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
11149 		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */
11150 
11151 	/* So with GF */
11152 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11153 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
11154 		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
11155 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
11156 		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
11157 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11158 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
11159 		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
11160 		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
11161 		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */
11162 
11163 	/* So with JF2 */
11164 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11165 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
11166 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
11167 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
11168 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
11169 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11170 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
11171 		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
11172 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
11173 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
11174 
11175 	/* So with JF */
11176 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11177 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
11178 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
11179 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
11180 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
11181 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11182 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
11183 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
11184 		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
11185 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
11186 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11187 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
11188 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
11189 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
11190 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* iwl9461 */
11191 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11192 		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
11193 		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
11194 		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
11195 		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
11196 
11197 	/* Ma */
11198 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11199 		      IWX_CFG_MAC_TYPE_MA, IWX_CFG_ANY,
11200 		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
11201 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB,
11202 		      IWX_CFG_ANY, iwx_cfg_ma_b0_hr_b0), /* ax201 */
11203 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11204 		      IWX_CFG_MAC_TYPE_MA, IWX_CFG_ANY,
11205 		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
11206 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB,
11207 		      IWX_CFG_ANY, iwx_cfg_ma_b0_gf_a0), /* ax211 */
11208 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11209 		      IWX_CFG_MAC_TYPE_MA, IWX_CFG_ANY,
11210 		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
11211 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_CDB,
11212 		      IWX_CFG_ANY, iwx_cfg_ma_b0_gf4_a0), /* ax211 */
11213 	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
11214 		      IWX_CFG_MAC_TYPE_MA, IWX_CFG_ANY,
11215 		      IWX_CFG_RF_TYPE_FM, IWX_CFG_ANY,
11216 		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB,
11217 		      IWX_CFG_ANY, iwx_cfg_ma_a0_fm_a0), /* ax231 */
11218 };
11219 
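/*
 * First-time firmware bring-up: prepare the hardware, run the init
 * firmware once to obtain NVM data, then print version information
 * and configure channels and rates. Subsequent calls only refresh
 * the MAC address.
 */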
11220 int
11221 iwx_preinit(struct iwx_softc *sc)
11222 {
11223 	struct ieee80211com *ic = &sc->sc_ic;
11224 	struct ifnet *ifp = IC2IFP(ic);
11225 	int err;
11226 
11227 	err = iwx_prepare_card_hw(sc);
11228 	if (err) {
11229 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
11230 		return err;
11231 	}
11232 
11233 	if (sc->attached) {
11234 		/* Update MAC in case the upper layers changed it. */
11235 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
11236 		    ((struct arpcom *)ifp)->ac_enaddr);
11237 		return 0;
11238 	}
11239 
11240 	err = iwx_start_hw(sc);
11241 	if (err) {
11242 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
11243 		return err;
11244 	}
11245 
11246 	err = iwx_run_init_mvm_ucode(sc, 1);
11247 	iwx_stop_device(sc);
11248 	if (err)
11249 		return err;
11250 
11251 	/* Print version info and MAC address on first successful fw load. */
11252 	sc->attached = 1;
11253 	if (sc->sc_pnvm_ver) {
11254 		printf("%s: hw rev 0x%x, fw %s, pnvm %08x, "
11255 		    "address %s\n",
11256 		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
11257 		    sc->sc_fwver, sc->sc_pnvm_ver,
11258 		    ether_sprintf(sc->sc_nvm.hw_addr));
11259 	} else {
11260 		printf("%s: hw rev 0x%x, fw %s, address %s\n",
11261 		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
11262 		    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
11263 	}
11264 
11265 	if (sc->sc_nvm.sku_cap_11n_enable)
11266 		iwx_setup_ht_rates(sc);
11267 	if (sc->sc_nvm.sku_cap_11ac_enable)
11268 		iwx_setup_vht_rates(sc);
11269 
11270 	/* not all hardware can do 5GHz band */
11271 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
11272 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
11273 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
11274 
11275 	/* Configure channel information obtained from firmware. */
11276 	ieee80211_channel_init(ifp);
11277 
11278 	/* Configure MAC address. */
11279 	err = if_setlladdr(ifp, ic->ic_myaddr);
11280 	if (err)
11281 		printf("%s: could not set MAC address (error %d)\n",
11282 		    DEVNAME(sc), err);
11283 
11284 	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
11285 
11286 	return 0;
11287 }
11288 
11289 void
11290 iwx_attach_hook(struct device *self)
11291 {
11292 	struct iwx_softc *sc = (void *)self;
11293 
11294 	KASSERT(!cold);
11295 
11296 	iwx_preinit(sc);
11297 }
11298 
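/*
 * Select a device-specific config by matching hardware parameters
 * against the table above, back to front. MAC type/step and RF type
 * come from CSR registers; the RF ID, 160 MHz capability, and core
 * configuration are encoded in the PCI subsystem product ID.
 */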
11299 const struct iwx_device_cfg *
11300 iwx_find_device_cfg(struct iwx_softc *sc)
11301 {
11302 	pcireg_t sreg;
11303 	pci_product_id_t sdev_id;
11304 	uint16_t mac_type, rf_type;
11305 	uint8_t mac_step, cdb, jacket, rf_id, no_160, cores;
11306 	int i;
11307 
11308 	sreg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_SUBSYS_ID_REG);
11309 	sdev_id = PCI_PRODUCT(sreg);
11310 	mac_type = IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev);
11311 	mac_step = IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2);
11312 	rf_type = IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id);
11313 	cdb = IWX_CSR_HW_RFID_IS_CDB(sc->sc_hw_rf_id);
11314 	jacket = IWX_CSR_HW_RFID_IS_JACKET(sc->sc_hw_rf_id);
11315 
11316 	rf_id = IWX_SUBDEVICE_RF_ID(sdev_id);
11317 	no_160 = IWX_SUBDEVICE_NO_160(sdev_id);
11318 	cores = IWX_SUBDEVICE_CORES(sdev_id);
11319 
11320 	for (i = nitems(iwx_dev_info_table) - 1; i >= 0; i--) {
11321 		const struct iwx_dev_info *dev_info = &iwx_dev_info_table[i];
11322 
11323 		if (dev_info->device != (uint16_t)IWX_CFG_ANY &&
11324 		    dev_info->device != sc->sc_pid)
11325 			continue;
11326 
11327 		if (dev_info->subdevice != (uint16_t)IWX_CFG_ANY &&
11328 		    dev_info->subdevice != sdev_id)
11329 			continue;
11330 
11331 		if (dev_info->mac_type != (uint16_t)IWX_CFG_ANY &&
11332 		    dev_info->mac_type != mac_type)
11333 			continue;
11334 
11335 		if (dev_info->mac_step != (uint8_t)IWX_CFG_ANY &&
11336 		    dev_info->mac_step != mac_step)
11337 			continue;
11338 
11339 		if (dev_info->rf_type != (uint16_t)IWX_CFG_ANY &&
11340 		    dev_info->rf_type != rf_type)
11341 			continue;
11342 
11343 		if (dev_info->cdb != (uint8_t)IWX_CFG_ANY &&
11344 		    dev_info->cdb != cdb)
11345 			continue;
11346 
11347 		if (dev_info->jacket != (uint8_t)IWX_CFG_ANY &&
11348 		    dev_info->jacket != jacket)
11349 			continue;
11350 
11351 		if (dev_info->rf_id != (uint8_t)IWX_CFG_ANY &&
11352 		    dev_info->rf_id != rf_id)
11353 			continue;
11354 
11355 		if (dev_info->no_160 != (uint8_t)IWX_CFG_ANY &&
11356 		    dev_info->no_160 != no_160)
11357 			continue;
11358 
11359 		if (dev_info->cores != (uint8_t)IWX_CFG_ANY &&
11360 		    dev_info->cores != cores)
11361 			continue;
11362 
11363 		return dev_info->cfg;
11364 	}
11365 
11366 	return NULL;
11367 }
11368 
11369 
11370 void
11371 iwx_attach(struct device *parent, struct device *self, void *aux)
11372 {
11373 	struct iwx_softc *sc = (void *)self;
11374 	struct pci_attach_args *pa = aux;
11375 	pci_intr_handle_t ih;
11376 	pcireg_t reg, memtype;
11377 	struct ieee80211com *ic = &sc->sc_ic;
11378 	struct ifnet *ifp = &ic->ic_if;
11379 	const char *intrstr;
11380 	const struct iwx_device_cfg *cfg;
11381 	int err;
11382 	int txq_i, i, j;
11383 	size_t ctxt_info_size;
11384 
11385 	sc->sc_pid = PCI_PRODUCT(pa->pa_id);
11386 	sc->sc_pct = pa->pa_pc;
11387 	sc->sc_pcitag = pa->pa_tag;
11388 	sc->sc_dmat = pa->pa_dmat;
11389 
11390 	rw_init(&sc->ioctl_rwl, "iwxioctl");
11391 
11392 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
11393 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
11394 	if (err == 0) {
11395 		printf("%s: PCIe capability structure not found!\n",
11396 		    DEVNAME(sc));
11397 		return;
11398 	}
11399 
11400 	/*
11401 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
11402 	 * PCI Tx retries from interfering with C3 CPU state.
11403 	 */
11404 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
11405 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
11406 
11407 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
11408 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
11409 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
11410 	if (err) {
11411 		printf("%s: can't map mem space\n", DEVNAME(sc));
11412 		return;
11413 	}
11414 
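	/*
	 * Interrupt setup tries MSI-X first, then MSI, and falls back
	 * to legacy INTx only if neither can be mapped.
	 */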
	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
		sc->sc_msix = 1;
	} else if (pci_intr_map_msi(pa, &ih)) {
		if (pci_intr_map(pa, &ih)) {
			printf("%s: can't map interrupt\n", DEVNAME(sc));
			return;
		}
		/* Hardware bug workaround. */
		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    PCI_COMMAND_STATUS_REG);
		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    PCI_COMMAND_STATUS_REG, reg);
	}

	intrstr = pci_intr_string(sc->sc_pct, ih);
	if (sc->sc_msix)
		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
		    iwx_intr_msix, sc, DEVNAME(sc));
	else
		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
		    iwx_intr, sc, DEVNAME(sc));

	if (sc->sc_ih == NULL) {
		printf("\n");
		printf("%s: can't establish interrupt", DEVNAME(sc));
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(", %s\n", intrstr);

	/* Clear pending interrupts. */
	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
	IWX_WRITE(sc, IWX_CSR_INT, ~0);
	IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);

	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
	sc->sc_hw_rf_id = IWX_READ(sc, IWX_CSR_HW_RF_ID);

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV
	 * has changed, and the revision step now also includes bits 0-1
	 * (there is no more "dash" value). To keep hw_rev backwards
	 * compatible, store it in the old format: the two new step bits
	 * are moved into the old step field (bits 2-3) and the former
	 * dash bits are cleared.
	 */
	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
			(IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_INTEL_WL_22500_1:
		sc->sc_fwname = IWX_CC_A_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 0;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 0;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_2:
	case PCI_PRODUCT_INTEL_WL_22500_5:
		/* These devices should be QuZ only. */
		if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
			printf("%s: unsupported AX201 adapter\n", DEVNAME(sc));
			return;
		}
		sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 500;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_3:
		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
			sc->sc_fwname = IWX_QU_C_HR_B_FW;
		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		else
			sc->sc_fwname = IWX_QU_B_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 500;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_4:
	case PCI_PRODUCT_INTEL_WL_22500_7:
	case PCI_PRODUCT_INTEL_WL_22500_8:
		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
			sc->sc_fwname = IWX_QU_C_HR_B_FW;
		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		else
			sc->sc_fwname = IWX_QU_B_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 1820;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_6:
		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
			sc->sc_fwname = IWX_QU_C_HR_B_FW;
		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
		else
			sc->sc_fwname = IWX_QU_B_HR_B_FW;
		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
		sc->sc_low_latency_xtal = 1;
		sc->sc_xtal_latency = 12000;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_9:
	case PCI_PRODUCT_INTEL_WL_22500_10:
	case PCI_PRODUCT_INTEL_WL_22500_11:
	case PCI_PRODUCT_INTEL_WL_22500_13:
	case PCI_PRODUCT_INTEL_WL_22500_15:
	case PCI_PRODUCT_INTEL_WL_22500_16:
		sc->sc_fwname = IWX_SO_A_GF_A_FW;
		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
		sc->sc_integrated = 0;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 0;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 1;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_12:
	case PCI_PRODUCT_INTEL_WL_22500_17:
		sc->sc_fwname = IWX_SO_A_GF_A_FW;
		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
		sc->sc_low_latency_xtal = 1;
		sc->sc_xtal_latency = 12000;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 0;
		sc->sc_imr_enabled = 1;
		break;
	case PCI_PRODUCT_INTEL_WL_22500_14:
		sc->sc_fwname = IWX_MA_B_GF_A_FW;
		sc->sc_pnvm_name = IWX_MA_B_GF_A_PNVM;
		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
		sc->sc_integrated = 1;
		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
		sc->sc_low_latency_xtal = 0;
		sc->sc_xtal_latency = 0;
		sc->sc_tx_with_siso_diversity = 0;
		sc->sc_uhb_supported = 1;
		break;
	default:
		printf("%s: unknown adapter type\n", DEVNAME(sc));
		return;
	}

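	/*
	 * A matching entry in iwx_dev_info_table, if any, overrides
	 * the defaults selected by PCI product ID above.
	 */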
	cfg = iwx_find_device_cfg(sc);
	if (cfg) {
		sc->sc_fwname = cfg->fw_name;
		sc->sc_pnvm_name = cfg->pnvm_name;
		sc->sc_tx_with_siso_diversity = cfg->tx_with_siso_diversity;
		sc->sc_uhb_supported = cfg->uhb_supported;
		if (cfg->xtal_latency) {
			sc->sc_xtal_latency = cfg->xtal_latency;
			sc->sc_low_latency_xtal = cfg->low_latency_xtal;
		}
	}

	sc->mac_addr_from_csr = 0x380; /* differs on BZ hw generation */

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		sc->sc_umac_prph_offset = 0x300000;
		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX_GEN3;
	} else
		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX;

	/* Allocate DMA memory for loading firmware. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
		ctxt_info_size = sizeof(struct iwx_context_info_gen3);
	else
		ctxt_info_size = sizeof(struct iwx_context_info);
	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
	    ctxt_info_size, 0);
	if (err) {
		printf("%s: could not allocate memory for loading firmware\n",
		    DEVNAME(sc));
		return;
	}

	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_scratch_dma,
		    sizeof(struct iwx_prph_scratch), 0);
		if (err) {
			printf("%s: could not allocate prph scratch memory\n",
			    DEVNAME(sc));
			goto fail1;
		}

		/*
		 * Allocate prph information. The driver doesn't use this.
		 * We use the second half of this page to give the device
		 * some dummy TR/CR tail pointers, which shouldn't be
		 * necessary as we don't use this, but the hardware still
		 * reads/writes there and we can't let it do that with
		 * a NULL pointer.
		 */
		KASSERT(sizeof(struct iwx_prph_info) < PAGE_SIZE / 2);
		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_info_dma,
		    PAGE_SIZE, 0);
		if (err) {
			printf("%s: could not allocate prph info memory\n",
			    DEVNAME(sc));
			goto fail1;
		}
	}

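	/*
	 * The ICT table needs 1 << IWX_ICT_PADDR_SHIFT byte alignment,
	 * presumably because the hardware register pointing at it holds
	 * the table's DMA address shifted right by IWX_ICT_PADDR_SHIFT.
	 */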
	/* Allocate interrupt cause table (ICT). */
	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
	if (err) {
		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
		goto fail1;
	}

	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
		if (err) {
			printf("%s: could not allocate TX ring %d\n",
			    DEVNAME(sc), txq_i);
			goto fail4;
		}
	}

	err = iwx_alloc_rx_ring(sc, &sc->rxq);
	if (err) {
		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
		goto fail4;
	}

	sc->sc_nswq = taskq_create("iwxns", 1, IPL_NET, 0);
	if (sc->sc_nswq == NULL)
		goto fail4;

	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
	    IEEE80211_C_ADDBA_OFFLOAD | /* device sends ADDBA/DELBA frames */
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_RSN |		/* WPA/RSN */
	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
	    IEEE80211_C_MONITOR |	/* monitor mode supported */
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */

	ic->ic_htcaps = IEEE80211_HTCAP_SGI20 | IEEE80211_HTCAP_SGI40;
	ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40;
	ic->ic_htcaps |=
	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
	ic->ic_htxcaps = 0;
	ic->ic_txbfcaps = 0;
	ic->ic_aselcaps = 0;
	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);

	ic->ic_vhtcaps = IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895 |
	    (IEEE80211_VHTCAP_MAX_AMPDU_LEN_64K <<
	    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT) |
	    (IEEE80211_VHTCAP_CHAN_WIDTH_80 <<
	     IEEE80211_VHTCAP_CHAN_WIDTH_SHIFT) | IEEE80211_VHTCAP_SGI80 |
	    IEEE80211_VHTCAP_RX_ANT_PATTERN | IEEE80211_VHTCAP_TX_ANT_PATTERN;

	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].sco = IEEE80211_HTOP0_SCO_SCN;
		sc->sc_phyctxt[i].vht_chan_width =
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT;
	}

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[1];

	ic->ic_max_rssi = IWX_MAX_DBM - IWX_MIN_DBM;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = iwx_ioctl;
	ifp->if_start = iwx_start;
	ifp->if_watchdog = iwx_watchdog;
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	if_attach(ifp);
	ieee80211_ifattach(ifp);
	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);

#if NBPFILTER > 0
	iwx_radiotap_attach(sc);
#endif
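	/*
	 * Set up Rx block-ack bookkeeping; each slot starts out with an
	 * invalid BAID until a BA session is established.
	 */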
	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
		rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
		rxba->sc = sc;
		timeout_set(&rxba->session_timer, iwx_rx_ba_session_expired,
		    rxba);
		timeout_set(&rxba->reorder_buf.reorder_timer,
		    iwx_reorder_timer_expired, &rxba->reorder_buf);
		for (j = 0; j < nitems(rxba->entries); j++)
			ml_init(&rxba->entries[j].frames);
	}
	task_set(&sc->init_task, iwx_init_task, sc);
	task_set(&sc->newstate_task, iwx_newstate_task, sc);
	task_set(&sc->ba_task, iwx_ba_task, sc);
	task_set(&sc->setkey_task, iwx_setkey_task, sc);
	task_set(&sc->mac_ctxt_task, iwx_mac_ctxt_task, sc);
	task_set(&sc->phy_ctxt_task, iwx_phy_ctxt_task, sc);
	task_set(&sc->bgscan_done_task, iwx_bgscan_done_task, sc);

	ic->ic_node_alloc = iwx_node_alloc;
	ic->ic_bgscan_start = iwx_bgscan;
	ic->ic_bgscan_done = iwx_bgscan_done;
	ic->ic_set_key = iwx_set_key;
	ic->ic_delete_key = iwx_delete_key;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwx_newstate;
	ic->ic_updatechan = iwx_updatechan;
	ic->ic_updateprot = iwx_updateprot;
	ic->ic_updateslot = iwx_updateslot;
	ic->ic_updateedca = iwx_updateedca;
	ic->ic_updatedtim = iwx_updatedtim;
	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;
	ic->ic_ampdu_tx_start = iwx_ampdu_tx_start;
	ic->ic_ampdu_tx_stop = NULL;
	/*
	 * We cannot read the MAC address without loading the
	 * firmware from disk. Postpone until mountroot is done.
	 */
	config_mountroot(self, iwx_attach_hook);

	return;

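	/* Error unwind: release resources in reverse order of allocation. */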
fail4:	while (--txq_i >= 0)
		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
	iwx_free_rx_ring(sc, &sc->rxq);
	if (sc->ict_dma.vaddr != NULL)
		iwx_dma_contig_free(&sc->ict_dma);

fail1:	iwx_dma_contig_free(&sc->ctxt_info_dma);
	iwx_dma_contig_free(&sc->prph_scratch_dma);
	iwx_dma_contig_free(&sc->prph_info_dma);
	return;
}

#if NBPFILTER > 0
void
iwx_radiotap_attach(struct iwx_softc *sc)
{
	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);

	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
}
#endif

void
iwx_init_task(void *arg1)
{
	struct iwx_softc *sc = arg1;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s = splnet();
	int generation = sc->sc_generation;
	int fatal = (sc->sc_flags & (IWX_FLAG_HW_ERR | IWX_FLAG_RFKILL));

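	/*
	 * If sc_generation changed while this task slept on the ioctl
	 * lock, the interface has been torn down in the meantime and
	 * our work is stale.
	 */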
	rw_enter_write(&sc->ioctl_rwl);
	if (generation != sc->sc_generation) {
		rw_exit(&sc->ioctl_rwl);
		splx(s);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING)
		iwx_stop(ifp);
	else
		sc->sc_flags &= ~IWX_FLAG_HW_ERR;

	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwx_init(ifp);

	rw_exit(&sc->ioctl_rwl);
	splx(s);
}

void
iwx_resume(struct iwx_softc *sc)
{
	pcireg_t reg;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	if (!sc->sc_msix) {
		/* Hardware bug workaround. */
		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    PCI_COMMAND_STATUS_REG);
		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    PCI_COMMAND_STATUS_REG, reg);
	}

	iwx_disable_interrupts(sc);
}

int
iwx_wakeup(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int err;

	rw_enter_write(&sc->ioctl_rwl);

	err = iwx_start_hw(sc);
	if (err) {
		rw_exit(&sc->ioctl_rwl);
		return err;
	}

	err = iwx_init_hw(sc);
	if (err) {
		iwx_stop_device(sc);
		rw_exit(&sc->ioctl_rwl);
		return err;
	}

	refcnt_init(&sc->task_refs);
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_flags |= IFF_RUNNING;

	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
	else
		ieee80211_begin_scan(ifp);

	rw_exit(&sc->ioctl_rwl);
	return 0;
}

int
iwx_activate(struct device *self, int act)
{
	struct iwx_softc *sc = (struct iwx_softc *)self;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int err = 0;

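	/*
	 * QUIESCE stops a running interface before the machine sleeps;
	 * RESUME redoes the PCI config workarounds and masks interrupts
	 * while the device is still quiescent; WAKEUP restarts the
	 * firmware once the system is running again.
	 */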
	switch (act) {
	case DVACT_QUIESCE:
		if (ifp->if_flags & IFF_RUNNING) {
			rw_enter_write(&sc->ioctl_rwl);
			iwx_stop(ifp);
			rw_exit(&sc->ioctl_rwl);
		}
		break;
	case DVACT_RESUME:
		iwx_resume(sc);
		break;
	case DVACT_WAKEUP:
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
			err = iwx_wakeup(sc);
			if (err)
				printf("%s: could not initialize hardware\n",
				    DEVNAME(sc));
		}
		break;
	}

	return 0;
}

struct cfdriver iwx_cd = {
	NULL, "iwx", DV_IFNET
};

const struct cfattach iwx_ca = {
	sizeof(struct iwx_softc), iwx_match, iwx_attach,
	NULL, iwx_activate
};