/*	$NetBSD: if_iwm.c,v 1.87 2021/06/24 09:17:53 riastradh Exp $	*/
/*	OpenBSD: if_iwm.c,v 1.148 2016/11/19 21:07:08 stsp Exp	*/
#define IEEE80211_NO_HT
/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 *   Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016        Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016        Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.87 2021/06/24 09:17:53 riastradh Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/workqueue.h>
#include <machine/endian.h>
#include <sys/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/firmload.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <netinet/in.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_radiotap.h>

#define DEVNAME(_s)	device_xname((_s)->sc_dev)
#define IC2IFP(_ic_)	((_ic_)->ic_ifp)

#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))

#ifdef IWM_DEBUG
#define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
int iwm_debug = 0;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#include <dev/pci/if_iwmreg.h>
#include <dev/pci/if_iwmvar.h>

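/*
 * Channel maps derived from the device NVM.  7000-family devices
 * support the set below; 8000-family devices additionally support
 * channels 68-96 and 169-181 in the 5 GHz band.
 */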
static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};

static const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

#define IWM_NUM_2GHZ_CHANNELS	14

static const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
};
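/*
 * Rate table index ("ridx") conventions: entries 0-3 are the CCK
 * rates, entries 4 and up are OFDM, e.g. iwm_rates[IWM_RIDX_OFDM]
 * is the 6 Mbit/s entry (the rate member is in units of 500 kbit/s).
 */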
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)

#ifndef IEEE80211_NO_HT
/* Convert an MCS index into an iwm_rates[] index. */
static const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
};
#endif

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

struct iwm_newstate_state {
	struct work ns_wk;
	enum ieee80211_state ns_nstate;
	int ns_arg;
	int ns_generation;
};

static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
		    enum iwm_ucode_type, uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
#ifdef IWM_DEBUG
static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
#endif
static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
static int	iwm_nic_lock(struct iwm_softc *);
static void	iwm_nic_unlock(struct iwm_softc *);
static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
		    uint32_t);
static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
		    bus_size_t, bus_size_t);
static void	iwm_dma_contig_free(struct iwm_dma_info *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_disable_rx_dma(struct iwm_softc *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
		    int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_rfkill_int(struct iwm_softc *);
static int	iwm_check_rfkill(struct iwm_softc *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_set_hw_ready(struct iwm_softc *);
static int	iwm_prepare_card_hw(struct iwm_softc *);
static void	iwm_apm_config(struct iwm_softc *);
static int	iwm_apm_init(struct iwm_softc *);
static void	iwm_apm_stop(struct iwm_softc *);
static int	iwm_allow_mcast(struct iwm_softc *);
static int	iwm_start_hw(struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
static int	iwm_post_alive(struct iwm_softc *);
static struct iwm_phy_db_entry *
		iwm_phy_db_get_section(struct iwm_softc *,
		    enum iwm_phy_db_section_type, uint16_t);
static int	iwm_phy_db_set_section(struct iwm_softc *,
		    struct iwm_calib_res_notif_phy_db *, uint16_t);
static int	iwm_is_valid_channel(uint16_t);
static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
static uint16_t iwm_channel_id_to_papd(uint16_t);
static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
		    uint8_t **, uint16_t *, uint16_t);
static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
		    void *);
static int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
		    enum iwm_phy_db_section_type, uint8_t);
static int	iwm_send_phy_db_data(struct iwm_softc *);
static void	iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
		    struct iwm_time_event_cmd_v1 *);
static int	iwm_send_time_event_cmd(struct iwm_softc *,
		    const struct iwm_time_event_cmd_v2 *);
static void	iwm_protect_session(struct iwm_softc *, struct iwm_node *,
		    uint32_t, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
		    uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
		    uint16_t *, size_t);
static void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
		    const uint8_t *, size_t);
#ifndef IEEE80211_NO_HT
static void	iwm_setup_ht_rates(struct iwm_softc *);
static void	iwm_htprot_task(void *);
static void	iwm_update_htprot(struct ieee80211com *,
		    struct ieee80211_node *);
static int	iwm_ampdu_rx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwm_ampdu_rx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *,
		    uint8_t, uint16_t, int);
#ifdef notyet
static int	iwm_ampdu_tx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwm_ampdu_tx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
#endif
static void	iwm_ba_task(void *);
#endif

static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
		    const uint16_t *, const uint16_t *, const uint16_t *,
		    const uint16_t *, const uint16_t *);
static void	iwm_set_hw_address_8000(struct iwm_softc *,
		    struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
static int	iwm_parse_nvm_sections(struct iwm_softc *,
		    struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
		    const uint8_t *, uint32_t);
static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
		    const uint8_t *, uint32_t);
static int	iwm_load_cpu_sections_7000(struct iwm_softc *,
		    struct iwm_fw_sects *, int, int *);
static int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_load_cpu_sections_8000(struct iwm_softc *,
		    struct iwm_fw_sects *, int, int *);
static int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
		    enum iwm_ucode_type);
static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static int	iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
static int	iwm_get_signal_strength(struct iwm_softc *,
		    struct iwm_rx_phy_info *);
static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
		    struct iwm_rx_packet *, struct iwm_rx_data *);
static int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
static void	iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
		    struct iwm_rx_data *);
static void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
		    struct iwm_node *);
static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
		    struct iwm_rx_data *);
static int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *,
		    uint32_t);
#if 0
static int	iwm_binding_update(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
#endif
static void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
		    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
static void	iwm_phy_ctxt_cmd_data(struct iwm_softc *,
		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
		    uint8_t, uint8_t);
static int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *,
		    uint8_t, uint8_t, uint32_t, uint32_t);
static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
static int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t,
		    uint16_t, const void *);
static int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
		    uint32_t *);
static int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
		    const void *, uint32_t *);
static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
static void	iwm_cmd_done(struct iwm_softc *, int qid, int idx);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
		    uint16_t);
#endif
static const struct iwm_rate *
		iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
		    struct ieee80211_frame *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
		    struct ieee80211_node *, int);
static void	iwm_led_enable(struct iwm_softc *);
static void	iwm_led_disable(struct iwm_softc *);
static int	iwm_led_is_enabled(struct iwm_softc *);
static void	iwm_led_blink_timeout(void *);
static void	iwm_led_blink_start(struct iwm_softc *);
static void	iwm_led_blink_stop(struct iwm_softc *);
static int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
		    struct iwm_beacon_filter_cmd *);
static void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *,
		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
static int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *,
		    int);
static void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
		    struct iwm_mac_power_cmd *);
static int	iwm_power_mac_update_mode(struct iwm_softc *,
		    struct iwm_node *);
static int	iwm_power_update_device(struct iwm_softc *);
#ifdef notyet
static int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
#endif
static int	iwm_disable_beacon_filter(struct iwm_softc *);
static int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_add_aux_sta(struct iwm_softc *);
static uint16_t iwm_scan_rx_chain(struct iwm_softc *);
static uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
#ifdef notyet
static uint16_t iwm_get_active_dwell(struct iwm_softc *, int, int);
static uint16_t iwm_get_passive_dwell(struct iwm_softc *, int);
#endif
static uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
		    struct iwm_scan_channel_cfg_lmac *, int);
static int	iwm_fill_probe_req(struct iwm_softc *,
		    struct iwm_scan_probe_req *);
static int	iwm_lmac_scan(struct iwm_softc *);
static int	iwm_config_umac_scan(struct iwm_softc *);
static int	iwm_umac_scan(struct iwm_softc *);
static uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
static void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
		    int *);
static void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
		    struct iwm_mac_ctx_cmd *, uint32_t, int);
static void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
		    struct iwm_mac_data_sta *, int);
static int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *,
		    uint32_t, int);
static int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
static int	iwm_auth(struct iwm_softc *);
static int	iwm_assoc(struct iwm_softc *);
static void	iwm_calib_timeout(void *);
#ifndef IEEE80211_NO_HT
static void	iwm_setrates_task(void *);
static int	iwm_setrates(struct iwm_node *);
#endif
static int	iwm_media_change(struct ifnet *);
static int	iwm_do_newstate(struct ieee80211com *, enum ieee80211_state,
		    int);
static void	iwm_newstate_cb(struct work *, void *);
static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
static void	iwm_endscan(struct iwm_softc *);
static void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
		    struct ieee80211_node *);
static int	iwm_sf_config(struct iwm_softc *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static int	iwm_init(struct ifnet *);
static void	iwm_start(struct ifnet *);
static void	iwm_stop(struct ifnet *, int);
static void	iwm_watchdog(struct ifnet *);
static int	iwm_ioctl(struct ifnet *, u_long, void *);
#ifdef IWM_DEBUG
static const char *iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_notif_intr(struct iwm_softc *);
static int	iwm_intr(void *);
static void	iwm_softintr(void *);
static int	iwm_preinit(struct iwm_softc *);
static void	iwm_attach_hook(device_t);
static void	iwm_attach(device_t, device_t, void *);
static int	iwm_config_complete(struct iwm_softc *);
#if 0
static void	iwm_init_task(void *);
static int	iwm_activate(device_t, enum devact);
static void	iwm_wakeup(struct iwm_softc *);
#endif
static void	iwm_radiotap_attach(struct iwm_softc *);
static int	iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);

static int iwm_sysctl_root_num;
static int iwm_lar_disable;

#ifndef	IWM_DEFAULT_MCC
#define	IWM_DEFAULT_MCC	"ZZ"
#endif
static char iwm_default_mcc[3] = IWM_DEFAULT_MCC;

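/*
 * Fetch the firmware image via firmload(9) and cache it in
 * sc->sc_fw.fw_rawdata; subsequent calls are no-ops while
 * IWM_FLAG_FW_LOADED is set.  Only a minimal size check is done
 * here; parsing happens in iwm_read_firmware().
 */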
static int
iwm_firmload(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	firmware_handle_t fwh;
	int err;

	if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
		return 0;

	/* Open firmware image. */
	err = firmware_open("if_iwm", sc->sc_fwname, &fwh);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not get firmware handle %s\n", sc->sc_fwname);
		return err;
	}

	if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
		fw->fw_rawdata = NULL;
	}

	fw->fw_rawsize = firmware_get_size(fwh);
	/*
	 * Well, this is how the Linux driver checks it ....
	 */
	if (fw->fw_rawsize < sizeof(uint32_t)) {
		aprint_error_dev(sc->sc_dev,
		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
		err = EINVAL;
		goto out;
	}

	/* Read the firmware. */
	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
	err = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not read firmware %s\n", sc->sc_fwname);
		goto out;
	}

	SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
 out:
	/* caller will release memory, if necessary */

	firmware_close(fwh);
	return err;
}

/*
 * Fix up ic_curchan from the last PHY info the firmware gave us,
 * since net80211 does not otherwise learn which channel a beacon or
 * probe response was received on.  (Originally commented as "just
 * maintaining status quo".)
 */
static void
iwm_fix_channel(struct iwm_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	uint8_t subtype;

	wh = mtod(m, struct ieee80211_frame *);

	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
		return;

	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
		return;

	int chan = le32toh(sc->sc_last_phy_info.channel);
	if (chan < __arraycount(ic->ic_channels))
		ic->ic_curchan = &ic->ic_channels[chan];
}

static int
iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwm_fw_cscheme_list *l = (struct iwm_fw_cscheme_list *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

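/*
 * Record one firmware section for later upload to the device.
 * The first 32 bits of a section TLV hold the device load offset,
 * the remainder is the section data itself.
 */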
static int
iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_onesect *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	/* for freeing the buffer during driver unload */
	fwone->fws_alloc = data;
	fwone->fws_allocsize = dlen;

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}

struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		DPRINTF(("%s: Wrong ucode_type %u for default calibration.\n",
		    DEVNAME(sc), ucode_type));
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

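/*
 * Load and parse the TLV-format firmware image: check the magic
 * value, then walk the (type, length, value) records, storing code
 * sections and capability/API flags in the softc.  Concurrent
 * callers sleep on sc->sc_fw until the first parser is done.
 */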
static int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	enum iwm_ucode_tlv_type tlv_type;
	uint8_t *data;
	int err, status;
	size_t len;

	if (ucode_type != IWM_UCODE_TYPE_INIT &&
	    fw->fw_status == IWM_FW_STATUS_DONE)
		return 0;

	if (fw->fw_status == IWM_FW_STATUS_NONE) {
		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
	} else {
		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
	}
	status = fw->fw_status;

	if (status == IWM_FW_STATUS_DONE)
		return 0;

	err = iwm_firmload(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->sc_fwname, err);
		goto out;
	}

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
		    sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			aprint_error_dev(sc->sc_dev,
			    "firmware too short: %zu bytes\n", len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu == 2) {
				fw->fw_sects[IWM_UCODE_TYPE_REGULAR].is_dual_cpus =
				    true;
				fw->fw_sects[IWM_UCODE_TYPE_INIT].is_dual_cpus =
				    true;
				fw->fw_sects[IWM_UCODE_TYPE_WOW].is_dual_cpus =
				    true;
			} else if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwm_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			struct iwm_ucode_api *api;
			uint32_t idx, bits;
			int i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwm_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			bits = le32toh(api->api_flags);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if (!ISSET(bits, __BIT(i)))
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwm_ucode_capa *capa;
			uint32_t idx, bits;
			int i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			bits = le32toh(capa->api_capa);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if (!ISSET(bits, __BIT(i)))
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_FW_UNDOCUMENTED1:
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
		case IWM_UCODE_TLV_FW_MEM_SEG:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING: {
			uint32_t paging_mem_size;
			if (tlv_len != sizeof(paging_mem_size)) {
				err = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32toh(*(uint32_t *)tlv_data);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				err = EINVAL;
				goto parse_out;
			}
			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
			    paging_mem_size;
			fw->fw_sects[IWM_UCODE_TYPE_REGULAR_USNIFFER].paging_mem_size =
			    paging_mem_size;
			break;
		}

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			    le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		default:
			DPRINTF(("%s: unknown firmware section %d, abort\n",
			    DEVNAME(sc), tlv_type));
			err = EINVAL;
			goto parse_out;
		}

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

 parse_out:
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "firmware parse error, section type %d\n", tlv_type);
	}

	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
		aprint_error_dev(sc->sc_dev,
		    "device uses unsupported power ops\n");
		err = ENOTSUP;
	}

 out:
	if (err)
		fw->fw_status = IWM_FW_STATUS_NONE;
	else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	if (err && fw->fw_rawdata != NULL) {
		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
		fw->fw_rawdata = NULL;
		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
		/* don't touch fw->fw_status */
		memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
	}
	return err;
}

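/*
 * Indirect access to periphery (PRPH) registers through the
 * IWM_HBUS_TARG_PRPH_{R,W}ADDR address windows.  Callers normally
 * bracket these with iwm_nic_lock()/iwm_nic_unlock() so the MAC
 * stays awake for the access.
 */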
static uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}

static void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}

#ifdef IWM_DEBUG
static int
iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
{
	int offs;
	uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
		iwm_nic_unlock(sc);
		return 0;
	}
	return EBUSY;
}
#endif

static int
iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
{
	int offs;
	const uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
		/* WADDR auto-increments */
		for (offs = 0; offs < dwords; offs++) {
			uint32_t val = vals ? vals[offs] : 0;
			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
		}
		iwm_nic_unlock(sc);
		return 0;
	}
	return EBUSY;
}

static int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}

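/*
 * Poll a CSR register until (reg & mask) == (bits & mask), checking
 * about every 10us; 'timo' is the total timeout in microseconds.
 * Returns nonzero on success and zero on timeout.
 */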
static int
iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	for (;;) {
		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
			return 1;
		}
		if (timo < 10) {
			return 0;
		}
		timo -= 10;
		DELAY(10);
	}
}

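/*
 * Request/release MAC access so the NIC stays awake while we touch
 * device registers.  iwm_nic_lock() returns nonzero on success; if
 * the MAC does not wake up, it forces an NMI to reset the device.
 */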
static int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	if (sc->sc_cmd_hold_nic_awake)
		return 1;

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
		rv = 1;
	} else {
		DPRINTF(("%s: resetting device via NMI\n", DEVNAME(sc)));
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}

static void
iwm_nic_unlock(struct iwm_softc *sc)
{

	if (sc->sc_cmd_hold_nic_awake)
		return;

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

static void
iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t val;

	/* XXX: no error path? */
	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
	}
}

static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}

static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}

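/*
 * Helpers for single-segment, physically contiguous DMA memory, used
 * for the RX/TX rings and other per-device DMA areas.  The buffer is
 * zeroed and its bus address left in dma->paddr.
 */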
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	void *va;

	dma->tag = tag;
	dma->size = size;

	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va, BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return err;
}

static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}

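/*
 * RX ring setup: IWM_RX_RING_COUNT descriptor slots (each descriptor
 * is the 32-bit DMA address of an RX buffer), a status area updated
 * by the hardware, and one IWM_RBUF_SIZE buffer per slot.
 */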
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return err;
}

static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
				break;
			DELAY(10);
		}
		iwm_nic_unlock(sc);
	}
}

void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}

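/*
 * TX ring setup.  Every ring gets descriptors, but command buffers
 * are only allocated for rings up to and including IWM_CMD_QUEUE.
 * The command queue needs larger single-segment maps (IWM_RBUF_SIZE)
 * than data queues (MCLBYTES, up to IWM_NUM_OF_TBS - 2 segments).
 */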
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err, nsegs;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE) {
			mapsize = IWM_RBUF_SIZE;
			nsegs = 1;
		} else {
			mapsize = MCLBYTES;
			nsegs = IWM_NUM_OF_TBS - 2;
		}
		err = bus_dmamap_create(sc->sc_dmat, mapsize, nsegs, mapsize,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return err;
}

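/*
 * On NICs with the APMG "wake up" workaround (apmg_wake_up_wa), the
 * host must hold the MAC awake while host commands are in flight;
 * these helpers set and clear that state around command submission.
 */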
static void
iwm_clear_cmd_in_flight(struct iwm_softc *sc)
{

	if (!sc->apmg_wake_up_wa)
		return;

	if (!sc->sc_cmd_hold_nic_awake) {
		aprint_error_dev(sc->sc_dev,
		    "cmd_hold_nic_awake not set\n");
		return;
	}

	sc->sc_cmd_hold_nic_awake = 0;
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

static int
iwm_set_cmd_in_flight(struct iwm_softc *sc)
{
	int ret;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (sc->apmg_wake_up_wa && !sc->sc_cmd_hold_nic_awake) {

		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
		    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
		    15000);
		if (ret == 0) {
			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			aprint_error_dev(sc->sc_dev,
			    "failed to wake NIC for hcmd\n");
			return EIO;
		}
		sc->sc_cmd_hold_nic_awake = 1;
	}

	return 0;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_CMD_QUEUE && sc->sc_cmd_hold_nic_awake)
		iwm_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}

static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static int
iwm_check_rfkill(struct iwm_softc *sc)
{
	uint32_t v;
	int s;
	int rv;

	s = splnet();

	/*
	 * "documentation" is not really helpful here:
	 *  27:	HW_RF_KILL_SW
	 *	Indicates state of (platform's) hardware RF-Kill switch
	 *
	 * But apparently when it's off, it's on ...
	 */
	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
	if (rv) {
		sc->sc_flags |= IWM_FLAG_RFKILL;
	} else {
		sc->sc_flags &= ~IWM_FLAG_RFKILL;
	}

	splx(s);
	return rv;
}

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}

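/*
 * Reset and re-arm the interrupt cause table (ICT), a 4KB-aligned
 * DMA area into which the hardware writes interrupt causes so that
 * the interrupt handler can read them from memory instead of CSRs.
 */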
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, IWM_ICT_SIZE,
	    BUS_DMASYNC_PREWRITE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

#define IWM_HW_READY_TIMEOUT 50
static int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	int ready;

	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
	if (ready)
		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);

	return ready;
}
#undef IWM_HW_READY_TIMEOUT

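/*
 * Bring the card out of its "prepare" state, polling the NIC-ready
 * bit for up to 150 ms in 200 us steps.
 */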
static int
iwm_prepare_card_hw(struct iwm_softc *sc)
{
	int t = 0;

	if (iwm_set_hw_ready(sc))
		return 0;

	DELAY(100);

	/* If HW is not ready, prepare the conditions to check again */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);

	do {
		if (iwm_set_hw_ready(sc))
			return 0;
		DELAY(200);
		t += 200;
	} while (t < 150000);

	return ETIMEDOUT;
}

static void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t reg;

	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCIE_LCSR);
	if (reg & PCIE_LCSR_ASPM_L1) {
		/* Um the Linux driver prints "Disabling L0S for this one ... */
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		/* ... and "Enabling" here */
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}
}

/*
 * Start up NIC's basic functionality after it has been reset
 * e.g. after platform boot or shutdown.
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int
iwm_apm_init(struct iwm_softc *sc)
{
	int err = 0;

	/* Disable L0S exit timer (platform NMI workaround) */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
	}

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwm_apm_config(sc);

#if 0 /* not for 7k/8k */
	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
		    trans->cfg->base_params->pll_cfg_val);
#endif

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwm_write_prph()
	 * and accesses to uCode SRAM.
	 */
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
		aprint_error_dev(sc->sc_dev,
		    "timeout waiting for clock stabilization\n");
		err = ETIMEDOUT;
		goto out;
	}

	if (sc->host_interrupt_operation_mode) {
1664 		/*
1665 		 * This is a bit of an abuse - the oscillator workaround below
1666 		 * is needed for 7260 / 3160 only, so we key it off
1667 		 * host_interrupt_operation_mode even though it is unrelated.
1668 		 *
1669 		 * Enable the oscillator to count wake up time for L1 exit. This
1670 		 * consumes slightly more power (100uA) - but allows to be sure
1671 		 * that we wake up from L1 on time.
1672 		 *
1673 		 * This looks weird: read the same register twice, discard the
1674 		 * value, set a bit, and then read that register twice again,
1675 		 * just to discard the value. But that's the way the hardware
1676 		 * seems to like it.
1677 		 */
1678 		iwm_read_prph(sc, IWM_OSC_CLK);
1679 		iwm_read_prph(sc, IWM_OSC_CLK);
1680 		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1681 		iwm_read_prph(sc, IWM_OSC_CLK);
1682 		iwm_read_prph(sc, IWM_OSC_CLK);
1683 	}
1684 
1685 	/*
1686 	 * Enable DMA clock and wait for it to stabilize.
1687 	 *
1688 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1689 	 * do not disable clocks.  This preserves any hardware bits already
1690 	 * set by default in "CLK_CTRL_REG" after reset.
1691 	 */
1692 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1693 		iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1694 		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1695 		DELAY(20);
1696 
1697 		/* Disable L1-Active */
1698 		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1699 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1700 
1701 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
1702 		iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1703 		    IWM_APMG_RTC_INT_STT_RFKILL);
1704 	}
1705  out:
1706 	if (err)
1707 		aprint_error_dev(sc->sc_dev, "apm init error %d\n", err);
1708 	return err;
1709 }
1710 
1711 static void
1712 iwm_apm_stop(struct iwm_softc *sc)
1713 {
1714 	/* stop device's busmaster DMA activity */
1715 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1716 
1717 	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1718 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1719 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1720 		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
1721 	DPRINTF(("iwm apm stop\n"));
1722 }
1723 
1724 static int
1725 iwm_start_hw(struct iwm_softc *sc)
1726 {
1727 	int err;
1728 
1729 	err = iwm_prepare_card_hw(sc);
1730 	if (err)
1731 		return err;
1732 
1733 	/* Reset the entire device */
1734 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1735 	DELAY(10);
1736 
1737 	err = iwm_apm_init(sc);
1738 	if (err)
1739 		return err;
1740 
1741 	iwm_enable_rfkill_int(sc);
1742 	iwm_check_rfkill(sc);
1743 
1744 	return 0;
1745 }
1746 
1747 static void
1748 iwm_stop_device(struct iwm_softc *sc)
1749 {
1750 	int chnl, ntries;
1751 	int qid;
1752 
1753 	iwm_disable_interrupts(sc);
1754 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1755 
1756 	/* Deactivate TX scheduler. */
1757 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1758 
1759 	/* Stop all DMA channels. */
1760 	if (iwm_nic_lock(sc)) {
1761 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1762 			IWM_WRITE(sc,
1763 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1764 			for (ntries = 0; ntries < 200; ntries++) {
1765 				uint32_t r;
1766 
1767 				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
1768 				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
1769 				    chnl))
1770 					break;
1771 				DELAY(20);
1772 			}
1773 		}
1774 		iwm_nic_unlock(sc);
1775 	}
1776 	iwm_disable_rx_dma(sc);
1777 
1778 	iwm_reset_rx_ring(sc, &sc->rxq);
1779 
1780 	for (qid = 0; qid < __arraycount(sc->txq); qid++)
1781 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1782 
1783 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1784 		/* Power-down device's busmaster DMA clocks */
1785 		if (iwm_nic_lock(sc)) {
1786 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1787 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1788 			DELAY(5);
1789 			iwm_nic_unlock(sc);
1790 		}
1791 	}
1792 
1793 	/* Make sure we've released our request to stay awake (redundant). */
1794 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1795 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1796 
1797 	/* Stop the device, and put it in low power state */
1798 	iwm_apm_stop(sc);
1799 
1800 	/*
1801 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1802 	 * Clean again the interrupt here
1803 	 */
1804 	iwm_disable_interrupts(sc);
1805 
1806 	/* Reset the on-board processor. */
1807 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1808 
1809 	/* Even though we stop the HW we still want the RF kill interrupt. */
1810 	iwm_enable_rfkill_int(sc);
1811 	iwm_check_rfkill(sc);
1812 }
1813 
1814 static void
1815 iwm_nic_config(struct iwm_softc *sc)
1816 {
1817 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1818 	uint32_t reg_val = 0;
1819 
1820 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1821 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1822 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1823 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1824 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1825 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1826 
1827 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1828 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1829 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1830 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1831 
1832 	/* radio configuration */
1833 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1834 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1835 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1836 
1837 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1838 
1839 	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1840 	    radio_cfg_step, radio_cfg_dash));
1841 
1842 	/*
1843 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
1844 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
1845 	 * to lose ownership and not being able to obtain it back.
1846 	 */
1847 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1848 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1849 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1850 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1851 	}
1852 }
1853 
1854 static int
1855 iwm_nic_rx_init(struct iwm_softc *sc)
1856 {
1857 	if (!iwm_nic_lock(sc))
1858 		return EBUSY;
1859 
1860 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1861 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
1862 	    0, sc->rxq.stat_dma.size,
1863 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1864 
1865 	iwm_disable_rx_dma(sc);
1866 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1867 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1868 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1869 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1870 
1871 	/* Set physical address of RX ring (256-byte aligned). */
1872 	IWM_WRITE(sc,
1873 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1874 
1875 	/* Set physical address of RX status (16-byte aligned). */
1876 	IWM_WRITE(sc,
1877 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1878 
1879 	/* Enable RX. */
1880 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1881 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
1882 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
1883 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
1884 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
1885 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
1886 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1887 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1888 
1889 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1890 
1891 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
1892 	if (sc->host_interrupt_operation_mode)
1893 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1894 
1895 	/*
1896 	 * This value should initially be 0 (before preparing any RBs),
1897 	 * and should be 8 after preparing the first 8 RBs (for example).
1898 	 */
1899 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1900 
1901 	iwm_nic_unlock(sc);
1902 
1903 	return 0;
1904 }
1905 
1906 static int
1907 iwm_nic_tx_init(struct iwm_softc *sc)
1908 {
1909 	int qid;
1910 
1911 	if (!iwm_nic_lock(sc))
1912 		return EBUSY;
1913 
1914 	/* Deactivate TX scheduler. */
1915 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1916 
1917 	/* Set physical address of "keep warm" page (16-byte aligned). */
1918 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1919 
1920 	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
1921 		struct iwm_tx_ring *txq = &sc->txq[qid];
1922 
1923 		/* Set physical address of TX ring (256-byte aligned). */
1924 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1925 		    txq->desc_dma.paddr >> 8);
1926 		DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
1927 		    qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
1928 	}
1929 
1930 	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1931 
1932 	iwm_nic_unlock(sc);
1933 
1934 	return 0;
1935 }
1936 
1937 static int
1938 iwm_nic_init(struct iwm_softc *sc)
1939 {
1940 	int err;
1941 
1942 	iwm_apm_init(sc);
1943 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1944 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1945 		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
1946 		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
1947 	}
1948 
1949 	iwm_nic_config(sc);
1950 
1951 	err = iwm_nic_rx_init(sc);
1952 	if (err)
1953 		return err;
1954 
1955 	err = iwm_nic_tx_init(sc);
1956 	if (err)
1957 		return err;
1958 
1959 	DPRINTF(("shadow registers enabled\n"));
1960 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1961 
1962 	return 0;
1963 }
1964 
1965 static const uint8_t iwm_ac_to_tx_fifo[] = {
1966 	IWM_TX_FIFO_VO,
1967 	IWM_TX_FIFO_VI,
1968 	IWM_TX_FIFO_BE,
1969 	IWM_TX_FIFO_BK,
1970 };
1971 
1972 static int
1973 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1974 {
1975 	if (!iwm_nic_lock(sc)) {
1976 		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
1977 		return EBUSY;
1978 	}
1979 
1980 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1981 
1982 	if (qid == IWM_CMD_QUEUE) {
1983 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1984 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1985 		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1986 
1987 		iwm_nic_unlock(sc);
1988 
1989 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1990 
1991 		if (!iwm_nic_lock(sc))
1992 			return EBUSY;
1993 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1994 		iwm_nic_unlock(sc);
1995 
1996 		iwm_write_mem32(sc,
1997 		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1998 
1999 		/* Set scheduler window size and frame limit. */
2000 		iwm_write_mem32(sc,
2001 		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2002 		    sizeof(uint32_t),
2003 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
2004 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
2005 		    ((IWM_FRAME_LIMIT
2006 		        << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2007 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
2008 
2009 		if (!iwm_nic_lock(sc))
2010 			return EBUSY;
2011 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2012 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2013 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
2014 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
2015 		    IWM_SCD_QUEUE_STTS_REG_MSK);
2016 	} else {
2017 		struct iwm_scd_txq_cfg_cmd cmd;
2018 		int err;
2019 
2020 		iwm_nic_unlock(sc);
2021 
2022 		memset(&cmd, 0, sizeof(cmd));
2023 		cmd.scd_queue = qid;
2024 		cmd.enable = 1;
2025 		cmd.sta_id = sta_id;
2026 		cmd.tx_fifo = fifo;
2027 		cmd.aggregate = 0;
2028 		cmd.window = IWM_FRAME_LIMIT;
2029 
2030 		err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd),
2031 		    &cmd);
2032 		if (err)
2033 			return err;
2034 
2035 		if (!iwm_nic_lock(sc))
2036 			return EBUSY;
2037 	}
2038 
2039 	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2040 	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
2041 
2042 	iwm_nic_unlock(sc);
2043 
2044 	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
2045 
2046 	return 0;
2047 }
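
/*
 * Usage sketch (illustrative only): data queues are bound to FIFOs via the
 * iwm_ac_to_tx_fifo table above, assuming qid 0..3 correspond to the four
 * WME access categories:
 */
#if 0
	for (qid = 0; qid < __arraycount(iwm_ac_to_tx_fifo); qid++) {
		err = iwm_enable_txq(sc, IWM_STATION_ID, qid,
		    iwm_ac_to_tx_fifo[qid]);
		if (err)
			return err;
	}
#endif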
2048 
2049 static int
2050 iwm_post_alive(struct iwm_softc *sc)
2051 {
2052 	int nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2053 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
2054 	int err, chnl;
2055 	uint32_t base;
2056 
2057 	if (!iwm_nic_lock(sc))
2058 		return EBUSY;
2059 
2060 	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2061 	if (sc->sched_base != base) {
2062 		DPRINTF(("%s: sched addr mismatch: 0x%08x != 0x%08x\n",
2063 		    DEVNAME(sc), sc->sched_base, base));
2064 		sc->sched_base = base;
2065 	}
2066 
2067 	iwm_nic_unlock(sc);
2068 
2069 	iwm_ict_reset(sc);
2070 
2071 	/* Clear TX scheduler state in SRAM. */
2072 	err = iwm_write_mem(sc,
2073 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND, NULL, nwords);
2074 	if (err)
2075 		return err;
2076 
2077 	if (!iwm_nic_lock(sc))
2078 		return EBUSY;
2079 
2080 	/* Set physical address of TX scheduler rings (1KB aligned). */
2081 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2082 
2083 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2084 
2085 	iwm_nic_unlock(sc);
2086 
2087 	/* enable command channel */
2088 	err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
2089 	if (err)
2090 		return err;
2091 
2092 	if (!iwm_nic_lock(sc))
2093 		return EBUSY;
2094 
2095 	/* Activate TX scheduler. */
2096 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2097 
2098 	/* Enable DMA channels. */
2099 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2100 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2101 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2102 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2103 	}
2104 
2105 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2106 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2107 
2108 	/* Enable L1-Active */
2109 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2110 		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2111 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2112 	}
2113 
2114 	iwm_nic_unlock(sc);
2115 
2116 	return 0;
2117 }
2118 
2119 static struct iwm_phy_db_entry *
2120 iwm_phy_db_get_section(struct iwm_softc *sc, enum iwm_phy_db_section_type type,
2121     uint16_t chg_id)
2122 {
2123 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2124 
2125 	if (type >= IWM_PHY_DB_MAX)
2126 		return NULL;
2127 
2128 	switch (type) {
2129 	case IWM_PHY_DB_CFG:
2130 		return &phy_db->cfg;
2131 	case IWM_PHY_DB_CALIB_NCH:
2132 		return &phy_db->calib_nch;
2133 	case IWM_PHY_DB_CALIB_CHG_PAPD:
2134 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2135 			return NULL;
2136 		return &phy_db->calib_ch_group_papd[chg_id];
2137 	case IWM_PHY_DB_CALIB_CHG_TXP:
2138 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2139 			return NULL;
2140 		return &phy_db->calib_ch_group_txp[chg_id];
2141 	default:
2142 		return NULL;
2143 	}
2144 	return NULL;
2145 }
2146 
2147 static int
2148 iwm_phy_db_set_section(struct iwm_softc *sc,
2149     struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
2150 {
2151 	struct iwm_phy_db_entry *entry;
2152 	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
2153 	uint16_t chg_id = 0;
2154 
2155 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2156 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
2157 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2158 
2159 	entry = iwm_phy_db_get_section(sc, type, chg_id);
2160 	if (!entry)
2161 		return EINVAL;
2162 
2163 	if (entry->data)
2164 		kmem_intr_free(entry->data, entry->size);
2165 	entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
2166 	if (!entry->data) {
2167 		entry->size = 0;
2168 		return ENOMEM;
2169 	}
2170 	memcpy(entry->data, phy_db_notif->data, size);
2171 	entry->size = size;
2172 
2173 	DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
2174 	    __func__, __LINE__, type, size, entry->data));
2175 
2176 	return 0;
2177 }
2178 
2179 static int
2180 iwm_is_valid_channel(uint16_t ch_id)
2181 {
2182 	if (ch_id <= 14 ||
2183 	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2184 	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2185 	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2186 		return 1;
2187 	return 0;
2188 }
2189 
2190 static uint8_t
2191 iwm_ch_id_to_ch_index(uint16_t ch_id)
2192 {
2193 	if (!iwm_is_valid_channel(ch_id))
2194 		return 0xff;
2195 
2196 	if (ch_id <= 14)
2197 		return ch_id - 1;
2198 	if (ch_id <= 64)
2199 		return (ch_id + 20) / 4;
2200 	if (ch_id <= 140)
2201 		return (ch_id - 12) / 4;
2202 	return (ch_id - 13) / 4;
2203 }
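
/*
 * Worked examples for the mapping above: channels 1..14 map to indices
 * 0..13; channel 36 -> (36 + 20) / 4 = 14 and channel 64 -> 21; channel
 * 100 -> (100 - 12) / 4 = 22 and channel 140 -> 32; channel 145 ->
 * (145 - 13) / 4 = 33 and channel 165 -> 38.  The valid channels thus map
 * onto a contiguous range of indices.
 */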
2204 
2205 
2206 static uint16_t
2207 iwm_channel_id_to_papd(uint16_t ch_id)
2208 {
2209 	if (!iwm_is_valid_channel(ch_id))
2210 		return 0xff;
2211 
2212 	if (1 <= ch_id && ch_id <= 14)
2213 		return 0;
2214 	if (36 <= ch_id && ch_id <= 64)
2215 		return 1;
2216 	if (100 <= ch_id && ch_id <= 140)
2217 		return 2;
2218 	return 3;
2219 }
2220 
2221 static uint16_t
2222 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2223 {
2224 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2225 	struct iwm_phy_db_chg_txp *txp_chg;
2226 	int i;
2227 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2228 
2229 	if (ch_index == 0xff)
2230 		return 0xff;
2231 
2232 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2233 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2234 		if (!txp_chg)
2235 			return 0xff;
2236 		/*
2237 		 * Look for the first channel group whose max channel
2238 		 * index is not lower than the requested channel's index.
2239 		 */
2240 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2241 			return i;
2242 	}
2243 	return 0xff;
2244 }
2245 
2246 static int
2247 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2248     uint16_t *size, uint16_t ch_id)
2249 {
2250 	struct iwm_phy_db_entry *entry;
2251 	uint16_t ch_group_id = 0;
2252 
2253 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2254 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2255 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2256 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2257 
2258 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2259 	if (!entry)
2260 		return EINVAL;
2261 
2262 	*data = entry->data;
2263 	*size = entry->size;
2264 
2265 	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
2266 		       __func__, __LINE__, type, *size));
2267 
2268 	return 0;
2269 }
2270 
2271 static int
2272 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2273     void *data)
2274 {
2275 	struct iwm_phy_db_cmd phy_db_cmd;
2276 	struct iwm_host_cmd cmd = {
2277 		.id = IWM_PHY_DB_CMD,
2278 		.flags = IWM_CMD_ASYNC,
2279 	};
2280 
2281 	DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
2282 	    type, length));
2283 
2284 	phy_db_cmd.type = le16toh(type);
2285 	phy_db_cmd.length = le16toh(length);
2286 
2287 	cmd.data[0] = &phy_db_cmd;
2288 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2289 	cmd.data[1] = data;
2290 	cmd.len[1] = length;
2291 
2292 	return iwm_send_cmd(sc, &cmd);
2293 }
2294 
2295 static int
2296 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2297     enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2298 {
2299 	uint16_t i;
2300 	int err;
2301 	struct iwm_phy_db_entry *entry;
2302 
2303 	/* Send all the channel-specific groups to operational fw */
2304 	for (i = 0; i < max_ch_groups; i++) {
2305 		entry = iwm_phy_db_get_section(sc, type, i);
2306 		if (!entry)
2307 			return EINVAL;
2308 
2309 		if (!entry->size)
2310 			continue;
2311 
2312 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2313 		if (err) {
2314 			DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2315 			    "err %d\n", DEVNAME(sc), type, i, err));
2316 			return err;
2317 		}
2318 
2319 		DPRINTFN(10, ("%s: Sent PHY_DB HCMD, type = %d num = %d\n",
2320 		    DEVNAME(sc), type, i));
2321 
2322 		DELAY(1000);
2323 	}
2324 
2325 	return 0;
2326 }
2327 
2328 static int
2329 iwm_send_phy_db_data(struct iwm_softc *sc)
2330 {
2331 	uint8_t *data = NULL;
2332 	uint16_t size = 0;
2333 	int err;
2334 
2335 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2336 	if (err)
2337 		return err;
2338 
2339 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2340 	if (err)
2341 		return err;
2342 
2343 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2344 	    &data, &size, 0);
2345 	if (err)
2346 		return err;
2347 
2348 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2349 	if (err)
2350 		return err;
2351 
2352 	err = iwm_phy_db_send_all_channel_groups(sc,
2353 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2354 	if (err)
2355 		return err;
2356 
2357 	err = iwm_phy_db_send_all_channel_groups(sc,
2358 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2359 	if (err)
2360 		return err;
2361 
2362 	return 0;
2363 }
2364 
2365 /*
2366  * For the high priority TE use a time event type that has similar priority to
2367  * the FW's action scan priority.
2368  */
2369 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2370 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2371 
2372 /* used to convert from time event API v2 to v1 */
2373 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2374 			     IWM_TE_V2_EVENT_SOCIOPATHIC)
2375 static inline uint16_t
2376 iwm_te_v2_get_notify(uint16_t policy)
2377 {
2378 	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
2379 }
2380 
2381 static inline uint16_t
2382 iwm_te_v2_get_dep_policy(uint16_t policy)
2383 {
2384 	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
2385 		IWM_TE_V2_PLACEMENT_POS;
2386 }
2387 
2388 static inline uint16_t
2389 iwm_te_v2_get_absence(uint16_t policy)
2390 {
2391 	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
2392 }
2393 
2394 static void
2395 iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
2396     struct iwm_time_event_cmd_v1 *cmd_v1)
2397 {
2398 	cmd_v1->id_and_color = cmd_v2->id_and_color;
2399 	cmd_v1->action = cmd_v2->action;
2400 	cmd_v1->id = cmd_v2->id;
2401 	cmd_v1->apply_time = cmd_v2->apply_time;
2402 	cmd_v1->max_delay = cmd_v2->max_delay;
2403 	cmd_v1->depends_on = cmd_v2->depends_on;
2404 	cmd_v1->interval = cmd_v2->interval;
2405 	cmd_v1->duration = cmd_v2->duration;
2406 	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
2407 		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
2408 	else
2409 		cmd_v1->repeat = htole32(cmd_v2->repeat);
2410 	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
2411 	cmd_v1->interval_reciprocal = 0; /* unused */
2412 
2413 	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
2414 	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
2415 	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
2416 }
2417 
2418 static int
2419 iwm_send_time_event_cmd(struct iwm_softc *sc,
2420     const struct iwm_time_event_cmd_v2 *cmd)
2421 {
2422 	struct iwm_time_event_cmd_v1 cmd_v1;
2423 
2424 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2425 		return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(*cmd),
2426 		    cmd);
2427 
2428 	iwm_te_v2_to_v1(cmd, &cmd_v1);
2429 	return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(cmd_v1),
2430 	    &cmd_v1);
2431 }
2432 
2433 static void
2434 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2435     uint32_t duration, uint32_t max_delay)
2436 {
2437 	struct iwm_time_event_cmd_v2 time_cmd;
2438 
2439 	memset(&time_cmd, 0, sizeof(time_cmd));
2440 
2441 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2442 	time_cmd.id_and_color =
2443 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2444 	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2445 
2446 	time_cmd.apply_time = htole32(0);
2447 
2448 	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2449 	time_cmd.max_delay = htole32(max_delay);
2450 	/* TODO: why do we need to set interval = beacon interval if it is not periodic? */
2451 	time_cmd.interval = htole32(1);
2452 	time_cmd.duration = htole32(duration);
2453 	time_cmd.repeat = 1;
2454 	time_cmd.policy
2455 	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2456 	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
2457 		IWM_T2_V2_START_IMMEDIATELY);
2458 
2459 	iwm_send_time_event_cmd(sc, &time_cmd);
2460 }
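
/*
 * Usage sketch (illustrative values; units are whatever the firmware's
 * time event API expects): protect the channel around association so the
 * exchange is not cut short by scanning, e.g.:
 */
#if 0
	iwm_protect_session(sc, in, 200 /* duration */, 500 /* max_delay */);
#endif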
2461 
2462 /*
2463  * NVM read access and content parsing.  We do not support
2464  * external NVM or writing NVM.
2465  */
2466 
2467 /* list of NVM sections we are allowed/need to read */
2468 static const int iwm_nvm_to_read[] = {
2469 	IWM_NVM_SECTION_TYPE_HW,
2470 	IWM_NVM_SECTION_TYPE_SW,
2471 	IWM_NVM_SECTION_TYPE_REGULATORY,
2472 	IWM_NVM_SECTION_TYPE_CALIBRATION,
2473 	IWM_NVM_SECTION_TYPE_PRODUCTION,
2474 	IWM_NVM_SECTION_TYPE_HW_8000,
2475 	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2476 	IWM_NVM_SECTION_TYPE_PHY_SKU,
2477 };
2478 
2479 /* Default NVM size to read */
2480 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2481 #define IWM_MAX_NVM_SECTION_SIZE_7000	(16 * 512 * sizeof(uint16_t)) /*16 KB*/
2482 #define IWM_MAX_NVM_SECTION_SIZE_8000	(32 * 512 * sizeof(uint16_t)) /*32 KB*/
2483 
2484 #define IWM_NVM_WRITE_OPCODE 1
2485 #define IWM_NVM_READ_OPCODE 0
2486 
2487 static int
2488 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2489     uint16_t length, uint8_t *data, uint16_t *len)
2490 {
2491 	offset = 0;
2492 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2493 		.offset = htole16(offset),
2494 		.length = htole16(length),
2495 		.type = htole16(section),
2496 		.op_code = IWM_NVM_READ_OPCODE,
2497 	};
2498 	struct iwm_nvm_access_resp *nvm_resp;
2499 	struct iwm_rx_packet *pkt;
2500 	struct iwm_host_cmd cmd = {
2501 		.id = IWM_NVM_ACCESS_CMD,
2502 		.flags = (IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL),
2503 		.data = { &nvm_access_cmd, },
2504 	};
2505 	int err, offset_read;
2506 	size_t bytes_read;
2507 	uint8_t *resp_data;
2508 
2509 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2510 
2511 	err = iwm_send_cmd(sc, &cmd);
2512 	if (err) {
2513 		DPRINTF(("%s: Could not send NVM_ACCESS command (error=%d)\n",
2514 		    DEVNAME(sc), err));
2515 		return err;
2516 	}
2517 
2518 	pkt = cmd.resp_pkt;
2519 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2520 		err = EIO;
2521 		goto exit;
2522 	}
2523 
2524 	/* Extract NVM response */
2525 	nvm_resp = (void *)pkt->data;
2526 
2527 	err = le16toh(nvm_resp->status);
2528 	bytes_read = le16toh(nvm_resp->length);
2529 	offset_read = le16toh(nvm_resp->offset);
2530 	resp_data = nvm_resp->data;
2531 	if (err) {
2532 		err = EINVAL;
2533 		goto exit;
2534 	}
2535 
2536 	if (offset_read != offset) {
2537 		err = EINVAL;
2538 		goto exit;
2539 	}
2540 	if (bytes_read > length) {
2541 		err = EINVAL;
2542 		goto exit;
2543 	}
2544 
2545 	memcpy(data + offset, resp_data, bytes_read);
2546 	*len = bytes_read;
2547 
2548  exit:
2549 	iwm_free_resp(sc, &cmd);
2550 	return err;
2551 }
2552 
2553 /*
2554  * Reads an NVM section completely.
2555  * NICs prior to the 7000 family don't have a real NVM; they just read
2556  * section 0, which is the EEPROM. Because EEPROM reads are not bounded
2557  * by uCode, we must check manually in this case that we don't overflow
2558  * and try to read more than the EEPROM size.
2559  */
2560 static int
2561 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2562     uint16_t *len, size_t max_len)
2563 {
2564 	uint16_t chunklen, seglen;
2565 	int err;
2566 
2567 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2568 	*len = 0;
2569 
2570 	/* Read NVM chunks until exhausted (reading less than requested) */
2571 	while (seglen == chunklen && *len < max_len) {
2572 		err = iwm_nvm_read_chunk(sc, section, *len, chunklen, data,
2573 		    &seglen);
2574 		if (err) {
2575 			DPRINTF(("%s: Cannot read NVM from section %d "
2576 			    "offset %d, length %d\n",
2577 			    DEVNAME(sc), section, *len, chunklen));
2578 			return err;
2579 		}
2580 		*len += seglen;
2581 	}
2582 
2583 	DPRINTFN(4, ("NVM section %d read completed\n", section));
2584 	return 0;
2585 }
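
/*
 * Worked example for the loop above: with the default 2KB chunk size, a
 * hypothetical 5KB section is read as 2048 + 2048 + 1024 bytes; the short
 * 1024-byte read makes seglen != chunklen, which ends the loop with
 * *len == 5120.
 */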
2586 
2587 static uint8_t
2588 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2589 {
2590 	uint8_t tx_ant;
2591 
2592 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2593 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2594 
2595 	if (sc->sc_nvm.valid_tx_ant)
2596 		tx_ant &= sc->sc_nvm.valid_tx_ant;
2597 
2598 	return tx_ant;
2599 }
2600 
2601 static uint8_t
2602 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2603 {
2604 	uint8_t rx_ant;
2605 
2606 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2607 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2608 
2609 	if (sc->sc_nvm.valid_rx_ant)
2610 		rx_ant &= sc->sc_nvm.valid_rx_ant;
2611 
2612 	return rx_ant;
2613 }
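
/*
 * Both helpers above return a bitmask of usable antenna chains, e.g.
 * (illustrative) 0x1 for chain A only or 0x3 for chains A and B; a mask
 * with more than one bit set is what makes MIMO rates possible.
 */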
2614 
2615 static void
2616 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
2617     const uint8_t *nvm_channels, size_t nchan)
2618 {
2619 	struct ieee80211com *ic = &sc->sc_ic;
2620 	struct iwm_nvm_data *data = &sc->sc_nvm;
2621 	int ch_idx;
2622 	struct ieee80211_channel *channel;
2623 	uint16_t ch_flags;
2624 	int is_5ghz;
2625 	int flags, hw_value;
2626 
2627 	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
2628 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2629 		aprint_debug_dev(sc->sc_dev,
2630 		    "Ch. %d: %svalid %cibss %s %cradar %cdfs"
2631 		    " %cwide %c40MHz %c80MHz %c160MHz\n",
2632 		    nvm_channels[ch_idx],
2633 		    ch_flags & IWM_NVM_CHANNEL_VALID ? "" : "in",
2634 		    ch_flags & IWM_NVM_CHANNEL_IBSS ? '+' : '-',
2635 		    ch_flags & IWM_NVM_CHANNEL_ACTIVE ? "active" : "passive",
2636 		    ch_flags & IWM_NVM_CHANNEL_RADAR ? '+' : '-',
2637 		    ch_flags & IWM_NVM_CHANNEL_DFS ? '+' : '-',
2638 		    ch_flags & IWM_NVM_CHANNEL_WIDE ? '+' : '-',
2639 		    ch_flags & IWM_NVM_CHANNEL_40MHZ ? '+' : '-',
2640 		    ch_flags & IWM_NVM_CHANNEL_80MHZ ? '+' : '-',
2641 		    ch_flags & IWM_NVM_CHANNEL_160MHZ ? '+' : '-');
2642 
2643 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
2644 		    !data->sku_cap_band_52GHz_enable)
2645 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
2646 
2647 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2648 			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
2649 			    nvm_channels[ch_idx], ch_flags,
2650 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ? "5" : "2.4"));
2651 			continue;
2652 		}
2653 
2654 		hw_value = nvm_channels[ch_idx];
2655 		channel = &ic->ic_channels[hw_value];
2656 
2657 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
2658 		if (!is_5ghz) {
2659 			flags = IEEE80211_CHAN_2GHZ;
2660 			channel->ic_flags
2661 			    = IEEE80211_CHAN_CCK
2662 			    | IEEE80211_CHAN_OFDM
2663 			    | IEEE80211_CHAN_DYN
2664 			    | IEEE80211_CHAN_2GHZ;
2665 		} else {
2666 			flags = IEEE80211_CHAN_5GHZ;
2667 			channel->ic_flags =
2668 			    IEEE80211_CHAN_A;
2669 		}
2670 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2671 
2672 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
2673 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2674 
2675 #ifndef IEEE80211_NO_HT
2676 		if (data->sku_cap_11n_enable)
2677 			channel->ic_flags |= IEEE80211_CHAN_HT;
2678 #endif
2679 	}
2680 }
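
/*
 * Worked examples for ieee80211_ieee2mhz() as used above: channel 1 with
 * IEEE80211_CHAN_2GHZ yields 2412 MHz, channel 14 yields 2484 MHz, and
 * channel 36 with IEEE80211_CHAN_5GHZ yields 5180 MHz.
 */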
2681 
2682 #ifndef IEEE80211_NO_HT
2683 static void
2684 iwm_setup_ht_rates(struct iwm_softc *sc)
2685 {
2686 	struct ieee80211com *ic = &sc->sc_ic;
2687 
2688 	/* TX is supported with the same MCS as RX. */
2689 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2690 
2691 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
2692 
2693 #ifdef notyet
2694 	if (sc->sc_nvm.sku_cap_mimo_disable)
2695 		return;
2696 
2697 	if (iwm_fw_valid_rx_ant(sc) > 1)
2698 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
2699 	if (iwm_fw_valid_rx_ant(sc) > 2)
2700 		ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
2701 #endif
2702 }
2703 
2704 #define IWM_MAX_RX_BA_SESSIONS 16
2705 
2706 static void
2707 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
2708     uint16_t ssn, int start)
2709 {
2710 	struct ieee80211com *ic = &sc->sc_ic;
2711 	struct iwm_add_sta_cmd_v7 cmd;
2712 	struct iwm_node *in = (struct iwm_node *)ni;
2713 	int err, s;
2714 	uint32_t status;
2715 
2716 	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
2717 		ieee80211_addba_req_refuse(ic, ni, tid);
2718 		return;
2719 	}
2720 
2721 	memset(&cmd, 0, sizeof(cmd));
2722 
2723 	cmd.sta_id = IWM_STATION_ID;
2724 	cmd.mac_id_n_color
2725 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2726 	cmd.add_modify = IWM_STA_MODE_MODIFY;
2727 
2728 	if (start) {
2729 		cmd.add_immediate_ba_tid = (uint8_t)tid;
2730 		cmd.add_immediate_ba_ssn = ssn;
2731 	} else {
2732 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
2733 	}
2734 	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
2735 	    IWM_STA_MODIFY_REMOVE_BA_TID;
2736 
2737 	status = IWM_ADD_STA_SUCCESS;
2738 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
2739 	    &status);
2740 
2741 	s = splnet();
2742 	if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
2743 		if (start) {
2744 			sc->sc_rx_ba_sessions++;
2745 			ieee80211_addba_req_accept(ic, ni, tid);
2746 		} else if (sc->sc_rx_ba_sessions > 0)
2747 			sc->sc_rx_ba_sessions--;
2748 	} else if (start)
2749 		ieee80211_addba_req_refuse(ic, ni, tid);
2750 	splx(s);
2751 }
2752 
2753 static void
2754 iwm_htprot_task(void *arg)
2755 {
2756 	struct iwm_softc *sc = arg;
2757 	struct ieee80211com *ic = &sc->sc_ic;
2758 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
2759 	int err;
2760 
2761 	/* This call updates HT protection based on in->in_ni.ni_htop1. */
2762 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
2763 	if (err)
2764 		aprint_error_dev(sc->sc_dev,
2765 		    "could not change HT protection: error %d\n", err);
2766 }
2767 
2768 /*
2769  * This function is called by upper layer when HT protection settings in
2770  * beacons have changed.
2771  */
2772 static void
2773 iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
2774 {
2775 	struct iwm_softc *sc = ic->ic_softc;
2776 
2777 	/* assumes that ni == ic->ic_bss */
2778 	task_add(systq, &sc->htprot_task);
2779 }
2780 
2781 static void
2782 iwm_ba_task(void *arg)
2783 {
2784 	struct iwm_softc *sc = arg;
2785 	struct ieee80211com *ic = &sc->sc_ic;
2786 	struct ieee80211_node *ni = ic->ic_bss;
2787 
2788 	if (sc->ba_start)
2789 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
2790 	else
2791 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
2792 }
2793 
2794 /*
2795  * This function is called by upper layer when an ADDBA request is received
2796  * from another STA and before the ADDBA response is sent.
2797  */
2798 static int
2799 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
2800     uint8_t tid)
2801 {
2802 	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
2803 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2804 
2805 	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
2806 		return ENOSPC;
2807 
2808 	sc->ba_start = 1;
2809 	sc->ba_tid = tid;
2810 	sc->ba_ssn = htole16(ba->ba_winstart);
2811 	task_add(systq, &sc->ba_task);
2812 
2813 	return EBUSY;
2814 }
2815 
2816 /*
2817  * This function is called by upper layer on teardown of an HT-immediate
2818  * Block Ack agreement (e.g. upon receipt of a DELBA frame).
2819  */
2820 static void
2821 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
2822     uint8_t tid)
2823 {
2824 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2825 
2826 	sc->ba_start = 0;
2827 	sc->ba_tid = tid;
2828 	task_add(systq, &sc->ba_task);
2829 }
2830 #endif
2831 
2832 static void
2833 iwm_free_fw_paging(struct iwm_softc *sc)
2834 {
2835 	int i;
2836 
2837 	if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
2838 		return;
2839 
2840 	for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
2841 		iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
2842 	}
2843 
2844 	memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
2845 }
2846 
2847 static int
2848 iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
2849 {
2850 	int sec_idx, idx;
2851 	uint32_t offset = 0;
2852 
2853 	/*
2854 	 * Find the start of the paging image.  If CPU2 exists and is in
2855 	 * paging format, the image looks like this:
2856 	 * CPU1 sections (2 or more)
2857 	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
2858 	 * CPU2 sections (not paged)
2859 	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
2860 	 * CPU2 sections from the CPU2 paging sections
2861 	 * CPU2 paging CSS
2862 	 * CPU2 paging image (including instruction and data)
2863 	 */
2864 	for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
2865 		if (fws->fw_sect[sec_idx].fws_devoff ==
2866 		    IWM_PAGING_SEPARATOR_SECTION) {
2867 			sec_idx++;
2868 			break;
2869 		}
2870 	}
2871 
2872 	/*
2873 	 * If paging is enabled there should be at least 2 more sections left
2874 	 * (one for CSS and one for Paging data)
2875 	 */
2876 	if (sec_idx >= __arraycount(fws->fw_sect) - 1) {
2877 		aprint_verbose_dev(sc->sc_dev,
2878 		    "Paging: Missing CSS and/or paging sections\n");
2879 		iwm_free_fw_paging(sc);
2880 		return EINVAL;
2881 	}
2882 
2883 	/* copy the CSS block to the dram */
2884 	DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n", DEVNAME(sc),
2885 	    sec_idx));
2886 
2887 	memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
2888 	    fws->fw_sect[sec_idx].fws_data, sc->fw_paging_db[0].fw_paging_size);
2889 
2890 	DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
2891 	    DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
2892 
2893 	sec_idx++;
2894 
2895 	/*
2896 	 * Copy the paging blocks to DRAM.  The loop index starts from 1
2897 	 * because the CSS block (index 0) was already copied above.  The
2898 	 * loop stops at num_of_paging_blk because the last block may not
2899 	 * be full; it is handled separately below.
2900 	 */
2901 	for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
2902 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
2903 		       (const char *)fws->fw_sect[sec_idx].fws_data + offset,
2904 		       sc->fw_paging_db[idx].fw_paging_size);
2905 
2906 		DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
2907 		    DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
2908 
2909 		offset += sc->fw_paging_db[idx].fw_paging_size;
2910 	}
2911 
2912 	/* copy the last paging block */
2913 	if (sc->num_of_pages_in_last_blk > 0) {
2914 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
2915 		    (const char *)fws->fw_sect[sec_idx].fws_data + offset,
2916 		    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
2917 
2918 		DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
2919 		    DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
2920 	}
2921 
2922 	return 0;
2923 }
2924 
2925 static int
2926 iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
2927 {
2928 	int blk_idx = 0;
2929 	int error, num_of_pages;
2930 	bus_dmamap_t dmap;
2931 
2932 	if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
2933 		int i;
2934 		/* The device was reset; set up firmware paging again. */
2935 		for (i = 0; i < sc->num_of_paging_blk + 1; i++) {
2936 			dmap = sc->fw_paging_db[i].fw_paging_block.map;
2937 			bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
2938 			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2939 		}
2940 		return 0;
2941 	}
2942 
2943 	/* Ensure that IWM_PAGING_BLOCK_SIZE equals 1 << IWM_BLOCK_2_EXP_SIZE. */
2944 	CTASSERT(__BIT(IWM_BLOCK_2_EXP_SIZE) == IWM_PAGING_BLOCK_SIZE);
2945 
2946 	num_of_pages = fws->paging_mem_size / IWM_FW_PAGING_SIZE;
2947 	sc->num_of_paging_blk =
2948 	    howmany(num_of_pages, IWM_NUM_OF_PAGE_PER_GROUP);
2949 	sc->num_of_pages_in_last_blk = num_of_pages -
2950 	    IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
2951 
2952 	DPRINTF(("%s: Paging: allocating mem for %d paging blocks, "
2953 	    "each block holds 8 pages, last block holds %d pages\n",
2954 	    DEVNAME(sc), sc->num_of_paging_blk, sc->num_of_pages_in_last_blk));
2955 
2956 	/* allocate block of 4Kbytes for paging CSS */
2957 	error = iwm_dma_contig_alloc(sc->sc_dmat,
2958 	    &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
2959 	    4096);
2960 	if (error) {
2961 		/* free all the previous pages since we failed */
2962 		iwm_free_fw_paging(sc);
2963 		return ENOMEM;
2964 	}
2965 
2966 	sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
2967 
2968 	DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
2969 	    DEVNAME(sc)));
2970 
2971 	/*
2972 	 * Allocate blocks in DRAM.  Since the CSS was allocated in
2973 	 * fw_paging_db[0], the loop starts from index 1.
2974 	 */
2975 	for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
2976 		/* allocate block of IWM_PAGING_BLOCK_SIZE (32K) */
2977 		/* XXX Use iwm_dma_contig_alloc for allocating */
2978 		error = iwm_dma_contig_alloc(sc->sc_dmat,
2979 		    &sc->fw_paging_db[blk_idx].fw_paging_block,
2980 		    IWM_PAGING_BLOCK_SIZE, 4096);
2981 		if (error) {
2982 			/* free all the previous pages since we failed */
2983 			iwm_free_fw_paging(sc);
2984 			return ENOMEM;
2985 		}
2986 
2987 		sc->fw_paging_db[blk_idx].fw_paging_size =
2988 		    IWM_PAGING_BLOCK_SIZE;
2989 
2990 		DPRINTF(("%s: Paging: allocated 32K bytes for firmware "
2991 		    "paging.\n", DEVNAME(sc)));
2992 	}
2993 
2994 	return 0;
2995 }
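
/*
 * Worked example for the block arithmetic above (hypothetical size): with
 * 4KB pages and 8 pages per group, a paging_mem_size of 368640 bytes is 90
 * pages, so num_of_paging_blk = howmany(90, 8) = 12 and
 * num_of_pages_in_last_blk = 90 - 8 * 11 = 2; paging blocks 1..11 are full
 * 32KB blocks and block 12 carries the final two pages.
 */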
2996 
2997 static int
2998 iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
2999 {
3000 	int err;
3001 
3002 	err = iwm_alloc_fw_paging_mem(sc, fws);
3003 	if (err)
3004 		return err;
3005 
3006 	return iwm_fill_paging_mem(sc, fws);
3007 }
3008 
3009 static bool
3010 iwm_has_new_tx_api(struct iwm_softc *sc)
3011 {
3012 	/* XXX */
3013 	return false;
3014 }
3015 
3016 /* send paging cmd to FW in case CPU2 has paging image */
3017 static int
3018 iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
3019 {
3020 	struct iwm_fw_paging_cmd fw_paging_cmd = {
3021 		.flags = htole32(IWM_PAGING_CMD_IS_SECURED |
3022 		                 IWM_PAGING_CMD_IS_ENABLED |
3023 		                 (sc->num_of_pages_in_last_blk <<
3024 		                  IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
3025 		.block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
3026 		.block_num = htole32(sc->num_of_paging_blk),
3027 	};
3028 	size_t size = sizeof(fw_paging_cmd);
3029 	int blk_idx;
3030 	bus_dmamap_t dmap;
3031 
3032 	if (!iwm_has_new_tx_api(sc))
3033 		size -= (sizeof(uint64_t) - sizeof(uint32_t)) *
3034 		    IWM_NUM_OF_FW_PAGING_BLOCKS;
3035 
3036 	/* Loop over all paging blocks plus the CSS block. */
3037 	for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
3038 		bus_addr_t dev_phy_addr =
3039 		    sc->fw_paging_db[blk_idx].fw_paging_block.paddr;
3040 		if (iwm_has_new_tx_api(sc)) {
3041 			fw_paging_cmd.device_phy_addr.addr64[blk_idx] =
3042 			    htole64(dev_phy_addr);
3043 		} else {
3044 			dev_phy_addr = dev_phy_addr >> IWM_PAGE_2_EXP_SIZE;
3045 			fw_paging_cmd.device_phy_addr.addr32[blk_idx] =
3046 			    htole32(dev_phy_addr);
3047 		}
3048 		dmap = sc->fw_paging_db[blk_idx].fw_paging_block.map;
3049 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
3050 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3051 	}
3052 
3053 	return iwm_send_cmd_pdu(sc,
3054 	    iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD, IWM_ALWAYS_LONG_GROUP, 0),
3055 	    0, size, &fw_paging_cmd);
3056 }
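
/*
 * The size adjustment above accounts for the device_phy_addr union:
 * firmware without the new TX API takes 32-bit block addresses, so the
 * command shrinks by sizeof(uint64_t) - sizeof(uint32_t) = 4 bytes for
 * each of the IWM_NUM_OF_FW_PAGING_BLOCKS possible entries, and each
 * address is sent right-shifted by IWM_PAGE_2_EXP_SIZE.
 */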
3057 
3058 static void
3059 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3060     const uint16_t *mac_override, const uint16_t *nvm_hw)
3061 {
3062 	static const uint8_t reserved_mac[ETHER_ADDR_LEN] = {
3063 		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3064 	};
3065 	static const u_int8_t etheranyaddr[ETHER_ADDR_LEN] = {
3066 		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
3067 	};
3068 	const uint8_t *hw_addr;
3069 
3070 	if (mac_override) {
3071 		hw_addr = (const uint8_t *)(mac_override +
3072 		    IWM_MAC_ADDRESS_OVERRIDE_8000);
3073 
3074 		/*
3075 		 * Store the MAC address from MAO section.
3076 		 * No byte swapping is required in MAO section
3077 		 */
3078 		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
3079 
3080 		/*
3081 		 * Force the use of the OTP MAC address in case of reserved MAC
3082 		 * address in the NVM, or if address is given but invalid.
3083 		 */
3084 		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
3085 		    (memcmp(etherbroadcastaddr, data->hw_addr,
3086 		    sizeof(etherbroadcastaddr)) != 0) &&
3087 		    (memcmp(etheranyaddr, data->hw_addr,
3088 		    sizeof(etheranyaddr)) != 0) &&
3089 		    !ETHER_IS_MULTICAST(data->hw_addr))
3090 			return;
3091 	}
3092 
3093 	if (nvm_hw) {
3094 		/* Read the mac address from WFMP registers. */
3095 		uint32_t mac_addr0 =
3096 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3097 		uint32_t mac_addr1 =
3098 		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
3099 
3100 		hw_addr = (const uint8_t *)&mac_addr0;
3101 		data->hw_addr[0] = hw_addr[3];
3102 		data->hw_addr[1] = hw_addr[2];
3103 		data->hw_addr[2] = hw_addr[1];
3104 		data->hw_addr[3] = hw_addr[0];
3105 
3106 		hw_addr = (const uint8_t *)&mac_addr1;
3107 		data->hw_addr[4] = hw_addr[1];
3108 		data->hw_addr[5] = hw_addr[0];
3109 
3110 		return;
3111 	}
3112 
3113 	aprint_error_dev(sc->sc_dev, "mac address not found\n");
3114 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
3115 }
3116 
3117 static int
3118 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3119     const uint16_t *nvm_sw, const uint16_t *nvm_calib,
3120     const uint16_t *mac_override, const uint16_t *phy_sku,
3121     const uint16_t *regulatory)
3122 {
3123 	struct iwm_nvm_data *data = &sc->sc_nvm;
3124 	uint8_t hw_addr[ETHER_ADDR_LEN];
3125 	uint32_t sku;
3126 
3127 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3128 		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
3129 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
3130 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
3131 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
3132 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
3133 
3134 		data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
3135 		sku = le16_to_cpup(nvm_sw + IWM_SKU);
3136 	} else {
3137 		uint32_t radio_cfg = le32_to_cpup(phy_sku + IWM_RADIO_CFG_8000);
3138 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
3139 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
3140 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
3141 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
3142 		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
3143 		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
3144 
3145 		data->nvm_version = le32_to_cpup(nvm_sw + IWM_NVM_VERSION_8000);
3146 		sku = le32_to_cpup(phy_sku + IWM_SKU_8000);
3147 	}
3148 
3149 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
3150 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
3151 	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
3152 	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
3153 
3154 	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
3155 
3156 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3157 		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
3158 		data->hw_addr[0] = hw_addr[1];
3159 		data->hw_addr[1] = hw_addr[0];
3160 		data->hw_addr[2] = hw_addr[3];
3161 		data->hw_addr[3] = hw_addr[2];
3162 		data->hw_addr[4] = hw_addr[5];
3163 		data->hw_addr[5] = hw_addr[4];
3164 	} else
3165 		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
3166 
3167 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3168 		uint16_t lar_offset, lar_config;
3169 		lar_offset = data->nvm_version < 0xE39 ?
3170 		    IWM_NVM_LAR_OFFSET_8000_OLD : IWM_NVM_LAR_OFFSET_8000;
3171 		lar_config = le16_to_cpup(regulatory + lar_offset);
3172 		data->lar_enabled = !!(lar_config & IWM_NVM_LAR_ENABLED_8000);
3173 	}
3174 
3175 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
3176 		iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3177 		    iwm_nvm_channels, __arraycount(iwm_nvm_channels));
3178 	else
3179 		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3180 		    iwm_nvm_channels_8000, __arraycount(iwm_nvm_channels_8000));
3181 
3182 	data->calib_version = 255;   /* TODO:
3183 					this value will prevent some checks from
3184 					failing; we need to check whether this
3185 					field is still needed, and if it is,
3186 					where it lives in the NVM */
3187 
3188 	return 0;
3189 }
3190 
3191 static int
3192 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3193 {
3194 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3195 	const uint16_t *regulatory = NULL;
3196 
3197 	/* Checking for required sections */
3198 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3199 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3200 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3201 			return ENOENT;
3202 		}
3203 
3204 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3205 	} else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3206 		/* SW and REGULATORY sections are mandatory */
3207 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3208 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
3209 			return ENOENT;
3210 		}
3211 		/* MAC_OVERRIDE or at least HW section must exist */
3212 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
3213 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3214 			return ENOENT;
3215 		}
3216 
3217 		/* PHY_SKU section is mandatory in B0 */
3218 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
3219 			return ENOENT;
3220 		}
3221 
3222 		regulatory = (const uint16_t *)
3223 		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
3224 		hw = (const uint16_t *)
3225 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
3226 		mac_override =
3227 			(const uint16_t *)
3228 			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
3229 		phy_sku = (const uint16_t *)
3230 		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
3231 	} else {
3232 		panic("unknown device family %d\n", sc->sc_device_family);
3233 	}
3234 
3235 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
3236 	calib = (const uint16_t *)
3237 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
3238 
3239 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
3240 	    phy_sku, regulatory);
3241 }
3242 
3243 static int
3244 iwm_nvm_init(struct iwm_softc *sc)
3245 {
3246 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
3247 	int i, section, err;
3248 	uint16_t len;
3249 	uint8_t *buf;
3250 	const size_t bufsz = (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) ?
3251 	    IWM_MAX_NVM_SECTION_SIZE_8000 : IWM_MAX_NVM_SECTION_SIZE_7000;
3252 
3253 	/* Read From FW NVM */
3254 	DPRINTF(("Read NVM\n"));
3255 
3256 	memset(nvm_sections, 0, sizeof(nvm_sections));
3257 
3258 	buf = kmem_alloc(bufsz, KM_SLEEP);
3259 
3260 	for (i = 0; i < __arraycount(iwm_nvm_to_read); i++) {
3261 		section = iwm_nvm_to_read[i];
3262 		KASSERT(section <= IWM_NVM_NUM_OF_SECTIONS);
3263 
3264 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
3265 		if (err) {
3266 			err = 0;
3267 			continue;
3268 		}
3269 		nvm_sections[section].data = kmem_alloc(len, KM_SLEEP);
3270 		memcpy(nvm_sections[section].data, buf, len);
3271 		nvm_sections[section].length = len;
3272 	}
3273 	kmem_free(buf, bufsz);
3274 	if (err == 0)
3275 		err = iwm_parse_nvm_sections(sc, nvm_sections);
3276 
3277 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
3278 		if (nvm_sections[i].data != NULL)
3279 			kmem_free(nvm_sections[i].data, nvm_sections[i].length);
3280 	}
3281 
3282 	return err;
3283 }
3284 
3285 static int
3286 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
3287     const uint8_t *section, uint32_t byte_cnt)
3288 {
3289 	int err = EINVAL;
3290 	uint32_t chunk_sz, offset;
3291 
3292 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
3293 
3294 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
3295 		uint32_t addr, len;
3296 		const uint8_t *data;
3297 		bool is_extended = false;
3298 
3299 		addr = dst_addr + offset;
3300 		len = MIN(chunk_sz, byte_cnt - offset);
3301 		data = section + offset;
3302 
3303 		if (addr >= IWM_FW_MEM_EXTENDED_START &&
3304 		    addr <= IWM_FW_MEM_EXTENDED_END)
3305 			is_extended = true;
3306 
3307 		if (is_extended)
3308 			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
3309 			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3310 
3311 		err = iwm_firmware_load_chunk(sc, addr, data, len);
3312 
3313 		if (is_extended)
3314 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
3315 			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3316 
3317 		if (err)
3318 			break;
3319 	}
3320 
3321 	return err;
3322 }
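
/*
 * A section larger than IWM_FH_MEM_TB_MAX_LENGTH is therefore pushed as
 * consecutive chunks of at most that size.  Chunks whose destination
 * falls inside [IWM_FW_MEM_EXTENDED_START, IWM_FW_MEM_EXTENDED_END] are
 * written with the LMPM extended-address-space bit set for the duration
 * of the chunk transfer.
 */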
3323 
3324 static int
3325 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
3326     const uint8_t *section, uint32_t byte_cnt)
3327 {
3328 	struct iwm_dma_info *dma = &sc->fw_dma;
3329 	int err;
3330 
3331 	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
3332 	memcpy(dma->vaddr, section, byte_cnt);
3333 	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
3334 	    BUS_DMASYNC_PREWRITE);
3335 
3336 	sc->sc_fw_chunk_done = 0;
3337 
3338 	if (!iwm_nic_lock(sc))
3339 		return EBUSY;
3340 
3341 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3342 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
3343 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
3344 	    dst_addr);
3345 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
3346 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
3347 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
3348 	    (iwm_get_dma_hi_addr(dma->paddr)
3349 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
3350 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
3351 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
3352 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
3353 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
3354 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3355 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
3356 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
3357 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
3358 
3359 	iwm_nic_unlock(sc);
3360 
3361 	/* Wait for this segment to load. */
3362 	err = 0;
3363 	while (!sc->sc_fw_chunk_done) {
3364 		err = tsleep(&sc->sc_fw, 0, "iwmfw", mstohz(5000));
3365 		if (err)
3366 			break;
3367 	}
3368 	if (!sc->sc_fw_chunk_done) {
3369 		DPRINTF(("%s: fw chunk addr 0x%x len %d failed to load\n",
3370 		    DEVNAME(sc), dst_addr, byte_cnt));
3371 	}
3372 
3373 	return err;
3374 }
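
/*
 * Summary of the register sequence above: pause the service DMA channel,
 * point it at the SRAM destination and at the DRAM copy of the chunk
 * (low 32 address bits in CTRL0, high bits plus byte count in CTRL1),
 * mark the single TB valid, then re-enable the channel.  When the chunk
 * has been transferred, the interrupt path sets sc_fw_chunk_done and
 * wakes the tsleep above.
 */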
3375 
3376 static int
3377 iwm_load_cpu_sections_7000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3378     int cpu, int *first_ucode_section)
3379 {
3380 	int i, err = 0;
3381 	uint32_t last_read_idx = 0;
3382 	void *data;
3383 	uint32_t dlen;
3384 	uint32_t offset;
3385 
3386 	if (cpu == 1) {
3387 		*first_ucode_section = 0;
3388 	} else {
3389 		(*first_ucode_section)++;
3390 	}
3391 
3392 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3393 		last_read_idx = i;
3394 		data = fws->fw_sect[i].fws_data;
3395 		dlen = fws->fw_sect[i].fws_len;
3396 		offset = fws->fw_sect[i].fws_devoff;
3397 
3398 		/*
3399 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
3400 		 * CPU1 sections from the CPU2 sections.
3401 		 * The PAGING_SEPARATOR_SECTION delimiter separates the
3402 		 * non-paged CPU2 sections from the CPU2 paging sections.
3403 		 */
3404 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3405 		    offset == IWM_PAGING_SEPARATOR_SECTION)
3406 			break;
3407 
3408 		if (dlen > sc->sc_fwdmasegsz) {
3409 			err = EFBIG;
3410 		} else
3411 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3412 		if (err) {
3413 			DPRINTF(("%s: could not load firmware chunk %d "
3414 			    "(error %d)\n", DEVNAME(sc), i, err));
3415 			return err;
3416 		}
3417 	}
3418 
3419 	*first_ucode_section = last_read_idx;
3420 
3421 	return 0;
3422 }
3423 
3424 static int
3425 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3426 {
3427 	struct iwm_fw_sects *fws;
3428 	int err = 0;
3429 	int first_ucode_section;
3430 
3431 	fws = &sc->sc_fw.fw_sects[ucode_type];
3432 
3433 	DPRINTF(("%s: working with %s CPU\n", DEVNAME(sc),
3434 	    fws->is_dual_cpus ? "dual" : "single"));
3435 
3436 	/* Load the secured binary sections of CPU1 into the firmware. */
3437 	err = iwm_load_cpu_sections_7000(sc, fws, 1, &first_ucode_section);
3438 	if (err)
3439 		return err;
3440 
3441 	if (fws->is_dual_cpus) {
3442 		/* set CPU2 header address */
3443 		if (iwm_nic_lock(sc)) {
3444 			iwm_write_prph(sc,
3445 			    IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
3446 			    IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
3447 			iwm_nic_unlock(sc);
3448 		}
3449 
3450 		/* Load the binary sections of CPU2 into the firmware. */
3451 		err = iwm_load_cpu_sections_7000(sc, fws, 2,
3452 		    &first_ucode_section);
3453 		if (err)
3454 			return err;
3455 	}
3456 
3457 	/* release CPU reset */
3458 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
3459 
3460 	return 0;
3461 }
3462 
3463 static int
3464 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3465     int cpu, int *first_ucode_section)
3466 {
3467 	int shift_param;
3468 	int i, err = 0, sec_num = 0x1;
3469 	uint32_t val, last_read_idx = 0;
3470 	void *data;
3471 	uint32_t dlen;
3472 	uint32_t offset;
3473 
3474 	if (cpu == 1) {
3475 		shift_param = 0;
3476 		*first_ucode_section = 0;
3477 	} else {
3478 		shift_param = 16;
3479 		(*first_ucode_section)++;
3480 	}
3481 
3482 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3483 		last_read_idx = i;
3484 		data = fws->fw_sect[i].fws_data;
3485 		dlen = fws->fw_sect[i].fws_len;
3486 		offset = fws->fw_sect[i].fws_devoff;
3487 
3488 		/*
3489 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
3490 		 * CPU1 sections from the CPU2 sections.
3491 		 * The PAGING_SEPARATOR_SECTION delimiter separates the
3492 		 * non-paged CPU2 sections from the CPU2 paging sections.
3493 		 */
3494 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3495 		    offset == IWM_PAGING_SEPARATOR_SECTION)
3496 			break;
3497 
3498 		if (dlen > sc->sc_fwdmasegsz) {
3499 			err = EFBIG;
3500 		} else
3501 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
3502 		if (err) {
3503 			DPRINTF(("%s: could not load firmware chunk %d "
3504 			    "(error %d)\n", DEVNAME(sc), i, err));
3505 			return err;
3506 		}
3507 
3508 		/* Notify the ucode of the loaded section number and status */
3509 		if (iwm_nic_lock(sc)) {
3510 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
3511 			val = val | (sec_num << shift_param);
3512 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
3513 			sec_num = (sec_num << 1) | 0x1;
3514 			iwm_nic_unlock(sc);
3515 
3516 			/*
3517 			 * The firmware won't load correctly without this delay.
3518 			 */
3519 			DELAY(8000);
3520 		}
3521 	}
3522 
3523 	*first_ucode_section = last_read_idx;
3524 
3525 	if (iwm_nic_lock(sc)) {
3526 		if (cpu == 1)
3527 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
3528 		else
3529 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
3530 		iwm_nic_unlock(sc);
3531 	}
3532 
3533 	return 0;
3534 }
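
/*
 * The sec_num values written above (0x1, 0x3, 0x7, ...) accumulate in
 * IWM_FH_UCODE_LOAD_STATUS as a bitmask of the sections loaded so far,
 * shifted left by 16 for CPU2.  The final 0xFFFF (CPU1) or 0xFFFFFFFF
 * (CPU2) write signals the ucode that all sections are in place.
 */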
3535 
3536 static int
3537 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3538 {
3539 	struct iwm_fw_sects *fws;
3540 	int err = 0;
3541 	int first_ucode_section;
3542 
3543 	fws = &sc->sc_fw.fw_sects[ucode_type];
3544 
3545 	/* Configure the ucode to be ready to receive the secured image. */
3546 	/* Release CPU reset. */
3547 	if (iwm_nic_lock(sc)) {
3548 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
3549 		    IWM_RELEASE_CPU_RESET_BIT);
3550 		iwm_nic_unlock(sc);
3551 	}
3552 
3553 	/* Load the secured binary sections of CPU1 into the firmware. */
3554 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3555 	if (err)
3556 		return err;
3557 
3558 	/* Load the binary sections of CPU2 into the firmware. */
3559 	return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3560 }
3561 
3562 static int
3563 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3564 {
3565 	int err, w;
3566 
3567 	sc->sc_uc.uc_intr = 0;
3568 
3569 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
3570 		err = iwm_load_firmware_8000(sc, ucode_type);
3571 	else
3572 		err = iwm_load_firmware_7000(sc, ucode_type);
3573 	if (err)
3574 		return err;
3575 
3576 	/* wait for the firmware to load */
3577 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++)
3578 		err = tsleep(&sc->sc_uc, 0, "iwmuc", mstohz(100));
3579 	if (err || !sc->sc_uc.uc_ok) {
3580 		aprint_error_dev(sc->sc_dev,
3581 		    "could not load firmware (error %d, ok %d)\n",
3582 		    err, sc->sc_uc.uc_ok);
3583 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3584 			aprint_error_dev(sc->sc_dev, "cpu1 status: 0x%x\n",
3585 			    iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
3586 			aprint_error_dev(sc->sc_dev, "cpu2 status: 0x%x\n",
3587 			    iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
3588 		}
3589 	}
3590 
3591 	return err;
3592 }
3593 
3594 static int
3595 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3596 {
3597 	int err;
3598 
3599 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
3600 
3601 	err = iwm_nic_init(sc);
3602 	if (err) {
3603 		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
3604 		return err;
3605 	}
3606 
3607 	/* make sure rfkill handshake bits are cleared */
3608 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3609 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
3610 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3611 
3612 	/* clear (again), then enable host interrupts */
3613 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
3614 	iwm_enable_interrupts(sc);
3615 
3616 	/* really make sure rfkill handshake bits are cleared */
3617 	/* maybe we should write a few times more?  just to make sure */
3618 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3619 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3620 
3621 	return iwm_load_firmware(sc, ucode_type);
3622 }
3623 
3624 static int
3625 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3626 {
3627 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3628 		.valid = htole32(valid_tx_ant),
3629 	};
3630 
3631 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 0,
3632 	    sizeof(tx_ant_cmd), &tx_ant_cmd);
3633 }
3634 
3635 static int
3636 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3637 {
3638 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
3639 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3640 
3641 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3642 	phy_cfg_cmd.calib_control.event_trigger =
3643 	    sc->sc_default_calib[ucode_type].event_trigger;
3644 	phy_cfg_cmd.calib_control.flow_trigger =
3645 	    sc->sc_default_calib[ucode_type].flow_trigger;
3646 
3647 	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
3648 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3649 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3650 }
3651 
3652 static int
3653 iwm_load_ucode_wait_alive(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3654 {
3655 	struct iwm_fw_sects *fws;
3656 	enum iwm_ucode_type old_type = sc->sc_uc_current;
3657 	int err;
3658 
3659 	err = iwm_read_firmware(sc, ucode_type);
3660 	if (err)
3661 		return err;
3662 
3663 	sc->sc_uc_current = ucode_type;
3664 	err = iwm_start_fw(sc, ucode_type);
3665 	if (err) {
3666 		sc->sc_uc_current = old_type;
3667 		return err;
3668 	}
3669 
3670 	err = iwm_post_alive(sc);
3671 	if (err)
3672 		return err;
3673 
3674 	fws = &sc->sc_fw.fw_sects[ucode_type];
3675 	if (fws->paging_mem_size) {
3676 		err = iwm_save_fw_paging(sc, fws);
3677 		if (err)
3678 			return err;
3679 
3680 		err = iwm_send_paging_cmd(sc, fws);
3681 		if (err) {
3682 			iwm_free_fw_paging(sc);
3683 			return err;
3684 		}
3685 	}
3686 
3687 	return 0;
3688 }
3689 
3690 static int
3691 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3692 {
3693 	int err;
3694 
3695 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3696 		aprint_error_dev(sc->sc_dev,
3697 		    "radio is disabled by hardware switch\n");
3698 		return EPERM;
3699 	}
3700 
3701 	sc->sc_init_complete = 0;
3702 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
3703 	if (err) {
3704 		DPRINTF(("%s: failed to load init firmware\n", DEVNAME(sc)));
3705 		return err;
3706 	}
3707 
3708 	if (justnvm) {
3709 		err = iwm_nvm_init(sc);
3710 		if (err) {
3711 			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
3712 			return err;
3713 		}
3714 
3715 		memcpy(&sc->sc_ic.ic_myaddr, &sc->sc_nvm.hw_addr,
3716 		    ETHER_ADDR_LEN);
3717 		return 0;
3718 	}
3719 
3720 	err = iwm_send_bt_init_conf(sc);
3721 	if (err)
3722 		return err;
3723 
3724 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
3725 	if (err)
3726 		return err;
3727 
3728 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
3729 	if (err)
3730 		return err;
3731 
3732 	/*
3733 	 * Send phy configurations command to init uCode
3734 	 * to start the 16.0 uCode init image internal calibrations.
3735 	 */
3736 	err = iwm_send_phy_cfg_cmd(sc);
3737 	if (err)
3738 		return err;
3739 
3740 	/*
3741 	 * Nothing to do but wait for the init complete notification
3742 	 * from the firmware
3743 	 */
3744 	while (!sc->sc_init_complete) {
3745 		err = tsleep(&sc->sc_init_complete, 0, "iwminit", mstohz(2000));
3746 		if (err)
3747 			break;
3748 	}
3749 
3750 	return err;
3751 }
3752 
3753 static int
3754 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3755 {
3756 	struct iwm_rx_ring *ring = &sc->rxq;
3757 	struct iwm_rx_data *data = &ring->data[idx];
3758 	struct mbuf *m;
3759 	int err;
3760 	int fatal = 0;
3761 
3762 	m = m_gethdr(M_DONTWAIT, MT_DATA);
3763 	if (m == NULL)
3764 		return ENOBUFS;
3765 
3766 	if (size <= MCLBYTES) {
3767 		MCLGET(m, M_DONTWAIT);
3768 	} else {
3769 		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
3770 	}
3771 	if ((m->m_flags & M_EXT) == 0) {
3772 		m_freem(m);
3773 		return ENOBUFS;
3774 	}
3775 
3776 	if (data->m != NULL) {
3777 		bus_dmamap_unload(sc->sc_dmat, data->map);
3778 		fatal = 1;
3779 	}
3780 
3781 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3782 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3783 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
3784 	if (err) {
3785 		/* XXX */
3786 		if (fatal)
3787 			panic("iwm: could not load RX mbuf");
3788 		m_freem(m);
3789 		return err;
3790 	}
3791 	data->m = m;
3792 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3793 
3794 	/* Update RX descriptor. */
3795 	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
3796 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3797 	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
3798 
3799 	return 0;
3800 }
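
/*
 * The RX descriptor holds the buffer's bus address shifted right by 8,
 * so the hardware sees addresses in 256-byte units and the DMA buffers
 * must be 256-byte aligned for the stored address to be exact.
 */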
3801 
3802 #define IWM_RSSI_OFFSET 50
3803 static int
3804 iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3805 {
3806 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3807 	uint32_t agc_a, agc_b;
3808 	uint32_t val;
3809 
3810 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3811 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3812 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3813 
3814 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3815 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3816 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3817 
3818 	/*
3819 	 * dBm = rssi dB - agc dB - constant.
3820 	 * Higher AGC (higher radio gain) means lower signal.
3821 	 */
3822 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3823 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3824 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3825 
3826 	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3827 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
3828 
3829 	return max_rssi_dbm;
3830 }
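
/*
 * E.g. a chain reporting rssi 40 with agc 30 yields 40 - 50 - 30 = -40 dBm
 * given IWM_RSSI_OFFSET of 50; the stronger of the two chains is returned.
 */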
3831 
3832 /*
3833  * RSSI values are reported by the FW as positive values and must be
3834  * negated to obtain dBm.  Account for missing antennas by replacing 0
3835  * values with -256 dBm: practically zero power and an infeasible 8-bit value.
3836  */
3837 static int
3838 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3839 {
3840 	int energy_a, energy_b, energy_c, max_energy;
3841 	uint32_t val;
3842 
3843 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3844 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3845 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3846 	energy_a = energy_a ? -energy_a : -256;
3847 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3848 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3849 	energy_b = energy_b ? -energy_b : -256;
3850 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3851 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3852 	energy_c = energy_c ? -energy_c : -256;
3853 	max_energy = MAX(energy_a, energy_b);
3854 	max_energy = MAX(max_energy, energy_c);
3855 
3856 	DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
3857 	    energy_a, energy_b, energy_c, max_energy));
3858 
3859 	return max_energy;
3860 }
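
/*
 * E.g. an energy field of 45 maps to -45 dBm.  A missing antenna reports
 * 0 and becomes -256, so MAX() can never select it over a real reading.
 */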
3861 
3862 static void
3863 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3864     struct iwm_rx_data *data)
3865 {
3866 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3867 
3868 	DPRINTFN(20, ("received PHY stats\n"));
3869 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3870 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3871 
3872 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3873 }
3874 
3875 /*
3876  * Retrieve the average noise (in dBm) among receivers.
3877  */
3878 static int
3879 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3880 {
3881 	int i, total, nbant, noise;
3882 
3883 	total = nbant = noise = 0;
3884 	for (i = 0; i < 3; i++) {
3885 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3886 		if (noise) {
3887 			total += noise;
3888 			nbant++;
3889 		}
3890 	}
3891 
3892 	/* There should be at least one antenna but check anyway. */
3893 	return (nbant == 0) ? -127 : (total / nbant) - 107;
3894 }
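
/*
 * E.g. beacon silence RSSI bytes of 20, 22 and 0 (third antenna absent)
 * give total 42 over nbant 2, i.e. 42 / 2 - 107 = -86 dBm.
 */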
3895 
3896 static void
3897 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3898     struct iwm_rx_data *data)
3899 {
3900 	struct ieee80211com *ic = &sc->sc_ic;
3901 	struct ieee80211_frame *wh;
3902 	struct ieee80211_node *ni;
3903 	struct ieee80211_channel *c = NULL;
3904 	struct mbuf *m;
3905 	struct iwm_rx_phy_info *phy_info;
3906 	struct iwm_rx_mpdu_res_start *rx_res;
3907 	int device_timestamp;
3908 	uint32_t len;
3909 	uint32_t rx_pkt_status;
3910 	int rssi;
3911 	int s;
3912 
3913 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
3914 	    BUS_DMASYNC_POSTREAD);
3915 
3916 	phy_info = &sc->sc_last_phy_info;
3917 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3918 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3919 	len = le16toh(rx_res->byte_count);
3920 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
3921 	    sizeof(*rx_res) + len));
3922 
3923 	m = data->m;
3924 	m->m_data = pkt->data + sizeof(*rx_res);
3925 	m->m_pkthdr.len = m->m_len = len;
3926 
3927 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3928 		DPRINTF(("dsp size out of range [0,20]: %d\n",
3929 		    phy_info->cfg_phy_cnt));
3930 		return;
3931 	}
3932 
3933 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3934 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3935 		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
3936 		return; /* drop */
3937 	}
3938 
3939 	device_timestamp = le32toh(phy_info->system_timestamp);
3940 
3941 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3942 		rssi = iwm_get_signal_strength(sc, phy_info);
3943 	} else {
3944 		rssi = iwm_calc_rssi(sc, phy_info);
3945 	}
3946 	rssi = -rssi;
3947 
3948 	if (ic->ic_state == IEEE80211_S_SCAN)
3949 		iwm_fix_channel(sc, m);
3950 
3951 	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
3952 		return;
3953 
3954 	m_set_rcvif(m, IC2IFP(ic));
3955 
3956 	if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
3957 		c = &ic->ic_channels[le32toh(phy_info->channel)];
3958 
3959 	s = splnet();
3960 
3961 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3962 	if (c)
3963 		ni->ni_chan = c;
3964 
3965 	if (__predict_false(sc->sc_drvbpf != NULL)) {
3966 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3967 
3968 		tap->wr_flags = 0;
3969 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3970 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3971 		tap->wr_chan_freq =
3972 		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
3973 		tap->wr_chan_flags =
3974 		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
3975 		tap->wr_dbm_antsignal = (int8_t)rssi;
3976 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3977 		tap->wr_tsft = phy_info->system_timestamp;
3978 		if (phy_info->phy_flags &
3979 		    htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
3980 			uint8_t mcs = (phy_info->rate_n_flags &
3981 			    htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK |
3982 			      IWM_RATE_HT_MCS_NSS_MSK));
3983 			tap->wr_rate = (0x80 | mcs);
3984 		} else {
3985 			uint8_t rate = (phy_info->rate_n_flags &
3986 			    htole32(IWM_RATE_LEGACY_RATE_MSK));
3987 			switch (rate) {
3988 			/* CCK rates. */
3989 			case  10: tap->wr_rate =   2; break;
3990 			case  20: tap->wr_rate =   4; break;
3991 			case  55: tap->wr_rate =  11; break;
3992 			case 110: tap->wr_rate =  22; break;
3993 			/* OFDM rates. */
3994 			case 0xd: tap->wr_rate =  12; break;
3995 			case 0xf: tap->wr_rate =  18; break;
3996 			case 0x5: tap->wr_rate =  24; break;
3997 			case 0x7: tap->wr_rate =  36; break;
3998 			case 0x9: tap->wr_rate =  48; break;
3999 			case 0xb: tap->wr_rate =  72; break;
4000 			case 0x1: tap->wr_rate =  96; break;
4001 			case 0x3: tap->wr_rate = 108; break;
4002 			/* Unknown rate: should not happen. */
4003 			default:  tap->wr_rate =   0;
4004 			}
4005 		}
4006 
4007 		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m, BPF_D_IN);
4008 	}
4009 	ieee80211_input(ic, m, ni, rssi, device_timestamp);
4010 	ieee80211_free_node(ni);
4011 
4012 	splx(s);
4013 }
4014 
4015 static void
4016 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4017     struct iwm_node *in)
4018 {
4019 	struct ieee80211com *ic = &sc->sc_ic;
4020 	struct ifnet *ifp = IC2IFP(ic);
4021 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
4022 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
4023 	int failack = tx_resp->failure_frame;
4024 
4025 	KASSERT(tx_resp->frame_count == 1);
4026 
4027 	/* Update rate control statistics. */
4028 	in->in_amn.amn_txcnt++;
4029 	if (failack > 0) {
4030 		in->in_amn.amn_retrycnt++;
4031 	}
4032 
4033 	if (status != IWM_TX_STATUS_SUCCESS &&
4034 	    status != IWM_TX_STATUS_DIRECT_DONE)
4035 		if_statinc(ifp, if_oerrors);
4036 	else
4037 		if_statinc(ifp, if_opackets);
4038 }
4039 
4040 static void
4041 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4042     struct iwm_rx_data *data)
4043 {
4044 	struct ieee80211com *ic = &sc->sc_ic;
4045 	struct ifnet *ifp = IC2IFP(ic);
4046 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
4047 	int idx = cmd_hdr->idx;
4048 	int qid = cmd_hdr->qid;
4049 	struct iwm_tx_ring *ring = &sc->txq[qid];
4050 	struct iwm_tx_data *txd = &ring->data[idx];
4051 	struct iwm_node *in = txd->in;
4052 	int s;
4053 
4054 	s = splnet();
4055 
4056 	if (txd->done) {
4057 		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
4058 		    DEVNAME(sc)));
4059 		splx(s);
4060 		return;
4061 	}
4062 
4063 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
4064 	    BUS_DMASYNC_POSTREAD);
4065 
4066 	sc->sc_tx_timer = 0;
4067 
4068 	iwm_rx_tx_cmd_single(sc, pkt, in);
4069 
4070 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
4071 	    BUS_DMASYNC_POSTWRITE);
4072 	bus_dmamap_unload(sc->sc_dmat, txd->map);
4073 	m_freem(txd->m);
4074 
4075 	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
4076 	KASSERT(txd->done == 0);
4077 	txd->done = 1;
4078 	KASSERT(txd->in);
4079 
4080 	txd->m = NULL;
4081 	txd->in = NULL;
4082 	ieee80211_free_node(&in->in_ni);
4083 
4084 	if (--ring->queued < IWM_TX_RING_LOMARK) {
4085 		sc->qfullmsk &= ~(1 << qid);
4086 		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
4087 			ifp->if_flags &= ~IFF_OACTIVE;
4088 			KASSERT(KERNEL_LOCKED_P());
4089 			iwm_start(ifp);
4090 		}
4091 	}
4092 
4093 	splx(s);
4094 }
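
/*
 * Once a queue drains below IWM_TX_RING_LOMARK its bit leaves qfullmsk;
 * when no queue is full any more, IFF_OACTIVE is cleared and iwm_start()
 * is called to resume transmission.
 */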
4095 
4096 static int
4097 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
4098 {
4099 	struct iwm_binding_cmd cmd;
4100 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
4101 	int i, err;
4102 	uint32_t status;
4103 
4104 	memset(&cmd, 0, sizeof(cmd));
4105 
4106 	cmd.id_and_color
4107 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4108 	cmd.action = htole32(action);
4109 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4110 
4111 	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4112 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
4113 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
4114 
4115 	status = 0;
4116 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
4117 	    sizeof(cmd), &cmd, &status);
4118 	if (err == 0 && status != 0)
4119 		err = EIO;
4120 
4121 	return err;
4122 }
4123 
4124 static void
4125 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4126     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
4127 {
4128 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
4129 
4130 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
4131 	    ctxt->color));
4132 	cmd->action = htole32(action);
4133 	cmd->apply_time = htole32(apply_time);
4134 }
4135 
4136 static void
4137 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
4138     struct ieee80211_channel *chan, uint8_t chains_static,
4139     uint8_t chains_dynamic)
4140 {
4141 	struct ieee80211com *ic = &sc->sc_ic;
4142 	uint8_t active_cnt, idle_cnt;
4143 
4144 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
4145 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
4146 
4147 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
4148 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
4149 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
4150 
4151 	/* Set the rx chains. */
4152 	idle_cnt = chains_static;
4153 	active_cnt = chains_dynamic;
4154 
4155 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
4156 	    IWM_PHY_RX_CHAIN_VALID_POS);
4157 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
4158 	cmd->rxchain_info |= htole32(active_cnt <<
4159 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
4160 
4161 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
4162 }
4163 
4164 static int
4165 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4166     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
4167     uint32_t apply_time)
4168 {
4169 	struct iwm_phy_context_cmd cmd;
4170 
4171 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
4172 
4173 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
4174 	    chains_static, chains_dynamic);
4175 
4176 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
4177 	    sizeof(struct iwm_phy_context_cmd), &cmd);
4178 }
4179 
4180 static int
4181 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4182 {
4183 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
4184 	struct iwm_tfd *desc;
4185 	struct iwm_tx_data *txdata;
4186 	struct iwm_device_cmd *cmd;
4187 	struct mbuf *m;
4188 	bus_addr_t paddr;
4189 	uint32_t addr_lo;
4190 	int err = 0, i, paylen, off, s;
4191 	int code;
4192 	int async, wantresp;
4193 	int group_id;
4194 	size_t hdrlen, datasz;
4195 	uint8_t *data;
4196 
4197 	code = hcmd->id;
4198 	async = hcmd->flags & IWM_CMD_ASYNC;
4199 	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
4200 
4201 	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
4202 		paylen += hcmd->len[i];
4203 	}
4204 
4205 	/* if the command wants an answer, busy sc_cmd_resp */
4206 	if (wantresp) {
4207 		KASSERT(!async);
4208 		while (sc->sc_wantresp != IWM_CMD_RESP_IDLE)
4209 			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
4210 		sc->sc_wantresp = ring->qid << 16 | ring->cur;
4211 	}
4212 
4213 	/*
4214 	 * Is the hardware still available (e.g. after the wait above)?
4215 	 */
4216 	s = splnet();
4217 	if (sc->sc_flags & IWM_FLAG_STOPPED) {
4218 		err = ENXIO;
4219 		goto out;
4220 	}
4221 
4222 	desc = &ring->desc[ring->cur];
4223 	txdata = &ring->data[ring->cur];
4224 
4225 	group_id = iwm_cmd_groupid(code);
4226 	if (group_id != 0) {
4227 		hdrlen = sizeof(cmd->hdr_wide);
4228 		datasz = sizeof(cmd->data_wide);
4229 	} else {
4230 		hdrlen = sizeof(cmd->hdr);
4231 		datasz = sizeof(cmd->data);
4232 	}
4233 
4234 	if (paylen > datasz) {
4235 		/* Command is too large to fit in pre-allocated space. */
4236 		size_t totlen = hdrlen + paylen;
4237 		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
4238 			aprint_error_dev(sc->sc_dev,
4239 			    "firmware command too long (%zd bytes)\n", totlen);
4240 			err = EINVAL;
4241 			goto out;
4242 		}
4243 		m = m_gethdr(M_DONTWAIT, MT_DATA);
4244 		if (m == NULL) {
4245 			err = ENOMEM;
4246 			goto out;
4247 		}
4248 		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
4249 		if (!(m->m_flags & M_EXT)) {
4250 			aprint_error_dev(sc->sc_dev,
4251 			    "could not get fw cmd mbuf (%zd bytes)\n", totlen);
4252 			m_freem(m);
4253 			err = ENOMEM;
4254 			goto out;
4255 		}
4256 		cmd = mtod(m, struct iwm_device_cmd *);
4257 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
4258 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4259 		if (err) {
4260 			aprint_error_dev(sc->sc_dev,
4261 			    "could not load fw cmd mbuf (%zd bytes)\n", totlen);
4262 			m_freem(m);
4263 			goto out;
4264 		}
4265 		txdata->m = m;
4266 		paddr = txdata->map->dm_segs[0].ds_addr;
4267 	} else {
4268 		cmd = &ring->cmd[ring->cur];
4269 		paddr = txdata->cmd_paddr;
4270 	}
4271 
4272 	if (group_id != 0) {
4273 		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
4274 		cmd->hdr_wide.group_id = group_id;
4275 		cmd->hdr_wide.qid = ring->qid;
4276 		cmd->hdr_wide.idx = ring->cur;
4277 		cmd->hdr_wide.length = htole16(paylen);
4278 		cmd->hdr_wide.version = iwm_cmd_version(code);
4279 		data = cmd->data_wide;
4280 	} else {
4281 		cmd->hdr.code = code;
4282 		cmd->hdr.flags = 0;
4283 		cmd->hdr.qid = ring->qid;
4284 		cmd->hdr.idx = ring->cur;
4285 		data = cmd->data;
4286 	}
4287 
4288 	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
4289 		if (hcmd->len[i] == 0)
4290 			continue;
4291 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
4292 		off += hcmd->len[i];
4293 	}
4294 	KASSERT(off == paylen);
4295 
4296 	/* lo field is not aligned */
4297 	addr_lo = htole32((uint32_t)paddr);
4298 	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
4299 	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
4300 	    | ((hdrlen + paylen) << 4));
4301 	desc->num_tbs = 1;
4302 
4303 	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
4304 	    code, hdrlen + paylen, async ? " (async)" : ""));
4305 
4306 	if (paylen > datasz) {
4307 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0, hdrlen + paylen,
4308 		    BUS_DMASYNC_PREWRITE);
4309 	} else {
4310 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4311 		    (uint8_t *)cmd - (uint8_t *)ring->cmd, hdrlen + paylen,
4312 		    BUS_DMASYNC_PREWRITE);
4313 	}
4314 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4315 	    (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
4316 	    BUS_DMASYNC_PREWRITE);
4317 
4318 	err = iwm_set_cmd_in_flight(sc);
4319 	if (err)
4320 		goto out;
4321 	ring->queued++;
4322 
4323 #if 0
4324 	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
4325 #endif
4326 	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
4327 	    code, ring->qid, ring->cur));
4328 
4329 	/* Kick command ring. */
4330 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4331 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4332 
4333 	if (!async) {
4334 		int generation = sc->sc_generation;
4335 		err = tsleep(desc, PCATCH, "iwmcmd", mstohz(2000));
4336 		if (err == 0) {
4337 			/* if hardware is no longer up, return error */
4338 			if (generation != sc->sc_generation) {
4339 				err = ENXIO;
4340 			} else {
4341 				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
4342 			}
4343 		}
4344 	}
4345  out:
4346 	if (wantresp && err) {
4347 		iwm_free_resp(sc, hcmd);
4348 	}
4349 	splx(s);
4350 
4351 	return err;
4352 }
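
/*
 * A synchronous caller sleeps on the TFD address until iwm_cmd_done()
 * issues the matching wakeup().  The sc_generation check distinguishes a
 * real completion from a device reset that occurred while sleeping, in
 * which case sc_cmd_resp would be stale and ENXIO is returned instead.
 */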
4353 
4354 static int
4355 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
4356     uint16_t len, const void *data)
4357 {
4358 	struct iwm_host_cmd cmd = {
4359 		.id = id,
4360 		.len = { len, },
4361 		.data = { data, },
4362 		.flags = flags,
4363 	};
4364 
4365 	return iwm_send_cmd(sc, &cmd);
4366 }
4367 
4368 static int
4369 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
4370     uint32_t *status)
4371 {
4372 	struct iwm_rx_packet *pkt;
4373 	struct iwm_cmd_response *resp;
4374 	int err, resp_len;
4375 
4376 	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
4377 	cmd->flags |= IWM_CMD_WANT_SKB;
4378 
4379 	err = iwm_send_cmd(sc, cmd);
4380 	if (err)
4381 		return err;
4382 	pkt = cmd->resp_pkt;
4383 
4384 	/* Can happen if RFKILL is asserted */
4385 	if (!pkt) {
4386 		err = 0;
4387 		goto out_free_resp;
4388 	}
4389 
4390 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
4391 		err = EIO;
4392 		goto out_free_resp;
4393 	}
4394 
4395 	resp_len = iwm_rx_packet_payload_len(pkt);
4396 	if (resp_len != sizeof(*resp)) {
4397 		err = EIO;
4398 		goto out_free_resp;
4399 	}
4400 
4401 	resp = (void *)pkt->data;
4402 	*status = le32toh(resp->status);
4403  out_free_resp:
4404 	iwm_free_resp(sc, cmd);
4405 	return err;
4406 }
4407 
4408 static int
4409 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
4410     const void *data, uint32_t *status)
4411 {
4412 	struct iwm_host_cmd cmd = {
4413 		.id = id,
4414 		.len = { len, },
4415 		.data = { data, },
4416 	};
4417 
4418 	return iwm_send_cmd_status(sc, &cmd, status);
4419 }
4420 
4421 static void
4422 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4423 {
4424 	KASSERT(sc->sc_wantresp != IWM_CMD_RESP_IDLE);
4425 	KASSERT((hcmd->flags & IWM_CMD_WANT_SKB) == IWM_CMD_WANT_SKB);
4426 	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
4427 	wakeup(&sc->sc_wantresp);
4428 }
4429 
4430 static void
4431 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx)
4432 {
4433 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
4434 	struct iwm_tx_data *data;
4435 	int s;
4436 
4437 	if (qid != IWM_CMD_QUEUE) {
4438 		return;	/* Not a command ack. */
4439 	}
4440 
4441 	s = splnet();
4442 
4443 	data = &ring->data[idx];
4444 
4445 	if (data->m != NULL) {
4446 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
4447 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4448 		bus_dmamap_unload(sc->sc_dmat, data->map);
4449 		m_freem(data->m);
4450 		data->m = NULL;
4451 	}
4452 	wakeup(&ring->desc[idx]);
4453 
4454 	if (((idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
4455 		aprint_error_dev(sc->sc_dev,
4456 		    "Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
4457 		    idx, ring->queued, ring->cur);
4458 	}
4459 
4460 	KASSERT(ring->queued > 0);
4461 	if (--ring->queued == 0)
4462 		iwm_clear_cmd_in_flight(sc);
4463 
4464 	splx(s);
4465 }
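
/*
 * Commands complete in ring order, so the completed idx plus the number
 * of commands still queued must wrap around to ring->cur; the warning
 * above fires when an acknowledgement was missed.
 */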
4466 
4467 #if 0
4468 /*
4469  * necessary only for block ack mode
4470  */
4471 void
4472 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
4473     uint16_t len)
4474 {
4475 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
4476 	uint16_t w_val;
4477 
4478 	scd_bc_tbl = sc->sched_dma.vaddr;
4479 
4480 	len += 8; /* magic numbers came naturally from paris */
4481 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
4482 		len = roundup(len, 4) / 4;
4483 
4484 	w_val = htole16(sta_id << 12 | len);
4485 
4486 	/* Update TX scheduler. */
4487 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
4488 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4489 	    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] - (char *)(void *)sc->sched_dma.vaddr,
4490 	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
4491 
4492 	/* I really wonder what this is ?!? */
4493 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
4494 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
4495 		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4496 		    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] -
4497 		    (char *)(void *)sc->sched_dma.vaddr,
4498 		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
4499 	}
4500 }
4501 #endif
4502 
4503 /*
4504  * Fill in various bits for management frames, and leave them
4505  * unfilled for data frames (firmware takes care of that).
4506  * Return the selected TX rate.
4507  */
4508 static const struct iwm_rate *
4509 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
4510     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
4511 {
4512 	struct ieee80211com *ic = &sc->sc_ic;
4513 	struct ieee80211_node *ni = &in->in_ni;
4514 	const struct iwm_rate *rinfo;
4515 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4516 	int ridx, rate_flags, i, ind;
4517 	int nrates = ni->ni_rates.rs_nrates;
4518 
4519 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
4520 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
4521 
4522 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4523 	    type != IEEE80211_FC0_TYPE_DATA) {
4524 		/* for non-data, use the lowest supported rate */
4525 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4526 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
4527 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
4528 #ifndef IEEE80211_NO_HT
4529 	} else if (ic->ic_fixed_mcs != -1) {
4530 		ridx = sc->sc_fixed_ridx;
4531 #endif
4532 	} else if (ic->ic_fixed_rate != -1) {
4533 		ridx = sc->sc_fixed_ridx;
4534 	} else {
4535 		/* for data frames, use RS table */
4536 		tx->initial_rate_index = 0;
4537 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
4538 		DPRINTFN(12, ("start with txrate %d\n",
4539 		    tx->initial_rate_index));
4540 #ifndef IEEE80211_NO_HT
4541 		if (ni->ni_flags & IEEE80211_NODE_HT) {
4542 			ridx = iwm_mcs2ridx[ni->ni_txmcs];
4543 			return &iwm_rates[ridx];
4544 		}
4545 #endif
4546 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4547 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
4548 		for (i = 0; i < nrates; i++) {
4549 			if (iwm_rates[i].rate == (ni->ni_txrate &
4550 			    IEEE80211_RATE_VAL)) {
4551 				ridx = i;
4552 				break;
4553 			}
4554 		}
4555 		return &iwm_rates[ridx];
4556 	}
4557 
4558 	rinfo = &iwm_rates[ridx];
4559 	for (i = 0, ind = sc->sc_mgmt_last_antenna;
4560 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
4561 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
4562 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
4563 			sc->sc_mgmt_last_antenna = ind;
4564 			break;
4565 		}
4566 	}
4567 	rate_flags = (1 << sc->sc_mgmt_last_antenna) << IWM_RATE_MCS_ANT_POS;
4568 	if (IWM_RIDX_IS_CCK(ridx))
4569 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
4570 #ifndef IEEE80211_NO_HT
4571 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4572 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4573 		rate_flags |= IWM_RATE_MCS_HT_MSK;
4574 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4575 	} else
4576 #endif
4577 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4578 
4579 	return rinfo;
4580 }
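
/*
 * For management frames the TX antenna rotates: starting after
 * sc_mgmt_last_antenna, the first antenna reported valid by
 * iwm_fw_valid_tx_ant() is selected and remembered for the next frame.
 */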
4581 
4582 #define TB0_SIZE 16
4583 static int
4584 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
4585 {
4586 	struct ieee80211com *ic = &sc->sc_ic;
4587 	struct iwm_node *in = (struct iwm_node *)ni;
4588 	struct iwm_tx_ring *ring;
4589 	struct iwm_tx_data *data;
4590 	struct iwm_tfd *desc;
4591 	struct iwm_device_cmd *cmd;
4592 	struct iwm_tx_cmd *tx;
4593 	struct ieee80211_frame *wh;
4594 	struct ieee80211_key *k = NULL;
4595 	struct mbuf *m1;
4596 	const struct iwm_rate *rinfo;
4597 	uint32_t flags;
4598 	u_int hdrlen;
4599 	bus_dma_segment_t *seg;
4600 	uint8_t tid, type;
4601 	int i, totlen, err, pad;
4602 
4603 	wh = mtod(m, struct ieee80211_frame *);
4604 	hdrlen = ieee80211_anyhdrsize(wh);
4605 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4606 
4607 	tid = 0;
4608 
4609 	ring = &sc->txq[ac];
4610 	desc = &ring->desc[ring->cur];
4611 	memset(desc, 0, sizeof(*desc));
4612 	data = &ring->data[ring->cur];
4613 
4614 	cmd = &ring->cmd[ring->cur];
4615 	cmd->hdr.code = IWM_TX_CMD;
4616 	cmd->hdr.flags = 0;
4617 	cmd->hdr.qid = ring->qid;
4618 	cmd->hdr.idx = ring->cur;
4619 
4620 	tx = (void *)cmd->data;
4621 	memset(tx, 0, sizeof(*tx));
4622 
4623 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
4624 
4625 	if (__predict_false(sc->sc_drvbpf != NULL)) {
4626 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
4627 
4628 		tap->wt_flags = 0;
4629 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
4630 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
4631 #ifndef IEEE80211_NO_HT
4632 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4633 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4634 		    type == IEEE80211_FC0_TYPE_DATA &&
4635 		    rinfo->plcp == IWM_RATE_INVM_PLCP) {
4636 			tap->wt_rate = (0x80 | rinfo->ht_plcp);
4637 		} else
4638 #endif
4639 			tap->wt_rate = rinfo->rate;
4640 		tap->wt_hwqueue = ac;
4641 		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
4642 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4643 
4644 		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m, BPF_D_OUT);
4645 	}
4646 
4647 	/* Encrypt the frame if need be. */
4648 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
4649 		k = ieee80211_crypto_encap(ic, ni, m);
4650 		if (k == NULL) {
4651 			m_freem(m);
4652 			return ENOBUFS;
4653 		}
4654 		/* Packet header may have moved, reset our local pointer. */
4655 		wh = mtod(m, struct ieee80211_frame *);
4656 	}
4657 	totlen = m->m_pkthdr.len;
4658 
4659 	flags = 0;
4660 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4661 		flags |= IWM_TX_CMD_FLG_ACK;
4662 	}
4663 
4664 	if (type == IEEE80211_FC0_TYPE_DATA &&
4665 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4666 	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
4667 	     (ic->ic_flags & IEEE80211_F_USEPROT)))
4668 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
4669 
4670 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4671 	    type != IEEE80211_FC0_TYPE_DATA)
4672 		tx->sta_id = IWM_AUX_STA_ID;
4673 	else
4674 		tx->sta_id = IWM_STATION_ID;
4675 
4676 	if (type == IEEE80211_FC0_TYPE_MGT) {
4677 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4678 
4679 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4680 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4681 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
4682 		else
4683 			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
4684 	} else {
4685 		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
4686 	}
4687 
4688 	if (hdrlen & 3) {
4689 		/* First segment length must be a multiple of 4. */
4690 		flags |= IWM_TX_CMD_FLG_MH_PAD;
4691 		pad = 4 - (hdrlen & 3);
4692 	} else
4693 		pad = 0;
4694 
4695 	tx->driver_txop = 0;
4696 	tx->next_frame_len = 0;
4697 
4698 	tx->len = htole16(totlen);
4699 	tx->tid_tspec = tid;
4700 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
4701 
4702 	/* Set physical address of "scratch area". */
4703 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
4704 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
4705 
4706 	/* Copy 802.11 header in TX command. */
4707 	memcpy(tx + 1, wh, hdrlen);
4708 
4709 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
4710 
4711 	tx->sec_ctl = 0;
4712 	tx->tx_flags |= htole32(flags);
4713 
4714 	/* Trim 802.11 header. */
4715 	m_adj(m, hdrlen);
4716 
4717 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4718 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4719 	if (err) {
4720 		if (err != EFBIG) {
4721 			aprint_error_dev(sc->sc_dev,
4722 			    "can't map mbuf (error %d)\n", err);
4723 			m_freem(m);
4724 			return err;
4725 		}
4726 		/* Too many DMA segments, linearize mbuf. */
4727 		MGETHDR(m1, M_DONTWAIT, MT_DATA);
4728 		if (m1 == NULL) {
4729 			m_freem(m);
4730 			return ENOBUFS;
4731 		}
4732 		if (m->m_pkthdr.len > MHLEN) {
4733 			MCLGET(m1, M_DONTWAIT);
4734 			if (!(m1->m_flags & M_EXT)) {
4735 				m_freem(m);
4736 				m_freem(m1);
4737 				return ENOBUFS;
4738 			}
4739 		}
4740 		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
4741 		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
4742 		m_freem(m);
4743 		m = m1;
4744 
4745 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4746 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4747 		if (err) {
4748 			aprint_error_dev(sc->sc_dev,
4749 			    "can't map mbuf (error %d)\n", err);
4750 			m_freem(m);
4751 			return err;
4752 		}
4753 	}
4754 	data->m = m;
4755 	data->in = in;
4756 	data->done = 0;
4757 
4758 	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
4759 	KASSERT(data->in != NULL);
4760 
4761 	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d type=%d "
4762 	    "subtype=%x tx_flags=%08x init_rateidx=%08x rate_n_flags=%08x\n",
4763 	    ring->qid, ring->cur, totlen, data->map->dm_nsegs, type,
4764 	    (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >> 4,
4765 	    le32toh(tx->tx_flags), le32toh(tx->initial_rate_index),
4766 	    le32toh(tx->rate_n_flags)));
4767 
4768 	/* Fill TX descriptor. */
4769 	desc->num_tbs = 2 + data->map->dm_nsegs;
4770 
4771 	desc->tbs[0].lo = htole32(data->cmd_paddr);
4772 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4773 	    (TB0_SIZE << 4);
4774 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
4775 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4776 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
4777 	      + hdrlen + pad - TB0_SIZE) << 4);
4778 
4779 	/* Other DMA segments are for data payload. */
4780 	seg = data->map->dm_segs;
4781 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
4782 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
4783 		desc->tbs[i+2].hi_n_len =
4784 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
4785 		    | ((seg->ds_len) << 4);
4786 	}
4787 
4788 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
4789 	    BUS_DMASYNC_PREWRITE);
4790 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4791 	    (uint8_t *)cmd - (uint8_t *)ring->cmd, sizeof(*cmd),
4792 	    BUS_DMASYNC_PREWRITE);
4793 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4794 	    (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
4795 	    BUS_DMASYNC_PREWRITE);
4796 
4797 #if 0
4798 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id,
4799 	    le16toh(tx->len));
4800 #endif
4801 
4802 	/* Kick TX ring. */
4803 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4804 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4805 
4806 	/* Mark TX ring as full if we reach a certain threshold. */
4807 	if (++ring->queued > IWM_TX_RING_HIMARK) {
4808 		sc->qfullmsk |= 1 << ring->qid;
4809 	}
4810 
4811 	return 0;
4812 }
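
/*
 * The TFD built above carries 2 + nsegs buffers: TB0 holds the first
 * TB0_SIZE (16) bytes of the command, TB1 the rest of the TX command
 * plus the (padded) 802.11 header, and the remaining TBs the mbuf data
 * segments.  Each hi_n_len packs the upper address bits into its low
 * 4 bits and the segment length into the upper 12.
 */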
4813 
4814 #if 0
4815 /* not necessary? */
4816 static int
4817 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
4818 {
4819 	struct iwm_tx_path_flush_cmd flush_cmd = {
4820 		.queues_ctl = htole32(tfd_msk),
4821 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4822 	};
4823 	int err;
4824 
4825 	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, sync ? 0 : IWM_CMD_ASYNC,
4826 	    sizeof(flush_cmd), &flush_cmd);
4827 	if (err)
4828 		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
4829 		    err);
4830 	return err;
4831 }
4832 #endif
4833 
4834 static void
4835 iwm_led_enable(struct iwm_softc *sc)
4836 {
4837 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
4838 }
4839 
4840 static void
4841 iwm_led_disable(struct iwm_softc *sc)
4842 {
4843 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
4844 }
4845 
4846 static int
4847 iwm_led_is_enabled(struct iwm_softc *sc)
4848 {
4849 	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
4850 }
4851 
4852 static void
4853 iwm_led_blink_timeout(void *arg)
4854 {
4855 	struct iwm_softc *sc = arg;
4856 
4857 	if (iwm_led_is_enabled(sc))
4858 		iwm_led_disable(sc);
4859 	else
4860 		iwm_led_enable(sc);
4861 
4862 	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4863 }
4864 
4865 static void
4866 iwm_led_blink_start(struct iwm_softc *sc)
4867 {
4868 	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4869 }
4870 
4871 static void
4872 iwm_led_blink_stop(struct iwm_softc *sc)
4873 {
4874 	callout_stop(&sc->sc_led_blink_to);
4875 	iwm_led_disable(sc);
4876 }
4877 
4878 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
4879 
4880 static int
4881 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
4882     struct iwm_beacon_filter_cmd *cmd)
4883 {
4884 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4885 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
4886 }
4887 
4888 static void
4889 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
4890     struct iwm_beacon_filter_cmd *cmd)
4891 {
4892 	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
4893 }
4894 
4895 static int
4896 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
4897 {
4898 	struct iwm_beacon_filter_cmd cmd = {
4899 		IWM_BF_CMD_CONFIG_DEFAULTS,
4900 		.bf_enable_beacon_filter = htole32(1),
4901 		.ba_enable_beacon_abort = htole32(enable),
4902 	};
4903 
4904 	if (!sc->sc_bf.bf_enabled)
4905 		return 0;
4906 
4907 	sc->sc_bf.ba_enabled = enable;
4908 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4909 	return iwm_beacon_filter_send_cmd(sc, &cmd);
4910 }
4911 
4912 static void
4913 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4914     struct iwm_mac_power_cmd *cmd)
4915 {
4916 	struct ieee80211_node *ni = &in->in_ni;
4917 	int dtim_period, dtim_msec, keep_alive;
4918 
4919 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4920 	    in->in_color));
4921 	if (ni->ni_dtim_period)
4922 		dtim_period = ni->ni_dtim_period;
4923 	else
4924 		dtim_period = 1;
4925 
4926 	/*
4927 	 * Regardless of power management state the driver must set
4928 	 * Regardless of power management state, the driver must set the
4929 	 * keep-alive period; the FW uses it to send keep-alive NDPs
4930 	 * immediately after association.  Ensure that the keep-alive
4931 	 * period is at least 3 * DTIM.
4932 	dtim_msec = dtim_period * ni->ni_intval;
4933 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4934 	keep_alive = roundup(keep_alive, 1000) / 1000;
4935 	cmd->keep_alive_seconds = htole16(keep_alive);
4936 
4937 #ifdef notyet
4938 	cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4939 	cmd->rx_data_timeout = IWM_DEFAULT_PS_RX_DATA_TIMEOUT;
4940 	cmd->tx_data_timeout = IWM_DEFAULT_PS_TX_DATA_TIMEOUT;
4941 #endif
4942 }
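
/*
 * E.g. with dtim_period 1 and ni_intval 100, dtim_msec is 100 and
 * 3 * dtim_msec = 300 loses to the 25000 ms floor, so
 * roundup(25000, 1000) / 1000 programs a keep alive of 25 seconds.
 */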
4943 
4944 static int
4945 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4946 {
4947 	int err;
4948 	int ba_enable;
4949 	struct iwm_mac_power_cmd cmd;
4950 
4951 	memset(&cmd, 0, sizeof(cmd));
4952 
4953 	iwm_power_build_cmd(sc, in, &cmd);
4954 
4955 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
4956 	    sizeof(cmd), &cmd);
4957 	if (err)
4958 		return err;
4959 
4960 	ba_enable = !!(cmd.flags &
4961 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4962 	return iwm_update_beacon_abort(sc, in, ba_enable);
4963 }
4964 
4965 static int
4966 iwm_power_update_device(struct iwm_softc *sc)
4967 {
4968 	struct iwm_device_power_cmd cmd = {
4969 #ifdef notyet
4970 		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
4971 #else
4972 		.flags = 0,
4973 #endif
4974 	};
4975 
4976 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
4977 		return 0;
4978 
4979 	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
4980 	DPRINTF(("Sending device power command with flags = 0x%X\n",
4981 	    cmd.flags));
4982 
4983 	return iwm_send_cmd_pdu(sc, IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
4984 }
4985 
4986 #ifdef notyet
4987 static int
4988 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4989 {
4990 	struct iwm_beacon_filter_cmd cmd = {
4991 		IWM_BF_CMD_CONFIG_DEFAULTS,
4992 		.bf_enable_beacon_filter = htole32(1),
4993 	};
4994 	int err;
4995 
4996 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4997 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
4998 
4999 	if (err == 0)
5000 		sc->sc_bf.bf_enabled = 1;
5001 
5002 	return err;
5003 }
5004 #endif
5005 
5006 static int
5007 iwm_disable_beacon_filter(struct iwm_softc *sc)
5008 {
5009 	struct iwm_beacon_filter_cmd cmd;
5010 	int err;
5011 
5012 	memset(&cmd, 0, sizeof(cmd));
5013 	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
5014 		return 0;
5015 
5016 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
5017 	if (err == 0)
5018 		sc->sc_bf.bf_enabled = 0;
5019 
5020 	return err;
5021 }
5022 
5023 static int
5024 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
5025 {
5026 	struct iwm_add_sta_cmd_v7 add_sta_cmd;
5027 	int err;
5028 	uint32_t status;
5029 
5030 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
5031 
5032 	add_sta_cmd.sta_id = IWM_STATION_ID;
5033 	add_sta_cmd.mac_id_n_color
5034 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
5035 	if (!update) {
5036 		int ac;
5037 		for (ac = 0; ac < WME_NUM_AC; ac++) {
5038 			add_sta_cmd.tfd_queue_msk |=
5039 			    htole32(__BIT(iwm_ac_to_tx_fifo[ac]));
5040 		}
5041 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
5042 	}
5043 	add_sta_cmd.add_modify = update ? 1 : 0;
5044 	add_sta_cmd.station_flags_msk
5045 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
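	/*
	 * Leave TX aggregation disabled on all TIDs; this driver does
	 * not support A-MPDU TX yet.
	 */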
	add_sta_cmd.tid_disable_tx = htole16(0xffff);
	if (update)
		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);

#ifndef IEEE80211_NO_HT
	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		struct ieee80211com *ic = &sc->sc_ic;

		add_sta_cmd.station_flags_msk
		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);

		add_sta_cmd.station_flags
		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
		case IEEE80211_AMPDU_PARAM_SS_2:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_4:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_8:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_16:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
			break;
		default:
			break;
		}
	}
#endif

	status = IWM_ADD_STA_SUCCESS;
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
	    &add_sta_cmd, &status);
	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}

static int
iwm_add_aux_sta(struct iwm_softc *sc)
{
	struct iwm_add_sta_cmd_v7 cmd;
	int err;
	uint32_t status;

	err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
	if (err)
		return err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = IWM_AUX_STA_ID;
	cmd.mac_id_n_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
	cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
	cmd.tid_disable_tx = htole16(0xffff);

	status = IWM_ADD_STA_SUCCESS;
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
	    &status);
	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}

#define IWM_PLCP_QUIET_THRESH 1
#define IWM_ACTIVE_QUIET_TIME 10
#define LONG_OUT_TIME_PERIOD 600
#define SHORT_OUT_TIME_PERIOD 200
#define SUSPEND_TIME_PERIOD 100

static uint16_t
iwm_scan_rx_chain(struct iwm_softc *sc)
{
	uint16_t rx_chain;
	uint8_t rx_ant;

	rx_ant = iwm_fw_valid_rx_ant(sc);
	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
	return htole16(rx_chain);
}

static uint32_t
iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
{
	uint32_t tx_ant;
	int i, ind;

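	/*
	 * Round-robin through the valid TX antennas so that successive
	 * scans do not always transmit on the same chain.
	 */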
	for (i = 0, ind = sc->sc_scan_last_antenna;
	    i < IWM_RATE_MCS_ANT_NUM; i++) {
		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
			sc->sc_scan_last_antenna = ind;
			break;
		}
	}
	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;

	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
		    tx_ant);
	else
		return htole32(IWM_RATE_6M_PLCP | tx_ant);
}

#ifdef notyet
/*
 * If req->n_ssids > 0, it means we should do an active scan.
 * In case of active scan w/o directed scan, we receive a zero-length SSID
 * just to notify that this scan is active and not passive.
 * In order to notify the FW of the number of SSIDs we wish to scan (including
 * the zero-length one), we need to set the corresponding bits in chan->type,
 * one for each SSID, and set the active bit (first).  The first SSID is
 * already included in the probe template, so we need to set only
 * req->n_ssids - 1 bits in addition to the first bit.
 */
static uint16_t
iwm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
{
	if (flags & IEEE80211_CHAN_2GHZ)
		return 30 + 3 * (n_ssids + 1);
	return 20 + 2 * (n_ssids + 1);
}

static uint16_t
iwm_get_passive_dwell(struct iwm_softc *sc, int flags)
{
	return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
}
#endif

static uint8_t
iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

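	/*
	 * net80211's channel array is indexed by IEEE channel number;
	 * entry 0 is unused and entries with ic_flags == 0 are channels
	 * the device does not support, so both are skipped below.
	 */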
	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		if (c->ic_flags == 0)
			continue;

		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
		chan->iter_count = htole16(1);
		chan->iter_interval = htole32(0);
		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
		chan->flags |= htole32(IWM_SCAN_CHANNEL_NSSIDS(n_ssids));
		if (!IEEE80211_IS_CHAN_PASSIVE(c) && n_ssids != 0)
			chan->flags |= htole32(IWM_SCAN_CHANNEL_TYPE_ACTIVE);
		chan++;
		nchan++;
	}

	return nchan;
}

static uint8_t
iwm_umac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		if (c->ic_flags == 0)
			continue;

		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
		chan->iter_count = 1;
		chan->iter_interval = htole16(0);
		chan->flags = htole32(IWM_SCAN_CHANNEL_UMAC_NSSIDS(n_ssids));
		chan++;
		nchan++;
	}

	return nchan;
}

static int
iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
	struct ieee80211_rateset *rs;
	size_t remain = sizeof(preq->buf);
	uint8_t *frm, *pos;

	memset(preq, 0, sizeof(*preq));

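	/*
	 * The firmware assembles the final probe request per channel
	 * from this template: a shared MAC header, a per-band IE set
	 * (band_data[0] for 2GHz, band_data[1] for 5GHz) and common
	 * IEs, each described by an (offset, len) pair relative to the
	 * start of the buffer.
	 */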
	KASSERT(ic->ic_des_esslen < sizeof(ic->ic_des_essid));
	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
		return ENOBUFS;

	/*
	 * Build a probe request frame.  Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);

	/* Tell the firmware where the MAC header is. */
	preq->mac_header.offset = 0;
	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
	remain -= frm - (uint8_t *)wh;

	/* Fill in 2GHz IEs and tell firmware where they are. */
	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
		if (remain < 4 + rs->rs_nrates)
			return ENOBUFS;
	} else if (remain < 2 + rs->rs_nrates)
		return ENOBUFS;
	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	preq->band_data[0].len = htole16(frm - pos);
	remain -= frm - pos;

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
		if (remain < 3)
			return ENOBUFS;
		*frm++ = IEEE80211_ELEMID_DSPARMS;
		*frm++ = 1;
		*frm++ = 0;
		remain -= 3;
	}

	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
		/* Fill in 5GHz IEs. */
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
			if (remain < 4 + rs->rs_nrates)
				return ENOBUFS;
		} else if (remain < 2 + rs->rs_nrates)
			return ENOBUFS;
		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
		pos = frm;
		frm = ieee80211_add_rates(frm, rs);
		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
			frm = ieee80211_add_xrates(frm, rs);
		preq->band_data[1].len = htole16(frm - pos);
		remain -= frm - pos;
	}

#ifndef IEEE80211_NO_HT
	/* Send 11n IEs on both 2GHz and 5GHz bands. */
	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	if (ic->ic_flags & IEEE80211_F_HTON) {
		if (remain < 28)
			return ENOBUFS;
		frm = ieee80211_add_htcaps(frm, ic);
		/* XXX add WME info? */
	}
#endif

	preq->common_data.len = htole16(frm - pos);

	return 0;
}

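/*
 * Older firmware implements scanning in the LMAC and is driven via
 * IWM_SCAN_OFFLOAD_REQUEST_CMD; newer firmware exposes the UMAC scan
 * API instead (see iwm_umac_scan() below).  The choice between the two
 * is made at run time based on the IWM_UCODE_TLV_CAPA_UMAC_SCAN
 * capability flag.
 */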
static int
iwm_lmac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_lmac *req;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	req_len = sizeof(struct iwm_scan_req_lmac) +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = kmem_zalloc(req_len, KM_SLEEP);
	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->rx_chain_select = iwm_scan_rx_chain(sc);
	req->iter_num = htole32(1);
	req->delay = 0;

	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
	if (ic->ic_des_esslen == 0)
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
	else
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);

	req->flags = htole32(IWM_PHY_BAND_24);
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		req->flags |= htole32(IWM_PHY_BAND_5);
	req->filter_flags =
	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);

	/* Tx flags 2 GHz. */
	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[0].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;

	/* Tx flags 5 GHz. */
	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[1].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		req->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
	}

	req->n_channels = iwm_lmac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_lmac *)req->data,
	    ic->ic_des_esslen != 0);

	err = iwm_fill_probe_req(sc,
	    (struct iwm_scan_probe_req *)(req->data +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	     sc->sc_capa_n_scan_channels)));
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	req->schedule[0].iterations = 1;
	req->schedule[0].full_scan_mul = 1;

	/* Disable EBS. */
	req->channel_opt[0].non_ebs_ratio = 1;
	req->channel_opt[1].non_ebs_ratio = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}

static int
iwm_config_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_scan_config *scan_config;
	int err, nchan;
	size_t cmd_size;
	struct ieee80211_channel *c;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
		.flags = 0,
	};
	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
	    IWM_SCAN_CONFIG_RATE_54M);

	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;

	scan_config = kmem_zalloc(cmd_size, KM_SLEEP);
	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
	scan_config->legacy_rates = htole32(rates |
	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	scan_config->dwell_active = 10;
	scan_config->dwell_passive = 110;
	scan_config->dwell_fragmented = 44;
	scan_config->dwell_extended = 90;
	scan_config->out_of_channel_time = htole32(0);
	scan_config->suspend_time = htole32(0);

	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);

	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
	scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
	    IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
	    IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;

	for (c = &ic->ic_channels[1], nchan = 0;
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels; c++) {
		if (c->ic_flags == 0)
			continue;
		scan_config->channel_array[nchan++] =
		    ieee80211_mhz2ieee(c->ic_freq, 0);
	}

	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);

	hcmd.data[0] = scan_config;
	hcmd.len[0] = cmd_size;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(scan_config, cmd_size);
	return err;
}

static int
iwm_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_umac *req;
	struct iwm_scan_req_umac_tail *tail;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	req_len = sizeof(struct iwm_scan_req_umac) +
	    (sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels) +
	    sizeof(struct iwm_scan_req_umac_tail);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = kmem_zalloc(req_len, KM_SLEEP);

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);

	req->n_channels = iwm_umac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_umac *)req->data,
	    ic->ic_des_esslen != 0);

	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
	    IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);

	tail = (struct iwm_scan_req_umac_tail *)(req->data +
	    sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels);

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		tail->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
	} else
		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);

	err = iwm_fill_probe_req(sc, &tail->preq);
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	tail->schedule[0].interval = 0;
	tail->schedule[0].iter_count = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}

static uint8_t
iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
{
	int i;
	uint8_t rval;

	for (i = 0; i < rs->rs_nrates; i++) {
		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
		if (rval == iwm_rates[ridx].rate)
			return rs->rs_rates[i];
	}
	return 0;
}

static void
iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;
	int lowest_present_cck = -1;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */
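	/*
	 * The fixups below implement that rule: every mandatory rate
	 * lower than the lowest basic rate found is added, so e.g. with
	 * basic rates {5.5, 11} the CCK bitmap also gets 1 and 2 Mbps
	 * and the OFDM bitmap gets 6 Mbps.
	 */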

	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}

static void
iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = htole32(IWM_TSF_ID_A);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);

	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

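	/*
	 * Translate net80211's WME parameters into the firmware's EDCA
	 * format: the ECWmin/ECWmax exponents become actual window
	 * sizes via IWM_EXP2(), the TXOP limit is converted from
	 * 32-usec units to microseconds, and entries are indexed by TX
	 * FIFO rather than by access category.
	 */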
	for (i = 0; i < WME_NUM_AC; i++) {
		struct wmeParams *wmep = &ic->ic_wme.wme_params[i];
		int txf = iwm_ac_to_tx_fifo[i];

		cmd->ac[txf].cw_min = htole16(IWM_EXP2(wmep->wmep_logcwmin));
		cmd->ac[txf].cw_max = htole16(IWM_EXP2(wmep->wmep_logcwmax));
		cmd->ac[txf].aifsn = wmep->wmep_aifsn;
		cmd->ac[txf].fifos_mask = (1 << txf);
		cmd->ac[txf].edca_txop = htole16(wmep->wmep_txopLimit * 32);
	}
	if (ni->ni_flags & IEEE80211_NODE_QOS)
		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);

#ifndef IEEE80211_NO_HT
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		enum ieee80211_htprot htprot =
		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
		switch (htprot) {
		case IEEE80211_HTPROT_NONE:
			break;
		case IEEE80211_HTPROT_NONMEMBER:
		case IEEE80211_HTPROT_NONHT_MIXED:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
		case IEEE80211_HTPROT_20MHZ:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
			    IWM_MAC_PROT_FLG_FAT_PROT);
			break;
		default:
			break;
		}

		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
	}
#endif

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
#undef IWM_EXP2
}

static void
iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_data_sta *sta, int assoc)
{
	struct ieee80211_node *ni = &in->in_ni;
	uint32_t dtim_off;
	uint64_t tsf;

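	/*
	 * The next DTIM beacon arrives ni_dtim_count beacon intervals
	 * after the last received beacon; convert that to microseconds
	 * (1 TU = 1024 usec) and add it to both the on-air timestamp
	 * and the TSF of that beacon.
	 */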
	dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
	tsf = le64toh(ni->ni_tstamp.tsf);

	sta->is_assoc = htole32(assoc);
	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
	sta->dtim_tsf = htole64(tsf + dtim_off);
	sta->bi = htole32(ni->ni_intval);
	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtim_period);
	sta->dtim_reciprocal = htole32(iwm_reciprocal(ni->ni_intval *
	    ni->ni_dtim_period));
	sta->listen_interval = htole32(10);
	sta->assoc_id = htole32(ni->ni_associd);
	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
}

static int
iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
    int assoc)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_mac_ctx_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));

	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);

	/*
	 * Allow beacons to pass through as long as we are not associated
	 * or we do not have DTIM period information.
	 */
	if (!assoc || !ni->ni_associd || !ni->ni_dtim_period)
		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
	else
		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);

	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}

#define IWM_MISSED_BEACONS_THRESHOLD 8

static void
iwm_rx_missed_beacons_notif(struct iwm_softc *sc,
    struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
	int s;

	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
	    le32toh(mb->mac_id),
	    le32toh(mb->consec_missed_beacons),
	    le32toh(mb->consec_missed_beacons_since_last_rx),
	    le32toh(mb->num_recvd_beacons),
	    le32toh(mb->num_expected_beacons)));

	/*
	 * TODO: the threshold should be adjusted based on latency conditions,
	 * and/or in case of a CS flow on one of the other AP vifs.
	 */
	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
	    IWM_MISSED_BEACONS_THRESHOLD) {
		s = splnet();
		ieee80211_beacon_miss(&sc->sc_ic);
		splx(s);
	}
}

static int
iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_time_quota_cmd cmd;
	int i, idx, num_active_macs, quota, quota_rem;
	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
	int n_ifs[IWM_MAX_BINDINGS] = {0, };
	uint16_t id;

	memset(&cmd, 0, sizeof(cmd));

	/* currently, PHY ID == binding ID */
	if (in) {
		id = in->in_phyctxt->id;
		KASSERT(id < IWM_MAX_BINDINGS);
		colors[id] = in->in_phyctxt->color;

		if (1)
			n_ifs[id] = 1;
	}

	/*
	 * The FW's scheduling session consists of IWM_MAX_QUOTA
	 * fragments. Divide these fragments equally between all the
	 * bindings that require quota.
	 */
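	/*
	 * Example: with two active bindings each would receive
	 * IWM_MAX_QUOTA / 2 fragments; any remainder is handed to the
	 * first binding below.  In this driver at most one binding is
	 * ever active, so it simply gets the whole session.
	 */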
	num_active_macs = 0;
	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
		num_active_macs += n_ifs[i];
	}

	quota = 0;
	quota_rem = 0;
	if (num_active_macs) {
		quota = IWM_MAX_QUOTA / num_active_macs;
		quota_rem = IWM_MAX_QUOTA % num_active_macs;
	}

	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
		if (colors[i] < 0)
			continue;

		cmd.quotas[idx].id_and_color =
		    htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));

		if (n_ifs[i] <= 0) {
			cmd.quotas[idx].quota = htole32(0);
			cmd.quotas[idx].max_duration = htole32(0);
		} else {
			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
			cmd.quotas[idx].max_duration = htole32(0);
		}
		idx++;
	}

	/* Give the remainder of the session to the first binding */
	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);

	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
}

static int
iwm_auth(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	uint32_t duration;
	int err;

	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
	if (err)
		return err;

	err = iwm_allow_mcast(sc);
	if (err)
		return err;

	sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
	err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
	    IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err)
		return err;
	in->in_phyctxt = &sc->sc_phyctxt[0];

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not add MAC context (error %d)\n", err);
		return err;
	}

	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
	if (err)
		return err;

	err = iwm_add_sta_cmd(sc, in, 0);
	if (err)
		return err;

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
		return err;
	}

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 2;
	else
		duration = IEEE80211_DUR_TU;
	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
	DELAY(100);

	return 0;
}

static int
iwm_assoc(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	int err;

	err = iwm_add_sta_cmd(sc, in, 1);
	if (err)
		return err;

	return 0;
}

static struct ieee80211_node *
iwm_node_alloc(struct ieee80211_node_table *nt)
{
	return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
}

static void
iwm_calib_timeout(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
#ifndef IEEE80211_NO_HT
	struct ieee80211_node *ni = &in->in_ni;
	int otxrate;
#endif
	int s;

	s = splnet();
	if ((ic->ic_fixed_rate == -1
#ifndef IEEE80211_NO_HT
	    || ic->ic_fixed_mcs == -1
#endif
	    ) &&
	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
#ifndef IEEE80211_NO_HT
		if (ni->ni_flags & IEEE80211_NODE_HT)
			otxrate = ni->ni_txmcs;
		else
			otxrate = ni->ni_txrate;
#endif
		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);

#ifndef IEEE80211_NO_HT
		/*
		 * If AMRR has chosen a new TX rate we must update
		 * the firmware's LQ rate table from process context.
		 */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    otxrate != ni->ni_txmcs)
			softint_schedule(sc->setrates_task);
		else if (otxrate != ni->ni_txrate)
			softint_schedule(sc->setrates_task);
#endif
	}
	splx(s);

	callout_schedule(&sc->sc_calib_to, mstohz(500));
}

#ifndef IEEE80211_NO_HT
static void
iwm_setrates_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;

	/* Update rates table based on new TX rate determined by AMRR. */
	iwm_setrates(in);
}

static int
iwm_setrates(struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd *lq = &in->in_lq;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int i, j, ridx, ridx_min, tab = 0;
#ifndef IEEE80211_NO_HT
	int sgi_ok;
#endif
	struct iwm_host_cmd cmd = {
		.id = IWM_LQ_CMD,
		.len = { sizeof(in->in_lq), },
	};

	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

#ifndef IEEE80211_NO_HT
	sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
	    (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
#endif

	/*
	 * Fill the LQ rate selection table with legacy and/or HT rates
	 * in descending order, i.e. with the node's current TX rate first.
	 * In cases where throughput of an HT rate corresponds to a legacy
	 * rate it makes no sense to add both. We rely on the fact that
	 * iwm_rates is laid out such that equivalent HT/legacy rates share
	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
	 */
	j = 0;
	ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
	    IWM_RIDX_OFDM : IWM_RIDX_CCK;
	for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
		if (j >= __arraycount(lq->rs_table))
			break;
		tab = 0;
#ifndef IEEE80211_NO_HT
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
			for (i = ni->ni_txmcs; i >= 0; i--) {
				if (isclr(ni->ni_rxmcs, i))
					continue;
				if (ridx == iwm_mcs2ridx[i]) {
					tab = iwm_rates[ridx].ht_plcp;
					tab |= IWM_RATE_MCS_HT_MSK;
					if (sgi_ok)
						tab |= IWM_RATE_MCS_SGI_MSK;
					break;
				}
			}
		}
#endif
		if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
			for (i = ni->ni_txrate; i >= 0; i--) {
				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
				    IEEE80211_RATE_VAL)) {
					tab = iwm_rates[ridx].plcp;
					break;
				}
			}
		}

		if (tab == 0)
			continue;

		tab |= 1 << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		DPRINTFN(2, ("station rate %d %x\n", i, tab));
		lq->rs_table[j++] = htole32(tab);
	}

	/* Fill the rest with the lowest possible rate */
	i = j > 0 ? j - 1 : 0;
	while (j < __arraycount(lq->rs_table))
		lq->rs_table[j++] = lq->rs_table[i];

	lq->single_stream_ant_msk = IWM_ANT_A;
	lq->dual_stream_ant_msk = IWM_ANT_AB;

	lq->agg_time_limit = htole16(4000);	/* 4ms */
	lq->agg_disable_start_th = 3;
#ifdef notyet
	lq->agg_frame_cnt_limit = 0x3f;
#else
	lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
#endif

	cmd.data[0] = &in->in_lq;
	return iwm_send_cmd(sc, &cmd);
}
#endif

static int
iwm_media_change(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rate, ridx;
	int err;

	err = ieee80211_media_change(ifp);
	if (err != ENETRESET)
		return err;

#ifndef IEEE80211_NO_HT
	if (ic->ic_fixed_mcs != -1)
		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
	else
#endif
	if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		sc->sc_fixed_ridx = ridx;
	}

	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
	    (IFF_UP | IFF_RUNNING)) {
		iwm_stop(ifp, 0);
		err = iwm_init(ifp);
	}
	return err;
}

static int
iwm_do_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_softc *sc = ifp->if_softc;
	enum ieee80211_state ostate = ic->ic_state;
	struct iwm_node *in;
	int err;

	DPRINTF(("switching state %s->%s\n", ieee80211_state_name[ostate],
	    ieee80211_state_name[nstate]));

	if (ostate == IEEE80211_S_SCAN && nstate != ostate)
		iwm_led_blink_stop(sc);

	if (ostate == IEEE80211_S_RUN && nstate != ostate)
		iwm_disable_beacon_filter(sc);

	/* Reset the device if moving out of AUTH, ASSOC, or RUN. */
	/* XXX Is there a way to switch states without a full reset? */
	if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
		/*
		 * Upon receiving a deauth frame from the AP the net80211
		 * stack puts the driver into AUTH state.  This will fail
		 * with this driver, so force a transition to INIT instead
		 * and restart; iwm_init() will begin a new scan.
		 */
		if (nstate != IEEE80211_S_INIT) {
			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
			/* Always pass arg as -1 since we can't Tx right now. */
			sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
			iwm_stop(ifp, 0);
			iwm_init(ifp);
			return 0;
		}

		iwm_stop_device(sc);
		iwm_init_hw(sc);
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
		if (ostate == nstate &&
		    ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
			return 0;
		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
			err = iwm_umac_scan(sc);
		else
			err = iwm_lmac_scan(sc);
		if (err) {
			DPRINTF(("%s: could not initiate scan: %d\n",
			    DEVNAME(sc), err));
			return err;
		}
		SET(sc->sc_flags, IWM_FLAG_SCANNING);
		ic->ic_state = nstate;
		iwm_led_blink_start(sc);
		return 0;

	case IEEE80211_S_AUTH:
		err = iwm_auth(sc);
		if (err) {
			DPRINTF(("%s: could not move to auth state: %d\n",
			    DEVNAME(sc), err));
			return err;
		}
		break;

	case IEEE80211_S_ASSOC:
		err = iwm_assoc(sc);
		if (err) {
			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
			    err));
			return err;
		}
		break;

	case IEEE80211_S_RUN:
		in = (struct iwm_node *)ic->ic_bss;

		/* We have now been assigned an associd by the AP. */
		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
		if (err) {
			aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
			return err;
		}

		err = iwm_power_update_device(sc);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not send power command (error %d)\n", err);
			return err;
		}
#ifdef notyet
		/*
		 * Disabled for now. Default beacon filter settings
		 * prevent net80211 from getting ERP and HT protection
		 * updates from beacons.
		 */
		err = iwm_enable_beacon_filter(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not enable beacon filter\n");
			return err;
		}
#endif
		err = iwm_power_mac_update_mode(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not update MAC power (error %d)\n", err);
			return err;
		}

		err = iwm_update_quotas(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not update quotas (error %d)\n", err);
			return err;
		}

		ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);

		/* Start at lowest available bit-rate, AMRR will raise. */
		in->in_ni.ni_txrate = 0;
#ifndef IEEE80211_NO_HT
		in->in_ni.ni_txmcs = 0;
		iwm_setrates(in);
#endif

		callout_schedule(&sc->sc_calib_to, mstohz(500));
		iwm_led_enable(sc);
		break;

	default:
		break;
	}

	return sc->sc_newstate(ic, nstate, arg);
}

static void
iwm_newstate_cb(struct work *wk, void *v)
{
	struct iwm_softc *sc = v;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_newstate_state *iwmns = (struct iwm_newstate_state *)wk;
	enum ieee80211_state nstate = iwmns->ns_nstate;
	int generation = iwmns->ns_generation;
	int arg = iwmns->ns_arg;
	int s;

	kmem_intr_free(iwmns, sizeof(*iwmns));

	s = splnet();

	DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
	if (sc->sc_generation != generation) {
		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
		if (nstate == IEEE80211_S_INIT) {
			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: "
			    "calling sc_newstate()\n"));
			(void) sc->sc_newstate(ic, nstate, arg);
		}
	} else
		(void) iwm_do_newstate(ic, nstate, arg);

	splx(s);
}

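/*
 * iwm_do_newstate() issues synchronous firmware commands which may
 * sleep, but net80211 can invoke the newstate hook from contexts where
 * sleeping is not allowed.  Defer the work to a workqueue, tagging each
 * request with the current generation number so that requests queued
 * before a device reset are recognized and dropped in iwm_newstate_cb().
 */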
static int
iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct iwm_newstate_state *iwmns;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_softc *sc = ifp->if_softc;

	callout_stop(&sc->sc_calib_to);

	iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
	if (!iwmns) {
		DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
		return ENOMEM;
	}

	iwmns->ns_nstate = nstate;
	iwmns->ns_arg = arg;
	iwmns->ns_generation = sc->sc_generation;

	workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);

	return 0;
}

static void
iwm_endscan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int s;

	DPRINTF(("%s: scan ended\n", DEVNAME(sc)));

	s = splnet();
	if (ic->ic_state == IEEE80211_S_SCAN)
		ieee80211_end_scan(ic);
	splx(s);
}

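/*
 * The firmware's "smart FIFO" (SF) holds received frames in device
 * buffers until a watermark is reached or a timeout expires, which
 * reduces how often the host must be woken up.
 */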
/*
 * Aging and idle timeouts for the different possible scenarios
 * in default configuration
 */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};

/*
 * Aging and idle timeouts for the different possible scenarios
 * in single BSS MAC configuration.
 */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};

static void
iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
#ifndef IEEE80211_NO_HT
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else
#endif
			watermark = IWM_SF_W_MARK_LEGACY;
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
			    htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		    sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		    sizeof(iwm_sf_full_timeout_def));
	}
}

static int
iwm_sf_config(struct iwm_softc *sc, int new_state)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_sf_cfg_cmd sf_cmd = {
		.state = htole32(IWM_SF_FULL_ON),
	};

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);

	switch (new_state) {
	case IWM_SF_UNINIT:
	case IWM_SF_INIT_OFF:
		iwm_fill_sf_command(sc, &sf_cmd, NULL);
		break;
	case IWM_SF_FULL_ON:
		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
		break;
	default:
		return EINVAL;
	}

	return iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
	    sizeof(sf_cmd), &sf_cmd);
}

static int
iwm_send_bt_init_conf(struct iwm_softc *sc)
{
	struct iwm_bt_coex_cmd bt_cmd;

	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);

	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
}

static bool
iwm_is_lar_supported(struct iwm_softc *sc)
{
	bool nvm_lar = sc->sc_nvm.lar_enabled;
	bool tlv_lar = isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT);

	if (iwm_lar_disable)
		return false;

	/*
	 * Enable LAR only if it is supported by the FW (TLV) &&
	 * enabled in the NVM
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		return nvm_lar && tlv_lar;
	else
		return tlv_lar;
}

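/*
 * Tell the firmware which regulatory domain to use; the MCC is passed
 * as a two-letter (alpha2) country code.  LAR stands for "location
 * aware regulatory".
 */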
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = IWM_CMD_WANT_SKB,
		.data = { &mcc_cmd },
	};
	int err;
	int resp_v2 = isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	if (!iwm_is_lar_supported(sc)) {
		DPRINTF(("%s: no LAR support\n", __func__));
		return 0;
	}

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	err = iwm_send_cmd(sc, &hcmd);
	if (err)
		return err;

	iwm_free_resp(sc, &hcmd);

	return 0;
}

static void
iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
{
	struct iwm_host_cmd cmd = {
		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
		.len = { sizeof(uint32_t), },
		.data = { &backoff, },
	};

	iwm_send_cmd(sc, &cmd);
}

static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int err, i, ac;

	err = iwm_start_hw(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return err;
	}

	err = iwm_run_init_mvm_ucode(sc, 0);
	if (err)
		return err;

	/* Should stop and start HW since INIT image just loaded. */
	iwm_stop_device(sc);
	err = iwm_start_hw(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return err;
	}

	/* Restart, this time with the regular firmware */
	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not load firmware (error %d)\n", err);
		goto err;
	}

	err = iwm_send_bt_init_conf(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not init bt coex (error %d)\n", err);
		goto err;
	}

	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not init tx ant config (error %d)\n", err);
		goto err;
	}

	/* Send phy db control command and then phy db calibration */
	err = iwm_send_phy_db_data(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not init phy db (error %d)\n", err);
		goto err;
	}

	err = iwm_send_phy_cfg_cmd(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not send phy config (error %d)\n", err);
		goto err;
	}

	/* Add auxiliary station for scanning */
	err = iwm_add_aux_sta(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not add aux station (error %d)\n", err);
		goto err;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
		    IWM_FW_CTXT_ACTION_ADD, 0);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not add phy context %d (error %d)\n",
			    i, err);
			goto err;
		}
	}

	/* Initialize tx backoffs to the minimum. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_tt_tx_backoff(sc, 0);

	err = iwm_power_update_device(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not send power command (error %d)\n", err);
		goto err;
	}

	err = iwm_send_update_mcc_cmd(sc, iwm_default_mcc);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not init LAR (error %d)\n", err);
		goto err;
	}

	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		err = iwm_config_umac_scan(sc);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not configure scan (error %d)\n", err);
			goto err;
		}
	}

	for (ac = 0; ac < WME_NUM_AC; ac++) {
		err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
		    iwm_ac_to_tx_fifo[ac]);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not enable Tx queue %d (error %d)\n",
			    ac, err);
			goto err;
		}
	}
6719 
6720 	err = iwm_disable_beacon_filter(sc);
6721 	if (err) {
6722 		aprint_error_dev(sc->sc_dev,
6723 		    "could not disable beacon filter (error %d)\n", err);
6724 		goto err;
6725 	}
6726 
6727 	return 0;
6728 
6729  err:
6730 	iwm_stop_device(sc);
6731 	return err;
6732 }
6733 
6734 /* Allow multicast from our BSSID. */
6735 static int
6736 iwm_allow_mcast(struct iwm_softc *sc)
6737 {
6738 	struct ieee80211com *ic = &sc->sc_ic;
6739 	struct ieee80211_node *ni = ic->ic_bss;
6740 	struct iwm_mcast_filter_cmd *cmd;
6741 	size_t size;
6742 	int err;
6743 
6744 	size = roundup(sizeof(*cmd), 4);
6745 	cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
6746 	if (cmd == NULL)
6747 		return ENOMEM;
6748 	cmd->filter_own = 1;
6749 	cmd->port_id = 0;
6750 	cmd->count = 0;
6751 	cmd->pass_all = 1;
6752 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6753 
6754 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 0, size, cmd);
6755 	kmem_intr_free(cmd, size);
6756 	return err;
6757 }
6758 
6759 static int
6760 iwm_init(struct ifnet *ifp)
6761 {
6762 	struct iwm_softc *sc = ifp->if_softc;
6763 	int err;
6764 
6765 	if (ISSET(sc->sc_flags, IWM_FLAG_HW_INITED))
6766 		return 0;
6767 
6768 	sc->sc_generation++;
6769 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
6770 
6771 	err = iwm_init_hw(sc);
6772 	if (err) {
6773 		iwm_stop(ifp, 1);
6774 		return err;
6775 	}
6776 
6777 	ifp->if_flags &= ~IFF_OACTIVE;
6778 	ifp->if_flags |= IFF_RUNNING;
6779 
6780 	ieee80211_begin_scan(&sc->sc_ic, 0);
6781 	SET(sc->sc_flags, IWM_FLAG_HW_INITED);
6782 
6783 	return 0;
6784 }
6785 
6786 static void
6787 iwm_start(struct ifnet *ifp)
6788 {
6789 	struct iwm_softc *sc = ifp->if_softc;
6790 	struct ieee80211com *ic = &sc->sc_ic;
6791 	struct ieee80211_node *ni;
6792 	struct ether_header *eh;
6793 	struct mbuf *m;
6794 	int ac;
6795 
6796 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6797 		return;
6798 
6799 	for (;;) {
6800 		/* why isn't this done per-queue? */
6801 		if (sc->qfullmsk != 0) {
6802 			ifp->if_flags |= IFF_OACTIVE;
6803 			break;
6804 		}
6805 
6806 		/* need to send management frames even if we're not RUNning */
6807 		IF_DEQUEUE(&ic->ic_mgtq, m);
6808 		if (m) {
6809 			ni = M_GETCTX(m, struct ieee80211_node *);
6810 			M_CLEARCTX(m);
6811 			ac = WME_AC_BE;
6812 			goto sendit;
6813 		}
6814 		if (ic->ic_state != IEEE80211_S_RUN) {
6815 			break;
6816 		}
6817 
6818 		IFQ_DEQUEUE(&ifp->if_snd, m);
6819 		if (m == NULL)
6820 			break;
6821 
6822 		if (m->m_len < sizeof (*eh) &&
6823 		   (m = m_pullup(m, sizeof (*eh))) == NULL) {
6824 			if_statinc(ifp, if_oerrors);
6825 			continue;
6826 		}
6827 
6828 		eh = mtod(m, struct ether_header *);
6829 		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
6830 		if (ni == NULL) {
6831 			m_freem(m);
6832 			if_statinc(ifp, if_oerrors);
6833 			continue;
6834 		}
6835 
6836 		/* classify mbuf so we can find which tx ring to use */
6837 		if (ieee80211_classify(ic, m, ni) != 0) {
6838 			m_freem(m);
6839 			ieee80211_free_node(ni);
6840 			if_statinc(ifp, if_oerrors);
6841 			continue;
6842 		}
6843 
6844 		/* No QoS encapsulation for EAPOL frames. */
6845 		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
6846 		    M_WME_GETAC(m) : WME_AC_BE;
6847 
6848 		bpf_mtap(ifp, m, BPF_D_OUT);
6849 
6850 		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
6851 			ieee80211_free_node(ni);
6852 			if_statinc(ifp, if_oerrors);
6853 			continue;
6854 		}
6855 
6856  sendit:
6857 		bpf_mtap3(ic->ic_rawbpf, m, BPF_D_OUT);
6858 
6859 		if (iwm_tx(sc, m, ni, ac) != 0) {
6860 			ieee80211_free_node(ni);
6861 			if_statinc(ifp, if_oerrors);
6862 			continue;
6863 		}
6864 
6865 		if (ifp->if_flags & IFF_UP) {
6866 			sc->sc_tx_timer = 15;
6867 			ifp->if_timer = 1;
6868 		}
6869 	}
6870 }
6871 
6872 static void
6873 iwm_stop(struct ifnet *ifp, int disable)
6874 {
6875 	struct iwm_softc *sc = ifp->if_softc;
6876 	struct ieee80211com *ic = &sc->sc_ic;
6877 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6878 
6879 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
6880 	sc->sc_flags |= IWM_FLAG_STOPPED;
6881 	sc->sc_generation++;
6882 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
6883 
6884 	if (in)
6885 		in->in_phyctxt = NULL;
6886 
6887 	if (ic->ic_state != IEEE80211_S_INIT)
6888 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
6889 
6890 	callout_stop(&sc->sc_calib_to);
6891 	iwm_led_blink_stop(sc);
6892 	ifp->if_timer = sc->sc_tx_timer = 0;
6893 	iwm_stop_device(sc);
6894 }
6895 
6896 static void
6897 iwm_watchdog(struct ifnet *ifp)
6898 {
6899 	struct iwm_softc *sc = ifp->if_softc;
6900 
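	/*
	 * Called via ifp->if_timer, which the stack decrements once per
	 * second and fires at zero; we keep re-arming it below so that
	 * sc_tx_timer in effect counts seconds until a device timeout.
	 */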
6901 	ifp->if_timer = 0;
6902 	if (sc->sc_tx_timer > 0) {
6903 		if (--sc->sc_tx_timer == 0) {
6904 			aprint_error_dev(sc->sc_dev, "device timeout\n");
6905 #ifdef IWM_DEBUG
6906 			iwm_nic_error(sc);
6907 #endif
6908 			ifp->if_flags &= ~IFF_UP;
6909 			iwm_stop(ifp, 1);
6910 			if_statinc(ifp, if_oerrors);
6911 			return;
6912 		}
6913 		ifp->if_timer = 1;
6914 	}
6915 
6916 	ieee80211_watchdog(&sc->sc_ic);
6917 }
6918 
6919 static int
6920 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
6921 {
6922 	struct iwm_softc *sc = ifp->if_softc;
6923 	struct ieee80211com *ic = &sc->sc_ic;
6924 	const struct sockaddr *sa;
6925 	int s, err = 0;
6926 
6927 	s = splnet();
6928 
6929 	switch (cmd) {
6930 	case SIOCSIFADDR:
6931 		ifp->if_flags |= IFF_UP;
6932 		/* FALLTHROUGH */
6933 	case SIOCSIFFLAGS:
6934 		err = ifioctl_common(ifp, cmd, data);
6935 		if (err)
6936 			break;
6937 		if (ifp->if_flags & IFF_UP) {
6938 			if (!(ifp->if_flags & IFF_RUNNING)) {
6939 				err = iwm_init(ifp);
6940 				if (err)
6941 					ifp->if_flags &= ~IFF_UP;
6942 			}
6943 		} else {
6944 			if (ifp->if_flags & IFF_RUNNING)
6945 				iwm_stop(ifp, 1);
6946 		}
6947 		break;
6948 
6949 	case SIOCADDMULTI:
6950 	case SIOCDELMULTI:
6951 		sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
6952 		err = (cmd == SIOCADDMULTI) ?
6953 		    ether_addmulti(sa, &sc->sc_ec) :
6954 		    ether_delmulti(sa, &sc->sc_ec);
6955 		if (err == ENETRESET)
6956 			err = 0;
6957 		break;
6958 
6959 	default:
6960 		err = ieee80211_ioctl(ic, cmd, data);
6961 		break;
6962 	}
6963 
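	/* ENETRESET: configuration changed while up; restart the device. */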
6964 	if (err == ENETRESET) {
6965 		err = 0;
6966 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6967 		    (IFF_UP | IFF_RUNNING)) {
6968 			iwm_stop(ifp, 0);
6969 			err = iwm_init(ifp);
6970 		}
6971 	}
6972 
6973 	splx(s);
6974 	return err;
6975 }
6976 
6977 /*
6978  * Note: This structure is read from the device with IO accesses,
6979  * and the reading already does the endian conversion. As it is
6980  * read with uint32_t-sized accesses, any members with a different size
6981  * need to be ordered correctly though!
6982  */
6983 struct iwm_error_event_table {
6984 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
6985 	uint32_t error_id;		/* type of error */
6986 	uint32_t trm_hw_status0;	/* TRM HW status */
6987 	uint32_t trm_hw_status1;	/* TRM HW status */
6988 	uint32_t blink2;		/* branch link */
6989 	uint32_t ilink1;		/* interrupt link */
6990 	uint32_t ilink2;		/* interrupt link */
6991 	uint32_t data1;		/* error-specific data */
6992 	uint32_t data2;		/* error-specific data */
6993 	uint32_t data3;		/* error-specific data */
6994 	uint32_t bcon_time;		/* beacon timer */
6995 	uint32_t tsf_low;		/* network timestamp function timer */
6996 	uint32_t tsf_hi;		/* network timestamp function timer */
6997 	uint32_t gp1;		/* GP1 timer register */
6998 	uint32_t gp2;		/* GP2 timer register */
6999 	uint32_t fw_rev_type;	/* firmware revision type */
7000 	uint32_t major;		/* uCode version major */
7001 	uint32_t minor;		/* uCode version minor */
7002 	uint32_t hw_ver;		/* HW Silicon version */
7003 	uint32_t brd_ver;		/* HW board version */
7004 	uint32_t log_pc;		/* log program counter */
7005 	uint32_t frame_ptr;		/* frame pointer */
7006 	uint32_t stack_ptr;		/* stack pointer */
7007 	uint32_t hcmd;		/* last host command header */
7008 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
7009 				 * rxtx_flag */
7010 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
7011 				 * host_flag */
7012 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
7013 				 * enc_flag */
7014 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
7015 				 * time_flag */
7016 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
7017 				 * wico interrupt */
7018 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
7019 	uint32_t wait_event;		/* wait event() caller address */
7020 	uint32_t l2p_control;	/* L2pControlField */
7021 	uint32_t l2p_duration;	/* L2pDurationField */
7022 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
7023 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
7024 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
7025 				 * (LMPM_PMG_SEL) */
7026 	uint32_t u_timestamp;	/* date and time of the
7027 				 * compilation */
7028 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
7029 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
7030 
7031 /*
7032  * UMAC error struct - relevant starting from family 8000 chip.
7033  * Note: This structure is read from the device with IO accesses,
7034  * and the reading already does the endian conversion. As it is
7035  * read with u32-sized accesses, any members with a different size
7036  * need to be ordered correctly though!
7037  */
7038 struct iwm_umac_error_event_table {
7039 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
7040 	uint32_t error_id;	/* type of error */
7041 	uint32_t blink1;	/* branch link */
7042 	uint32_t blink2;	/* branch link */
7043 	uint32_t ilink1;	/* interrupt link */
7044 	uint32_t ilink2;	/* interrupt link */
7045 	uint32_t data1;		/* error-specific data */
7046 	uint32_t data2;		/* error-specific data */
7047 	uint32_t data3;		/* error-specific data */
7048 	uint32_t umac_major;
7049 	uint32_t umac_minor;
7050 	uint32_t frame_pointer;	/* core register 27 */
7051 	uint32_t stack_pointer;	/* core register 28 */
7052 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
7053 	uint32_t nic_isr_pref;	/* ISR status register */
7054 } __packed;
7055 
7056 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
7057 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
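/*
 * Layout of the firmware error log, as far as we understand it: one
 * leading validity/count word (skipped via ERROR_START_OFFSET) followed
 * by entries of seven 32-bit words each (ERROR_ELEM_SIZE).
 */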
7058 
7059 #ifdef IWM_DEBUG
7060 static const struct {
7061 	const char *name;
7062 	uint8_t num;
7063 } advanced_lookup[] = {
7064 	{ "NMI_INTERRUPT_WDG", 0x34 },
7065 	{ "SYSASSERT", 0x35 },
7066 	{ "UCODE_VERSION_MISMATCH", 0x37 },
7067 	{ "BAD_COMMAND", 0x38 },
7068 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
7069 	{ "FATAL_ERROR", 0x3D },
7070 	{ "NMI_TRM_HW_ERR", 0x46 },
7071 	{ "NMI_INTERRUPT_TRM", 0x4C },
7072 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
7073 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
7074 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
7075 	{ "NMI_INTERRUPT_HOST", 0x66 },
7076 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
7077 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
7078 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
7079 	{ "ADVANCED_SYSASSERT", 0 },
7080 };
7081 
7082 static const char *
7083 iwm_desc_lookup(uint32_t num)
7084 {
7085 	int i;
7086 
7087 	for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
7088 		if (advanced_lookup[i].num == num)
7089 			return advanced_lookup[i].name;
7090 
7091 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
7092 	return advanced_lookup[i].name;
7093 }
7094 
7095 /*
7096  * Support for dumping the error log seemed like a good idea ...
7097  * but it's mostly hex junk and the only sensible thing is the
7098  * hw/ucode revision (which we know anyway).  Since it's here,
7099  * I'll just leave it in, just in case e.g. the Intel guys want to
7100  * help us decipher some "ADVANCED_SYSASSERT" later.
7101  */
7102 static void
7103 iwm_nic_error(struct iwm_softc *sc)
7104 {
7105 	struct iwm_error_event_table t;
7106 	uint32_t base;
7107 
7108 	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
7109 	base = sc->sc_uc.uc_error_event_table;
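	/* The table is expected to live in device SRAM, above 0x800000. */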
7110 	if (base < 0x800000) {
7111 		aprint_error_dev(sc->sc_dev,
7112 		    "Invalid error log pointer 0x%08x\n", base);
7113 		return;
7114 	}
7115 
7116 	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
7117 		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
7118 		return;
7119 	}
7120 
7121 	if (!t.valid) {
7122 		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
7123 		return;
7124 	}
7125 
7126 	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
7127 		aprint_error_dev(sc->sc_dev, "Start Error Log Dump:\n");
7128 		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
7129 		    sc->sc_flags, t.valid);
7130 	}
7131 
7132 	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", t.error_id,
7133 	    iwm_desc_lookup(t.error_id));
7134 	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status0\n",
7135 	    t.trm_hw_status0);
7136 	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status1\n",
7137 	    t.trm_hw_status1);
7138 	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", t.blink2);
7139 	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", t.ilink1);
7140 	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", t.ilink2);
7141 	aprint_error_dev(sc->sc_dev, "%08X | data1\n", t.data1);
7142 	aprint_error_dev(sc->sc_dev, "%08X | data2\n", t.data2);
7143 	aprint_error_dev(sc->sc_dev, "%08X | data3\n", t.data3);
7144 	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", t.bcon_time);
7145 	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", t.tsf_low);
7146 	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", t.tsf_hi);
7147 	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", t.gp1);
7148 	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", t.gp2);
7149 	aprint_error_dev(sc->sc_dev, "%08X | uCode revision type\n",
7150 	    t.fw_rev_type);
7151 	aprint_error_dev(sc->sc_dev, "%08X | uCode version major\n",
7152 	    t.major);
7153 	aprint_error_dev(sc->sc_dev, "%08X | uCode version minor\n",
7154 	    t.minor);
7155 	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", t.hw_ver);
7156 	aprint_error_dev(sc->sc_dev, "%08X | board version\n", t.brd_ver);
7157 	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", t.hcmd);
7158 	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", t.isr0);
7159 	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", t.isr1);
7160 	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", t.isr2);
7161 	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", t.isr3);
7162 	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", t.isr4);
7163 	aprint_error_dev(sc->sc_dev, "%08X | last cmd Id\n", t.last_cmd_id);
7164 	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", t.wait_event);
7165 	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", t.l2p_control);
7166 	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n", t.l2p_duration);
7167 	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", t.l2p_mhvalid);
7168 	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
7169 	    t.l2p_addr_match);
7170 	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n", t.lmpm_pmg_sel);
7171 	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", t.u_timestamp);
7172 	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n", t.flow_handler);
7173 
7174 	if (sc->sc_uc.uc_umac_error_event_table)
7175 		iwm_nic_umac_error(sc);
7176 }
7177 
7178 static void
7179 iwm_nic_umac_error(struct iwm_softc *sc)
7180 {
7181 	struct iwm_umac_error_event_table t;
7182 	uint32_t base;
7183 
7184 	base = sc->sc_uc.uc_umac_error_event_table;
7185 
7186 	if (base < 0x800000) {
7187 		aprint_error_dev(sc->sc_dev,
7188 		    "Invalid error log pointer 0x%08x\n", base);
7189 		return;
7190 	}
7191 
7192 	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
7193 		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
7194 		return;
7195 	}
7196 
7197 	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
7198 		aprint_error_dev(sc->sc_dev, "Start UMAC Error Log Dump:\n");
7199 		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
7200 		    sc->sc_flags, t.valid);
7201 	}
7202 
7203 	aprint_error_dev(sc->sc_dev, "0x%08X | %s\n", t.error_id,
7204 		iwm_desc_lookup(t.error_id));
7205 	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink1\n", t.blink1);
7206 	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink2\n", t.blink2);
7207 	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink1\n",
7208 	    t.ilink1);
7209 	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink2\n",
7210 	    t.ilink2);
7211 	aprint_error_dev(sc->sc_dev, "0x%08X | umac data1\n", t.data1);
7212 	aprint_error_dev(sc->sc_dev, "0x%08X | umac data2\n", t.data2);
7213 	aprint_error_dev(sc->sc_dev, "0x%08X | umac data3\n", t.data3);
7214 	aprint_error_dev(sc->sc_dev, "0x%08X | umac major\n", t.umac_major);
7215 	aprint_error_dev(sc->sc_dev, "0x%08X | umac minor\n", t.umac_minor);
7216 	aprint_error_dev(sc->sc_dev, "0x%08X | frame pointer\n",
7217 	    t.frame_pointer);
7218 	aprint_error_dev(sc->sc_dev, "0x%08X | stack pointer\n",
7219 	    t.stack_pointer);
7220 	aprint_error_dev(sc->sc_dev, "0x%08X | last host cmd\n", t.cmd_header);
7221 	aprint_error_dev(sc->sc_dev, "0x%08X | isr status reg\n",
7222 	    t.nic_isr_pref);
7223 }
7224 #endif
7225 
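/*
 * Sync the portion of an RX buffer that holds a response payload
 * following the packet header, and point _var_/_ptr_ just past the
 * header.
 */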
7226 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
7227 do {									\
7228 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
7229 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
7230 	_var_ = (void *)((_pkt_)+1);					\
7231 } while (/*CONSTCOND*/0)
7232 
7233 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
7234 do {									\
7235 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
7236 	    (_len_), BUS_DMASYNC_POSTREAD);				\
7237 	_ptr_ = (void *)((_pkt_)+1);					\
7238 } while (/*CONSTCOND*/0)
7239 
7240 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
7241 
7242 static void
7243 iwm_notif_intr(struct iwm_softc *sc)
7244 {
7245 	uint16_t hw;
7246 
7247 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
7248 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
7249 
7250 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
7251 	while (sc->rxq.cur != hw) {
7252 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
7253 		struct iwm_rx_packet *pkt;
7254 		struct iwm_cmd_response *cresp;
7255 		int orig_qid, qid, idx, code;
7256 
7257 		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
7258 		    BUS_DMASYNC_POSTREAD);
7259 		pkt = mtod(data->m, struct iwm_rx_packet *);
7260 
7261 		orig_qid = pkt->hdr.qid;
7262 		qid = orig_qid & ~0x80;
7263 		idx = pkt->hdr.idx;
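		/* Bit 0x80 of qid marks firmware-initiated notifications; see below. */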
7264 
7265 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
7266 
7267 		/*
7268 		 * We randomly get these from the firmware, no idea why.
7269 		 * They at least seem harmless, so just ignore them for now.
7270 		 */
7271 		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
7272 		    || pkt->len_n_flags == htole32(0x55550000))) {
7273 			ADVANCE_RXQ(sc);
7274 			continue;
7275 		}
7276 
7277 		switch (code) {
7278 		case IWM_REPLY_RX_PHY_CMD:
7279 			iwm_rx_rx_phy_cmd(sc, pkt, data);
7280 			break;
7281 
7282 		case IWM_REPLY_RX_MPDU_CMD:
7283 			iwm_rx_rx_mpdu(sc, pkt, data);
7284 			break;
7285 
7286 		case IWM_TX_CMD:
7287 			iwm_rx_tx_cmd(sc, pkt, data);
7288 			break;
7289 
7290 		case IWM_MISSED_BEACONS_NOTIFICATION:
7291 			iwm_rx_missed_beacons_notif(sc, pkt, data);
7292 			break;
7293 
7294 		case IWM_MFUART_LOAD_NOTIFICATION:
7295 			break;
7296 
7297 		case IWM_ALIVE: {
7298 			struct iwm_alive_resp_v1 *resp1;
7299 			struct iwm_alive_resp_v2 *resp2;
7300 			struct iwm_alive_resp_v3 *resp3;
7301 
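			/*
			 * The layout of the alive response depends on the
			 * firmware version; tell the variants apart by
			 * their payload size.
			 */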
7302 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
7303 				SYNC_RESP_STRUCT(resp1, pkt);
7304 				sc->sc_uc.uc_error_event_table
7305 				    = le32toh(resp1->error_event_table_ptr);
7306 				sc->sc_uc.uc_log_event_table
7307 				    = le32toh(resp1->log_event_table_ptr);
7308 				sc->sched_base = le32toh(resp1->scd_base_ptr);
7309 				if (resp1->status == IWM_ALIVE_STATUS_OK)
7310 					sc->sc_uc.uc_ok = 1;
7311 				else
7312 					sc->sc_uc.uc_ok = 0;
7313 			}
7314 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
7315 				SYNC_RESP_STRUCT(resp2, pkt);
7316 				sc->sc_uc.uc_error_event_table
7317 				    = le32toh(resp2->error_event_table_ptr);
7318 				sc->sc_uc.uc_log_event_table
7319 				    = le32toh(resp2->log_event_table_ptr);
7320 				sc->sched_base = le32toh(resp2->scd_base_ptr);
7321 				sc->sc_uc.uc_umac_error_event_table
7322 				    = le32toh(resp2->error_info_addr);
7323 				if (resp2->status == IWM_ALIVE_STATUS_OK)
7324 					sc->sc_uc.uc_ok = 1;
7325 				else
7326 					sc->sc_uc.uc_ok = 0;
7327 			}
7328 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
7329 				SYNC_RESP_STRUCT(resp3, pkt);
7330 				sc->sc_uc.uc_error_event_table
7331 				    = le32toh(resp3->error_event_table_ptr);
7332 				sc->sc_uc.uc_log_event_table
7333 				    = le32toh(resp3->log_event_table_ptr);
7334 				sc->sched_base = le32toh(resp3->scd_base_ptr);
7335 				sc->sc_uc.uc_umac_error_event_table
7336 				    = le32toh(resp3->error_info_addr);
7337 				if (resp3->status == IWM_ALIVE_STATUS_OK)
7338 					sc->sc_uc.uc_ok = 1;
7339 				else
7340 					sc->sc_uc.uc_ok = 0;
7341 			}
7342 
7343 			sc->sc_uc.uc_intr = 1;
7344 			wakeup(&sc->sc_uc);
7345 			break;
7346 		}
7347 
7348 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
7349 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
7350 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
7351 			uint16_t size = le16toh(phy_db_notif->length);
7352 			bus_dmamap_sync(sc->sc_dmat, data->map,
7353 			    sizeof(*pkt) + sizeof(*phy_db_notif),
7354 			    size, BUS_DMASYNC_POSTREAD);
7355 			iwm_phy_db_set_section(sc, phy_db_notif, size);
7356 			break;
7357 		}
7358 
7359 		case IWM_STATISTICS_NOTIFICATION: {
7360 			struct iwm_notif_statistics *stats;
7361 			SYNC_RESP_STRUCT(stats, pkt);
7362 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
7363 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
7364 			break;
7365 		}
7366 
7367 		case IWM_NVM_ACCESS_CMD:
7368 		case IWM_MCC_UPDATE_CMD:
7369 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
7370 				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
7371 				    sizeof(sc->sc_cmd_resp),
7372 				    BUS_DMASYNC_POSTREAD);
7373 				memcpy(sc->sc_cmd_resp,
7374 				    pkt, sizeof(sc->sc_cmd_resp));
7375 			}
7376 			break;
7377 
7378 		case IWM_MCC_CHUB_UPDATE_CMD: {
7379 			struct iwm_mcc_chub_notif *notif;
7380 			SYNC_RESP_STRUCT(notif, pkt);
7381 
7382 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
7383 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
7384 			sc->sc_fw_mcc[2] = '\0';
7385 			break;
7386 		}
7387 
7388 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
7389 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
7390 		    IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
7391 			struct iwm_dts_measurement_notif_v1 *notif1;
7392 			struct iwm_dts_measurement_notif_v2 *notif2;
7393 
7394 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif1)) {
7395 				SYNC_RESP_STRUCT(notif1, pkt);
7396 				DPRINTF(("%s: DTS temp=%d \n",
7397 				    DEVNAME(sc), notif1->temp));
7398 				break;
7399 			}
7400 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif2)) {
7401 				SYNC_RESP_STRUCT(notif2, pkt);
7402 				DPRINTF(("%s: DTS temp=%d \n",
7403 				    DEVNAME(sc), notif2->temp));
7404 				break;
7405 			}
7406 			break;
7407 		}
7408 
7409 		case IWM_PHY_CONFIGURATION_CMD:
7410 		case IWM_TX_ANT_CONFIGURATION_CMD:
7411 		case IWM_ADD_STA:
7412 		case IWM_MAC_CONTEXT_CMD:
7413 		case IWM_REPLY_SF_CFG_CMD:
7414 		case IWM_POWER_TABLE_CMD:
7415 		case IWM_PHY_CONTEXT_CMD:
7416 		case IWM_BINDING_CONTEXT_CMD:
7417 		case IWM_TIME_EVENT_CMD:
7418 		case IWM_SCAN_REQUEST_CMD:
7419 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
7420 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
7421 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
7422 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
7423 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
7424 		case IWM_REPLY_BEACON_FILTERING_CMD:
7425 		case IWM_MAC_PM_POWER_TABLE:
7426 		case IWM_TIME_QUOTA_CMD:
7427 		case IWM_REMOVE_STA:
7428 		case IWM_TXPATH_FLUSH:
7429 		case IWM_LQ_CMD:
7430 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_FW_PAGING_BLOCK_CMD):
7431 		case IWM_BT_CONFIG:
7432 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
7433 			SYNC_RESP_STRUCT(cresp, pkt);
7434 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
7435 				memcpy(sc->sc_cmd_resp,
7436 				    pkt, sizeof(*pkt) + sizeof(*cresp));
7437 			}
7438 			break;
7439 
7440 		/* ignore */
7441 		case IWM_PHY_DB_CMD:
7442 			break;
7443 
7444 		case IWM_INIT_COMPLETE_NOTIF:
7445 			sc->sc_init_complete = 1;
7446 			wakeup(&sc->sc_init_complete);
7447 			break;
7448 
7449 		case IWM_SCAN_OFFLOAD_COMPLETE: {
7450 			struct iwm_periodic_scan_complete *notif;
7451 			SYNC_RESP_STRUCT(notif, pkt);
7452 			break;
7453 		}
7454 
7455 		case IWM_SCAN_ITERATION_COMPLETE: {
7456 			struct iwm_lmac_scan_complete_notif *notif;
7457 			SYNC_RESP_STRUCT(notif, pkt);
7458 			if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7459 				CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7460 				iwm_endscan(sc);
7461 			}
7462 			break;
7463 		}
7464 
7465 		case IWM_SCAN_COMPLETE_UMAC: {
7466 			struct iwm_umac_scan_complete *notif;
7467 			SYNC_RESP_STRUCT(notif, pkt);
7468 			if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7469 				CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7470 				iwm_endscan(sc);
7471 			}
7472 			break;
7473 		}
7474 
7475 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
7476 			struct iwm_umac_scan_iter_complete_notif *notif;
7477 			SYNC_RESP_STRUCT(notif, pkt);
7478 			if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7479 				CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7480 				iwm_endscan(sc);
7481 			}
7482 			break;
7483 		}
7484 
7485 		case IWM_REPLY_ERROR: {
7486 			struct iwm_error_resp *resp;
7487 			SYNC_RESP_STRUCT(resp, pkt);
7488 			aprint_error_dev(sc->sc_dev,
7489 			    "firmware error 0x%x, cmd 0x%x\n",
7490 			    le32toh(resp->error_type), resp->cmd_id);
7491 			break;
7492 		}
7493 
7494 		case IWM_TIME_EVENT_NOTIFICATION: {
7495 			struct iwm_time_event_notif *notif;
7496 			SYNC_RESP_STRUCT(notif, pkt);
7497 			break;
7498 		}
7499 
7500 		case IWM_DEBUG_LOG_MSG:
7501 			break;
7502 
7503 		case IWM_MCAST_FILTER_CMD:
7504 			break;
7505 
7506 		case IWM_SCD_QUEUE_CFG: {
7507 			struct iwm_scd_txq_cfg_rsp *rsp;
7508 			SYNC_RESP_STRUCT(rsp, pkt);
7509 			break;
7510 		}
7511 
7512 		default:
7513 			aprint_error_dev(sc->sc_dev,
7514 			    "unhandled firmware response 0x%x 0x%x/0x%x "
7515 			    "rx ring %d[%d]\n",
7516 			    code, pkt->hdr.code, pkt->len_n_flags, qid, idx);
7517 			break;
7518 		}
7519 
7520 		/*
7521 		 * uCode sets bit 0x80 when it originates the notification,
7522 		 * i.e. when the notification is not a direct response to a
7523 		 * command sent by the driver.
7524 		 * For example, uCode issues IWM_REPLY_RX when it sends a
7525 		 * received frame to the driver.
7526 		 */
7527 		if (!(orig_qid & (1 << 7))) {
7528 			iwm_cmd_done(sc, qid, idx);
7529 		}
7530 
7531 		ADVANCE_RXQ(sc);
7532 	}
7533 
7534 	/*
7535 	 * The write pointer must be aligned to 8, or the hardware gets upset.
7536 	 */
7537 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
7538 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
7539 }
7540 
7541 static int
7542 iwm_intr(void *arg)
7543 {
7544 	struct iwm_softc *sc = arg;
7545 
7546 	/* Disable interrupts; iwm_softintr() restores the mask when done. */
7547 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
7548 
7549 	softint_schedule(sc->sc_soft_ih);
7550 	return 1;
7551 }
7552 
7553 static void
7554 iwm_softintr(void *arg)
7555 {
7556 	struct iwm_softc *sc = arg;
7557 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
7558 	uint32_t r1, r2;
7559 	int isperiodic = 0, s;
7560 
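	/*
	 * With ICT enabled the firmware writes interrupt causes into a
	 * table in DMA memory, saving us register reads in the interrupt
	 * path.
	 */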
7561 	if (__predict_true(sc->sc_flags & IWM_FLAG_USE_ICT)) {
7562 		uint32_t *ict = sc->ict_dma.vaddr;
7563 		int tmp;
7564 
7565 		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7566 		    0, sc->ict_dma.size, BUS_DMASYNC_POSTREAD);
7567 		tmp = htole32(ict[sc->ict_cur]);
7568 		if (tmp == 0)
7569 			goto out_ena;	/* Interrupt not for us. */
7570 
7571 		/*
7572 		 * ok, there was something.  keep plowing until we have all.
7573 		 */
7574 		r1 = r2 = 0;
7575 		while (tmp) {
7576 			r1 |= tmp;
7577 			ict[sc->ict_cur] = 0;	/* Acknowledge. */
7578 			sc->ict_cur = (sc->ict_cur + 1) % IWM_ICT_COUNT;
7579 			tmp = htole32(ict[sc->ict_cur]);
7580 		}
7581 
7582 		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7583 		    0, sc->ict_dma.size, BUS_DMASYNC_PREWRITE);
7584 
7585 		/* An all-ones value is not a valid cause; ignore it. */
7586 		if (r1 == 0xffffffff)
7587 			r1 = 0;
7588 
7589 		/* Expand the compact ICT bit layout into the CSR_INT layout. */
7590 		if (r1 & 0xc0000)
7591 			r1 |= 0x8000;
7592 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
7593 	} else {
7594 		r1 = IWM_READ(sc, IWM_CSR_INT);
7595 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
7596 			return;	/* Hardware gone! */
7597 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
7598 	}
7599 	if (r1 == 0 && r2 == 0) {
7600 		goto out_ena;	/* Interrupt not for us. */
7601 	}
7602 
7603 	/* Acknowledge interrupts. */
7604 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
7605 	if (__predict_false(!(sc->sc_flags & IWM_FLAG_USE_ICT)))
7606 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, r2);
7607 
7608 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
7609 #ifdef IWM_DEBUG
7610 		int i;
7611 
7612 		iwm_nic_error(sc);
7613 
7614 		/* Dump driver status (TX and RX rings) while we're here. */
7615 		DPRINTF(("driver status:\n"));
7616 		for (i = 0; i < IWM_MAX_QUEUES; i++) {
7617 			struct iwm_tx_ring *ring = &sc->txq[i];
7618 			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
7619 			    "queued=%-3d\n",
7620 			    i, ring->qid, ring->cur, ring->queued));
7621 		}
7622 		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
7623 		DPRINTF(("  802.11 state %s\n",
7624 		    ieee80211_state_name[sc->sc_ic.ic_state]));
7625 #endif
7626 
7627 		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
7628  fatal:
7629 		s = splnet();
7630 		ifp->if_flags &= ~IFF_UP;
7631 		iwm_stop(ifp, 1);
7632 		splx(s);
7633 		/* Don't restore interrupt mask */
7634 		return;
7635 
7636 	}
7637 
7638 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
7639 		aprint_error_dev(sc->sc_dev,
7640 		    "hardware error, stopping device\n");
7641 		goto fatal;
7642 	}
7643 
7644 	/* firmware chunk loaded */
7645 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
7646 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
7647 		sc->sc_fw_chunk_done = 1;
7648 		wakeup(&sc->sc_fw);
7649 	}
7650 
7651 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
7652 		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP))
7653 			goto fatal;
7654 	}
7655 
7656 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
7657 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
7658 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
7659 			IWM_WRITE_1(sc,
7660 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
7661 		isperiodic = 1;
7662 	}
7663 
7664 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
7665 	    isperiodic) {
7666 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
7667 
7668 		iwm_notif_intr(sc);
7669 
7670 		/* enable periodic interrupt, see above */
7671 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
7672 		    !isperiodic)
7673 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
7674 			    IWM_CSR_INT_PERIODIC_ENA);
7675 	}
7676 
7677 out_ena:
7678 	iwm_restore_interrupts(sc);
7679 }
7680 
7681 /*
7682  * Autoconf glue-sniffing
7683  */
7684 
7685 static const pci_product_id_t iwm_devices[] = {
7686 	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
7687 	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
7688 	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
7689 	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
7690 	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
7691 	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
7692 	PCI_PRODUCT_INTEL_WIFI_LINK_3165_1,
7693 	PCI_PRODUCT_INTEL_WIFI_LINK_3165_2,
7694 	PCI_PRODUCT_INTEL_WIFI_LINK_3168,
7695 	PCI_PRODUCT_INTEL_WIFI_LINK_8260_1,
7696 	PCI_PRODUCT_INTEL_WIFI_LINK_8260_2,
7697 	PCI_PRODUCT_INTEL_WIFI_LINK_4165_1,
7698 	PCI_PRODUCT_INTEL_WIFI_LINK_4165_2,
7699 	PCI_PRODUCT_INTEL_WIFI_LINK_8265,
7700 };
7701 
7702 static int
7703 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
7704 {
7705 	struct pci_attach_args *pa = aux;
7706 
7707 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
7708 		return 0;
7709 
7710 	for (size_t i = 0; i < __arraycount(iwm_devices); i++)
7711 		if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
7712 			return 1;
7713 
7714 	return 0;
7715 }
7716 
7717 static int
7718 iwm_preinit(struct iwm_softc *sc)
7719 {
7720 	int err;
7721 
7722 	err = iwm_start_hw(sc);
7723 	if (err) {
7724 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
7725 		return err;
7726 	}
7727 
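	/*
	 * Run the INIT firmware image just far enough to read the NVM;
	 * that is where the MAC address and supported bands come from.
	 */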
7728 	err = iwm_run_init_mvm_ucode(sc, 1);
7729 	iwm_stop_device(sc);
7730 	if (err)
7731 		return err;
7732 
7733 	aprint_normal_dev(sc->sc_dev, "hw rev 0x%x, fw ver %s, address %s\n",
7734 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, sc->sc_fwver,
7735 	    ether_sprintf(sc->sc_nvm.hw_addr));
7736 
7737 	return 0;
7738 }
7739 
7740 static void
7741 iwm_attach_hook(device_t dev)
7742 {
7743 	struct iwm_softc *sc = device_private(dev);
7744 
7745 	iwm_config_complete(sc);
7746 }
7747 
7748 static void
7749 iwm_attach(device_t parent, device_t self, void *aux)
7750 {
7751 	struct iwm_softc *sc = device_private(self);
7752 	struct pci_attach_args *pa = aux;
7753 	pcireg_t reg, memtype;
7754 	char intrbuf[PCI_INTRSTR_LEN];
7755 	const char *intrstr;
7756 	int err;
7757 	int txq_i;
7758 	const struct sysctlnode *node;
7759 
7760 	sc->sc_dev = self;
7761 	sc->sc_pct = pa->pa_pc;
7762 	sc->sc_pcitag = pa->pa_tag;
7763 	sc->sc_dmat = pa->pa_dmat;
7764 	sc->sc_pciid = pa->pa_id;
7765 
7766 	pci_aprint_devinfo(pa, NULL);
7767 
7768 	if (workqueue_create(&sc->sc_nswq, "iwmns",
7769 	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0))
7770 		panic("%s: could not create workqueue: newstate",
7771 		    device_xname(self));
7772 	sc->sc_soft_ih = softint_establish(SOFTINT_NET, iwm_softintr, sc);
7773 	if (sc->sc_soft_ih == NULL)
7774 		panic("%s: could not establish softint", device_xname(self));
7775 
7776 	/*
7777 	 * Get the offset of the PCI Express Capability Structure in PCI
7778 	 * Configuration Space.
7779 	 */
7780 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
7781 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
7782 	if (err == 0) {
7783 		aprint_error_dev(self,
7784 		    "PCIe capability structure not found!\n");
7785 		return;
7786 	}
7787 
7788 	/* Clear device-specific "PCI retry timeout" register (41h). */
7789 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
7790 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
7791 
7792 	/* Enable bus-mastering */
7793 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7794 	reg |= PCI_COMMAND_MASTER_ENABLE;
7795 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7796 
7797 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
7798 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
7799 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
7800 	if (err) {
7801 		aprint_error_dev(self, "can't map mem space\n");
7802 		return;
7803 	}
7804 
7805 	/* Install interrupt handler. */
7806 	err = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
7807 	if (err) {
7808 		aprint_error_dev(self, "can't allocate interrupt\n");
7809 		return;
7810 	}
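	/*
	 * Legacy INTx wants the PCI interrupt-disable bit cleared;
	 * with MSI/MSI-X we set it so stray INTx assertions are ignored.
	 */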
7811 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7812 	if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX)
7813 		CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
7814 	else
7815 		SET(reg, PCI_COMMAND_INTERRUPT_DISABLE);
7816 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7817 	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
7818 	    sizeof(intrbuf));
7819 	sc->sc_ih = pci_intr_establish_xname(sc->sc_pct, sc->sc_pihp[0],
7820 	    IPL_NET, iwm_intr, sc, device_xname(self));
7821 	if (sc->sc_ih == NULL) {
7822 		aprint_error_dev(self, "can't establish interrupt");
7823 		if (intrstr != NULL)
7824 			aprint_error(" at %s", intrstr);
7825 		aprint_error("\n");
7826 		return;
7827 	}
7828 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
7829 
7830 	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
7831 
7832 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
7833 	switch (PCI_PRODUCT(sc->sc_pciid)) {
7834 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
7835 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
7836 		sc->sc_fwname = "iwlwifi-3160-17.ucode";
7837 		sc->host_interrupt_operation_mode = 1;
7838 		sc->apmg_wake_up_wa = 1;
7839 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7840 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7841 		break;
7842 	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_1:
7843 	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_2:
7844 		sc->sc_fwname = "iwlwifi-7265D-22.ucode";
7845 		sc->host_interrupt_operation_mode = 0;
7846 		sc->apmg_wake_up_wa = 1;
7847 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7848 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7849 		break;
7850 	case PCI_PRODUCT_INTEL_WIFI_LINK_3168:
7851 		sc->sc_fwname = "iwlwifi-3168-22.ucode";
7852 		sc->host_interrupt_operation_mode = 0;
7853 		sc->apmg_wake_up_wa = 1;
7854 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7855 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7856 		break;
7857 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
7858 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
7859 		sc->sc_fwname = "iwlwifi-7260-17.ucode";
7860 		sc->host_interrupt_operation_mode = 1;
7861 		sc->apmg_wake_up_wa = 1;
7862 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7863 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7864 		break;
7865 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
7866 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
7867 		sc->sc_fwname = (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
7868 		    IWM_CSR_HW_REV_TYPE_7265D ?
7869 		    "iwlwifi-7265D-22.ucode": "iwlwifi-7265-17.ucode";
7870 		sc->host_interrupt_operation_mode = 0;
7871 		sc->apmg_wake_up_wa = 1;
7872 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7873 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7874 		break;
7875 	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_1:
7876 	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_2:
7877 	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_1:
7878 	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_2:
7879 		sc->sc_fwname = "iwlwifi-8000C-22.ucode";
7880 		sc->host_interrupt_operation_mode = 0;
7881 		sc->apmg_wake_up_wa = 0;
7882 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
7883 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
7884 		break;
7885 	case PCI_PRODUCT_INTEL_WIFI_LINK_8265:
7886 		sc->sc_fwname = "iwlwifi-8265-22.ucode";
7887 		sc->host_interrupt_operation_mode = 0;
7888 		sc->apmg_wake_up_wa = 0;
7889 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
7890 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
7891 		break;
7892 	default:
7893 		aprint_error_dev(self, "unknown product %#x\n",
7894 		    PCI_PRODUCT(sc->sc_pciid));
7895 		return;
7896 	}
7897 	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
7898 
7899 	/*
7900 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
7901 	 * changed: the revision step now also includes bits 0-1 (there is
7902 	 * no more "dash" value). To keep hw_rev backwards compatible, we
7903 	 * store it in the old format.
7904 	 */
7905 
7906 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
7907 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
7908 		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
7909 
7910 	if (iwm_prepare_card_hw(sc) != 0) {
7911 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
7912 		return;
7913 	}
7914 
7915 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
7916 		uint32_t hw_step;
7917 
7918 		/*
7919 		 * In order to recognize C step the driver should read the
7920 		 * chip version id located at the AUX bus MISC address.
7921 		 */
7922 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
7923 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7924 		DELAY(2);
7925 
7926 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
7927 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7928 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7929 				   25000);
7930 		if (!err) {
7931 			aprint_error_dev(sc->sc_dev,
7932 			    "failed to wake up the nic\n");
7933 			return;
7934 		}
7935 
7936 		if (iwm_nic_lock(sc)) {
7937 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
7938 			hw_step |= IWM_ENABLE_WFPM;
7939 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
7940 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
7941 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
7942 			if (hw_step == 0x3)
7943 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
7944 				    (IWM_SILICON_C_STEP << 2);
7945 			iwm_nic_unlock(sc);
7946 		} else {
7947 			aprint_error_dev(sc->sc_dev,
7948 			    "failed to lock the nic\n");
7949 			return;
7950 		}
7951 	}
7952 
7953 	/*
7954 	 * Allocate DMA memory for firmware transfers.
7955 	 * Must be aligned on a 16-byte boundary.
7956 	 */
7957 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, sc->sc_fwdmasegsz,
7958 	    16);
7959 	if (err) {
7960 		aprint_error_dev(sc->sc_dev,
7961 		    "could not allocate memory for firmware\n");
7962 		return;
7963 	}
7964 
7965 	/* Allocate "Keep Warm" page, used internally by the card. */
7966 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
7967 	if (err) {
7968 		aprint_error_dev(sc->sc_dev,
7969 		    "could not allocate keep warm page\n");
7970 		goto fail1;
7971 	}
7972 
7973 	/* Allocate interrupt cause table (ICT). */
7974 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, IWM_ICT_SIZE,
7975 	    1 << IWM_ICT_PADDR_SHIFT);
7976 	if (err) {
7977 		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
7978 		goto fail2;
7979 	}
7980 
7981 	/* TX scheduler rings must be aligned on a 1KB boundary. */
7982 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
7983 	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
7984 	if (err) {
7985 		aprint_error_dev(sc->sc_dev,
7986 		    "could not allocate TX scheduler rings\n");
7987 		goto fail3;
7988 	}
7989 
7990 	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
7991 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
7992 		if (err) {
7993 			aprint_error_dev(sc->sc_dev,
7994 			    "could not allocate TX ring %d\n", txq_i);
7995 			goto fail4;
7996 		}
7997 	}
7998 
7999 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
8000 	if (err) {
8001 		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
8002 		goto fail5;
8003 	}
8004 
8005 	/* Clear pending interrupts. */
8006 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
8007 
8008 	if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
8009 	    0, CTLTYPE_NODE, device_xname(sc->sc_dev),
8010 	    SYSCTL_DESCR("iwm per-controller controls"),
8011 	    NULL, 0, NULL, 0,
8012 	    CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
8013 	    CTL_EOL)) != 0) {
8014 		aprint_normal_dev(sc->sc_dev,
8015 		    "couldn't create iwm per-controller sysctl node\n");
8016 	}
8017 	if (err == 0) {
8018 		int iwm_nodenum = node->sysctl_num;
8019 
8020 		/* Reload firmware sysctl node */
8021 		if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
8022 		    CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
8023 		    SYSCTL_DESCR("Reload firmware"),
8024 		    iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
8025 		    CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
8026 		    CTL_EOL)) != 0) {
8027 			aprint_normal_dev(sc->sc_dev,
8028 			    "couldn't create load_fw sysctl node\n");
8029 		}
8030 	}
8031 
8032 	callout_init(&sc->sc_calib_to, 0);
8033 	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
8034 	callout_init(&sc->sc_led_blink_to, 0);
8035 	callout_setfunc(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
8036 #ifndef IEEE80211_NO_HT
8037 	if (workqueue_create(&sc->sc_setratewq, "iwmsr",
8038 	    iwm_setrates_task, sc, PRI_NONE, IPL_NET, 0))
8039 		panic("%s: could not create workqueue: setrates",
8040 		    device_xname(self));
8041 	if (workqueue_create(&sc->sc_bawq, "iwmba",
8042 	    iwm_ba_task, sc, PRI_NONE, IPL_NET, 0))
8043 		panic("%s: could not create workqueue: blockack",
8044 		    device_xname(self));
8045 	if (workqueue_create(&sc->sc_htprowq, "iwmhtpro",
8046 	    iwm_htprot_task, sc, PRI_NONE, IPL_NET, 0))
8047 		panic("%s: could not create workqueue: htprot",
8048 		    device_xname(self));
8049 #endif
8050 
8051 	/*
8052 	 * We can't do normal attach before the file system is mounted
8053 	 * because we cannot read the MAC address without loading the
8054 	 * firmware from disk.  So we postpone until mountroot is done.
8055 	 * Notably, this will require a full driver unload/load cycle
8056 	 * (or reboot) in case the firmware is not present when the
8057 	 * hook runs.
8058 	 */
8059 	config_mountroot(self, iwm_attach_hook);
8060 
8061 	return;
8062 
8063 fail5:	while (--txq_i >= 0)
8064 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
8065 fail4:	iwm_dma_contig_free(&sc->sched_dma);
8066 fail3:	if (sc->ict_dma.vaddr != NULL)
8067 		iwm_dma_contig_free(&sc->ict_dma);
8068 fail2:	iwm_dma_contig_free(&sc->kw_dma);
8069 fail1:	iwm_dma_contig_free(&sc->fw_dma);
8070 }
8071 
8072 static int
8073 iwm_config_complete(struct iwm_softc *sc)
8074 {
8075 	device_t self = sc->sc_dev;
8076 	struct ieee80211com *ic = &sc->sc_ic;
8077 	struct ifnet *ifp = &sc->sc_ec.ec_if;
8078 	int err;
8079 
8080 	KASSERT(!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED));
8081 
8082 	err = iwm_preinit(sc);
8083 	if (err)
8084 		return err;
8085 
8086 	/*
8087 	 * Attach interface
8088 	 */
8089 	ic->ic_ifp = ifp;
8090 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
8091 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
8092 	ic->ic_state = IEEE80211_S_INIT;
8093 
8094 	/* Set device capabilities. */
8095 	ic->ic_caps =
8096 	    IEEE80211_C_WEP |		/* WEP */
8097 	    IEEE80211_C_WPA |		/* 802.11i */
8098 #ifdef notyet
8099 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
8100 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
8101 #endif
8102 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
8103 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
8104 
8105 #ifndef IEEE80211_NO_HT
8106 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
8107 	ic->ic_htxcaps = 0;
8108 	ic->ic_txbfcaps = 0;
8109 	ic->ic_aselcaps = 0;
8110 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
8111 #endif
8112 
8113 	/* all hardware can do 2.4GHz band */
8114 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
8115 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
8116 
8117 	/* not all hardware can do 5GHz band */
8118 	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
8119 		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
8120 
8121 #ifndef IEEE80211_NO_HT
8122 	if (sc->sc_nvm.sku_cap_11n_enable)
8123 		iwm_setup_ht_rates(sc);
8124 #endif
8125 
8126 	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
8127 		sc->sc_phyctxt[i].id = i;
8128 	}
8129 
8130 	sc->sc_amrr.amrr_min_success_threshold =  1;
8131 	sc->sc_amrr.amrr_max_success_threshold = 15;
8132 
8133 	/* IBSS channel undefined for now. */
8134 	ic->ic_ibss_chan = &ic->ic_channels[1];
8135 
8136 #if 0
8137 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
8138 #endif
8139 
8140 	ifp->if_softc = sc;
8141 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
8142 	ifp->if_init = iwm_init;
8143 	ifp->if_stop = iwm_stop;
8144 	ifp->if_ioctl = iwm_ioctl;
8145 	ifp->if_start = iwm_start;
8146 	ifp->if_watchdog = iwm_watchdog;
8147 	IFQ_SET_READY(&ifp->if_snd);
8148 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
8149 
8150 	if_initialize(ifp);
8151 	ieee80211_ifattach(ic);
8152 	/* Use common softint-based if_input */
8153 	ifp->if_percpuq = if_percpuq_create(ifp);
8154 	if_register(ifp);
8155 
8156 	ic->ic_node_alloc = iwm_node_alloc;
8157 
8158 	/* Override 802.11 state transition machine. */
8159 	sc->sc_newstate = ic->ic_newstate;
8160 	ic->ic_newstate = iwm_newstate;
8161 
8162 	/* XXX media locking needs revisiting */
8163 	mutex_init(&sc->sc_media_mtx, MUTEX_DEFAULT, IPL_SOFTNET);
8164 	ieee80211_media_init_with_lock(ic,
8165 	    iwm_media_change, ieee80211_media_status, &sc->sc_media_mtx);
8166 
8167 	ieee80211_announce(ic);
8168 
8169 	iwm_radiotap_attach(sc);
8170 
8171 	if (pmf_device_register(self, NULL, NULL))
8172 		pmf_class_network_register(self, ifp);
8173 	else
8174 		aprint_error_dev(self, "couldn't establish power handler\n");
8175 
8176 	sc->sc_flags |= IWM_FLAG_ATTACHED;
8177 
8178 	return 0;
8179 }
8180 
8181 void
8182 iwm_radiotap_attach(struct iwm_softc *sc)
8183 {
8184 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8185 
8186 	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
8187 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
8188 	    &sc->sc_drvbpf);
8189 
8190 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
8191 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
8192 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
8193 
8194 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
8195 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
8196 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
8197 }
8198 
8199 #if 0
8200 static void
8201 iwm_init_task(void *arg)
8202 {
8203 	struct iwm_softc *sc = arg;
8204 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8205 	int s;
8206 
8207 	rw_enter_write(&sc->ioctl_rwl);
8208 	s = splnet();
8209 
8210 	iwm_stop(ifp, 0);
8211 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
8212 		iwm_init(ifp);
8213 
8214 	splx(s);
8215 	rw_exit(&sc->ioctl_rwl);
8216 }
8217 
8218 static void
8219 iwm_wakeup(struct iwm_softc *sc)
8220 {
8221 	pcireg_t reg;
8222 
8223 	/* Clear device-specific "PCI retry timeout" register (41h). */
8224 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
8225 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
8226 
8227 	iwm_init_task(sc);
8228 }
8229 
8230 static int
8231 iwm_activate(device_t self, enum devact act)
8232 {
8233 	struct iwm_softc *sc = device_private(self);
8234 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8235 
8236 	switch (act) {
8237 	case DVACT_DEACTIVATE:
8238 		if (ifp->if_flags & IFF_RUNNING)
8239 			iwm_stop(ifp, 0);
8240 		return 0;
8241 	default:
8242 		return EOPNOTSUPP;
8243 	}
8244 }
8245 #endif
8246 
8247 CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
8248 	NULL, NULL);
8249 
8250 static int
8251 iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
8252 {
8253 	struct sysctlnode node;
8254 	struct iwm_softc *sc;
8255 	int err, t;
8256 
8257 	node = *rnode;
8258 	sc = node.sysctl_data;
8259 	t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
8260 	node.sysctl_data = &t;
8261 	err = sysctl_lookup(SYSCTLFN_CALL(&node));
8262 	if (err || newp == NULL)
8263 		return err;
8264 
8265 	if (t == 0)
8266 		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
8267 	return 0;
8268 }
8269 
8270 SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
8271 {
8272 	const struct sysctlnode *rnode;
8273 #ifdef IWM_DEBUG
8274 	const struct sysctlnode *cnode;
8275 #endif /* IWM_DEBUG */
8276 	int rc;
8277 
8278 	if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
8279 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
8280 	    SYSCTL_DESCR("iwm global controls"),
8281 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
8282 		goto err;
8283 
8284 	iwm_sysctl_root_num = rnode->sysctl_num;
8285 
8286 #ifdef IWM_DEBUG
8287 	/* control debugging printfs */
8288 	if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
8289 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
8290 	    "debug", SYSCTL_DESCR("Enable debugging output"),
8291 	    NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
8292 		goto err;
8293 #endif /* IWM_DEBUG */
8294 
8295 	return;
8296 
8297  err:
8298 	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
8299 }
8300