/*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * DragonFly work
 *
 * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
 *	 changes to remove per-device network interface (DragonFly has not
 *	 caught up to that yet on the WLAN side).
 *
 * Comprehensive list of adjustments for DragonFly not #ifdef'd:
 *	malloc -> kmalloc	(in particular, changing improper M_NOWAIT
 *				 specifications to M_INTWAIT.  We still don't
 *				 understand why FreeBSD uses M_NOWAIT for
 *				 critical must-not-fail kmalloc()s).
 *	free -> kfree
 *	printf -> kprintf
 *	(bug fix) memset in iwm_reset_rx_ring.
 *	(debug) added several kprintf()s on error
 *
 *	header file paths (DFly allows localized path specifications).
 *	minor header file differences.
 *
 * Comprehensive list of adjustments for DragonFly #ifdef'd:
 *	(safety) added register read-back serialization in iwm_reset_rx_ring().
 *	packet counters
 *	msleep -> lksleep
 *	mtx -> lk  (mtx functions -> lockmgr functions)
 *	callout differences
 *	taskqueue differences
 *	MSI differences
 *	bus_setup_intr() differences
 *	minor PCI config register naming differences
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/endian.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#include <netproto/802_11/ieee80211_ratectl.h>
#include <netproto/802_11/ieee80211_radiotap.h>

#include "if_iwmreg.h"
#include "if_iwmvar.h"
#include "if_iwm_config.h"
#include "if_iwm_debug.h"
#include "if_iwm_notif_wait.h"
#include "if_iwm_util.h"
#include "if_iwm_binding.h"
#include "if_iwm_phy_db.h"
#include "if_iwm_mac_ctxt.h"
#include "if_iwm_phy_ctxt.h"
#include "if_iwm_time_event.h"
#include "if_iwm_power.h"
#include "if_iwm_scan.h"
#include "if_iwm_sf.h"
#include "if_iwm_sta.h"

#include "if_iwm_pcie_trans.h"
#include "if_iwm_led.h"
#include "if_iwm_fw.h"

#if defined(__DragonFly__)
#define mtodo(m, off)	mtodoff((m), void *, (off))
#endif

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2, IWM_RATE_1M_PLCP  },
	{   4, IWM_RATE_2M_PLCP  },
	{  11, IWM_RATE_5M_PLCP  },
	{  22, IWM_RATE_11M_PLCP },
	{  12, IWM_RATE_6M_PLCP  },
	{  18, IWM_RATE_9M_PLCP  },
	{  24, IWM_RATE_12M_PLCP },
	{  36, IWM_RATE_18M_PLCP },
	{  48, IWM_RATE_24M_PLCP },
	{  72, IWM_RATE_36M_PLCP },
	{  96, IWM_RATE_48M_PLCP },
	{ 108, IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
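
/*
 * The "rate" field above is in 500 kb/s units, the same encoding
 * net80211 uses: { 2, IWM_RATE_1M_PLCP } is 1 Mb/s CCK and
 * { 108, IWM_RATE_54M_PLCP } is 54 Mb/s OFDM.  "plcp" is the
 * corresponding legacy-rate PLCP value handed to the firmware in the
 * TX command.  The first four entries are the CCK rates, which is why
 * IWM_RIDX_OFDM is 4.
 */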

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

#define IWM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_UCODE_CALIB_TIMEOUT	(2*hz)

struct iwm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
					   enum iwm_ucode_type,
					   const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *);
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
				  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
				   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
					       struct iwm_nvm_data *,
					       const uint16_t *,
					       const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
				      const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
					     bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
						const struct iwm_fw_img *,
						int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
					   const struct iwm_fw_img *,
					   int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
					       const struct iwm_fw_img *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
					  const struct iwm_fw_img *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
					  enum iwm_ucode_type);
static int	iwm_run_init_ucode(struct iwm_softc *, int);
static int	iwm_config_ltr(struct iwm_softc *sc);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
				  struct iwm_rx_packet *);
static int	iwm_get_noise(struct iwm_softc *,
			      const struct iwm_statistics_rx_non_phy *);
static void	iwm_handle_rx_statistics(struct iwm_softc *,
					 struct iwm_rx_packet *);
static bool	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *,
			    uint32_t, bool);
static int	iwm_rx_tx_cmd_single(struct iwm_softc *,
				     struct iwm_rx_packet *,
				     struct iwm_node *);
static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
				 uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct mbuf *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
		       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static struct ieee80211_node *
	iwm_node_alloc(struct ieee80211vap *,
		       const uint8_t[IEEE80211_ADDR_LEN]);
static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static boolean_t iwm_is_lar_supported(struct iwm_softc *);
static boolean_t iwm_is_wifi_mcc_supported(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
	iwm_vap_create(struct ieee80211com *,
		       const char [IFNAMSIZ], int,
		       enum ieee80211_opmode, int,
		       const uint8_t [IEEE80211_ADDR_LEN],
		       const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_xmit_queue_drain(struct iwm_softc *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

#if defined(__DragonFly__)
static int	iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
#endif

static int	iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_img *fws;
	struct iwm_fw_desc *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.img[type];
	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
		return EINVAL;

	fwone = &fws->sec[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->offset, data, sizeof(uint32_t));

	/* rest is data */
	fwone->data = data + sizeof(uint32_t);
	fwone->len = dlen - sizeof(uint32_t);

	fws->fw_count++;

	return 0;
}
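
/*
 * Layout of a firmware section TLV payload as consumed above: the
 * first 32-bit word is the device load offset, the remainder is the
 * section image itself, i.e.
 *
 *	offset 0:	uint32_t	device load offset
 *	offset 4:	uint8_t[]	section data (dlen - 4 bytes)
 */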

#define IWM_DEFAULT_SCAN_CHANNELS	40

/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
    struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_api *ucode_api = (const void *)data;
	uint32_t api_index = le32toh(ucode_api->api_index);
	uint32_t api_flags = le32toh(ucode_api->api_flags);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
		device_printf(sc->sc_dev,
		    "api flags index %u larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_api, i + 32 * api_index);
	}

	return 0;
}
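
/*
 * Each IWM_UCODE_TLV_API_CHANGES_SET / IWM_UCODE_TLV_ENABLED_CAPABILITIES
 * TLV carries one 32-bit window of a larger bitmap, selected by
 * api_index: bit i of window n maps to overall bit (32 * n + i).
 * For example, api_index 1 with bit 3 set marks API/capability bit 35
 * as enabled.
 */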

static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
    struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
	uint32_t api_index = le32toh(ucode_capa->api_index);
	uint32_t api_flags = le32toh(ucode_capa->api_capa);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
		device_printf(sc->sc_dev,
		    "capa flags index %u larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_capa, i + 32 * api_index);
	}

	return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	memset(fw->img, 0, sizeof(fw->img));
}

static int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	const struct iwm_ucode_tlv *tlv;
	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	uint32_t tlv_len;
	uint32_t usniffer_img;
	const uint8_t *tlv_data;
	uint32_t paging_mem_size;
	int num_of_cpus;
	int error = 0;
	size_t len;

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	fwp = firmware_get(sc->cfg->fw_name);
	if (fwp == NULL) {
		error = ENOENT;
		device_printf(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->cfg->fw_name, error);
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	capa->flags = 0;
	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}

	ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);
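
	/*
	 * What follows the header is a stream of TLV records, each laid
	 * out as
	 *
	 *	uint32_t type;		(little endian)
	 *	uint32_t length;	(little endian, in bytes)
	 *	uint8_t  data[roundup2(length, 4)];
	 *
	 * i.e. each record's payload is padded out to a 4-byte boundary,
	 * which is why the loop below advances by roundup2(tlv_len, 4).
	 */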

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);
		tlv_data = tlv->data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}
		len -= roundup2(tlv_len, 4);
		data += sizeof(*tlv) + roundup2(tlv_len, 4);

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->max_probe_length =
			    le32_to_cpup((const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (capa->max_probe_length >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
			if (num_of_cpus == 2) {
				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_INIT].is_dual_cpus =
					TRUE;
				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
					TRUE;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%u) != sizeof(iwm_tlv_calib_data) (%zu)\n",
				    __func__, tlv_len,
				    sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__, error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) != sizeof(uint32_t)\n",
				    __func__, tlv_len);
				goto parse_out;
			}
			sc->sc_fw.phy_config =
			    le32_to_cpup((const uint32_t *)tlv_data);
			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
			    IWM_FW_PHY_CFG_TX_CHAIN) >>
			    IWM_FW_PHY_CFG_TX_CHAIN_POS;
			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
			    IWM_FW_PHY_CFG_RX_CHAIN) >>
			    IWM_FW_PHY_CFG_RX_CHAIN_POS;
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			if (tlv_len != sizeof(struct iwm_ucode_api)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_CMD_VERSIONS:
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "%s: Paging: paging enabled (size = %u bytes)\n",
			    __func__, paging_mem_size);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				device_printf(sc->sc_dev,
				    "%s: Paging: driver supports up to %u bytes for paging image\n",
				    __func__, IWM_MAX_PAGING_IMAGE_SIZE);
				error = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				device_printf(sc->sc_dev,
				    "%s: Paging: image isn't a multiple of %u\n",
				    __func__, IWM_FW_PAGING_SIZE);
				error = EINVAL;
				goto out;
			}

			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
			sc->sc_fw.img[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			capa->n_scan_channels =
			    le32_to_cpup((const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%u.%u.%u",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

 out:
	if (error) {
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	}

	return error;
}

/*
 * DMA resource routines
 */

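/*
 * The allocations below all go through iwm_dma_contig_alloc(), a small
 * busdma wrapper in this driver that hands back a single physically
 * contiguous, coherent buffer described by an iwm_dma_info:
 * iwm_dma_contig_alloc(tag, &dma, size, alignment) fills in dma->vaddr
 * and dma->paddr, and iwm_dma_contig_free(&dma) releases the buffer.
 * The alignment argument in each case reflects what the hardware
 * demands for that particular structure.
 */
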
/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	size_t descsz;
	int count, i, error;

	ring->cur = 0;
	if (sc->cfg->mqrx_supported) {
		count = IWM_RX_MQ_RING_COUNT;
		descsz = sizeof(uint64_t);
	} else {
		count = IWM_RX_LEGACY_RING_COUNT;
		descsz = sizeof(uint32_t);
	}
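
	/*
	 * On multi-queue capable hardware the "free" descriptor ring
	 * holds full 64-bit DMA addresses, while legacy hardware uses a
	 * ring of 32-bit (paddr >> 8) words (see iwm_rx_addbuf()); that
	 * is all the descsz difference above encodes.
	 */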

	/* Allocate RX descriptors (256-byte aligned). */
	size = count * descsz;
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size,
	    256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->free_desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	if (sc->cfg->mqrx_supported) {
		size = count * sizeof(uint32_t);
		error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
		    size, 256);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not allocate RX ring DMA memory\n");
			goto fail;
		}
	}

	/* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
	    0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
	    BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	if (sc->rxq.stat)
		memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int count, i;

	iwm_dma_contig_free(&ring->free_desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);
	iwm_dma_contig_free(&ring->used_desc_dma);

	count = sc->cfg->mqrx_supported ? IWM_RX_MQ_RING_COUNT :
	    IWM_RX_LEGACY_RING_COUNT;

	for (i = 0; i < count; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
	    0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    maxsize, nsegments, maxsize,
	    BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

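	/*
	 * Every TX slot gets a pointer into the contiguous command
	 * buffer: slot i's command lives at
	 * cmd_dma.paddr + i * sizeof(struct iwm_device_cmd), and
	 * scratch_paddr records where the scratch field inside that
	 * slot's TX command sits, so iwm_tx() can hand the firmware a
	 * DRAM scratch pointer without recomputing the offset.
	 */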
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
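
/*
 * With ICT enabled the device DMAs 32-bit interrupt-cause words into
 * the table above instead of requiring a slow CSR read on every
 * interrupt; iwm_intr() then consumes entries starting at ict_cur,
 * zeroing each one as it goes, which is why the table must start out
 * cleared here.
 */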

/* iwlwifi pcie/trans.c */

/*
 * Since this hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}
	sc->sc_firmware_state = 0;
	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* stop and reset the on-board processor */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 */
	iwm_disable_interrupts(sc);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	iwm_prepare_card_hw(sc);
}

/* iwlwifi: mvm/ops.c */
static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
	    reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not be able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

static int
iwm_nic_rx_mq_init(struct iwm_softc *sc)
{
	int enabled;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Stop RX DMA. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
	/* Disable RX used and free queue operation. */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);

	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
	    sc->rxq.free_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
	    sc->rxq.used_desc_dma.paddr);
	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
	    sc->rxq.stat_dma.paddr);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);

	/* We configure only queue 0 for now. */
	enabled = ((1 << 0) << 16) | (1 << 0);

	/* Enable RX DMA, 4KB buffer size. */
	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
	    IWM_RFH_DMA_EN_ENABLE_VAL |
	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);

	/* Enable RX DMA snooping. */
	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
	    (sc->cfg->integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));

	/* Enable the configured queue(s). */
	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);

	iwm_nic_unlock(sc);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);

	return (0);
}

static int
iwm_nic_rx_legacy_init(struct iwm_softc *sc)
{

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG,
	    sc->rxq.free_desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	iwm_nic_unlock(sc);

	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	return 0;
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (sc->cfg->mqrx_supported)
		return iwm_nic_rx_mq_init(sc);
	else
		return iwm_nic_rx_legacy_init(sc);
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	int qmsk;

	qmsk = 1 << qid;

	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev, "%s: cannot enable txq %d\n",
		    __func__, qid);
		return EBUSY;
	}

	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

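	/*
	 * The command queue is programmed directly through the
	 * scheduler's PRPH registers, since it has to come up before
	 * the firmware can accept host commands.  Every other queue is
	 * configured with an IWM_SCD_QUEUE_CFG host command instead,
	 * which itself travels over the already-enabled command queue.
	 */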
	if (qid == IWM_CMD_QUEUE) {
		/* Disable the scheduler. */
		iwm_write_prph(sc, IWM_SCD_EN_CTRL, 0);

		/* Stop the TX queue prior to configuration. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		/* Disable aggregations for this queue. */
		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, qmsk);

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);

		/* Enable the scheduler for this queue. */
		iwm_write_prph(sc, IWM_SCD_EN_CTRL, qmsk);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}
1734
1735 static int
iwm_trans_pcie_fw_alive(struct iwm_softc * sc,uint32_t scd_base_addr)1736 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1737 {
1738 int error, chnl;
1739
1740 int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1741 IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1742
1743 if (!iwm_nic_lock(sc))
1744 return EBUSY;
1745
1746 iwm_ict_reset(sc);
1747
1748 sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1749 if (scd_base_addr != 0 &&
1750 scd_base_addr != sc->scd_base_addr) {
1751 device_printf(sc->sc_dev,
1752 "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1753 __func__, scd_base_addr, sc->scd_base_addr);
1754 }
1755
1756 iwm_nic_unlock(sc);
1757
1758 /* reset context data, TX status and translation data */
1759 error = iwm_write_mem(sc,
1760 sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1761 NULL, clear_dwords);
1762 if (error)
1763 return EBUSY;
1764
1765 if (!iwm_nic_lock(sc))
1766 return EBUSY;
1767
1768 /* Set physical address of TX scheduler rings (1KB aligned). */
1769 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1770
1771 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1772
1773 iwm_nic_unlock(sc);
1774
1775 /* enable command channel */
1776 error = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
1777 if (error)
1778 return error;
1779
1780 if (!iwm_nic_lock(sc))
1781 return EBUSY;
1782
1783 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1784
1785 /* Enable DMA channels. */
1786 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1787 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1788 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1789 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1790 }
1791
1792 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1793 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1794
1795 iwm_nic_unlock(sc);
1796
1797 /* Enable L1-Active */
1798 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
1799 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1800 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1801 }
1802
1803 return error;
1804 }
1805
1806 /*
1807 * NVM read access and content parsing. We do not support
1808 * external NVM or writing NVM.
1809 * iwlwifi/mvm/nvm.c
1810 */
1811
1812 /* Default NVM size to read */
1813 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
1814
1815 #define IWM_NVM_WRITE_OPCODE 1
1816 #define IWM_NVM_READ_OPCODE 0
1817
1818 /* load nvm chunk response */
1819 enum {
1820 IWM_READ_NVM_CHUNK_SUCCEED = 0,
1821 IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1822 };
1823
1824 static int
1825 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1826 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1827 {
1828 struct iwm_nvm_access_cmd nvm_access_cmd = {
1829 .offset = htole16(offset),
1830 .length = htole16(length),
1831 .type = htole16(section),
1832 .op_code = IWM_NVM_READ_OPCODE,
1833 };
1834 struct iwm_nvm_access_resp *nvm_resp;
1835 struct iwm_rx_packet *pkt;
1836 struct iwm_host_cmd cmd = {
1837 .id = IWM_NVM_ACCESS_CMD,
1838 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1839 .data = { &nvm_access_cmd, },
1840 };
1841 int ret, bytes_read, offset_read;
1842 uint8_t *resp_data;
1843
1844 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1845
1846 ret = iwm_send_cmd(sc, &cmd);
1847 if (ret) {
1848 device_printf(sc->sc_dev,
1849 "Could not send NVM_ACCESS command (error=%d)\n", ret);
1850 return ret;
1851 }
1852
1853 pkt = cmd.resp_pkt;
1854
1855 /* Extract NVM response */
1856 nvm_resp = (void *)pkt->data;
1857 ret = le16toh(nvm_resp->status);
1858 bytes_read = le16toh(nvm_resp->length);
1859 offset_read = le16toh(nvm_resp->offset);
1860 resp_data = nvm_resp->data;
1861 if (ret) {
1862 if ((offset != 0) &&
1863 (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1864 /*
1865  * Meaning of NOT_VALID_ADDRESS: the driver tried to read
1866  * a chunk at an address that is a multiple of 2K and got
1867  * an error because that address is empty.
1868  * Meaning of (offset != 0): the driver has already read
1869  * valid data from another chunk, so this case is not
1870  * an error.
1871  */
1872 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1873 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1874 offset);
1875 *len = 0;
1876 ret = 0;
1877 } else {
1878 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1879 "NVM access command failed with status %d\n", ret);
1880 ret = EIO;
1881 }
1882 goto exit;
1883 }
1884
1885 if (offset_read != offset) {
1886 device_printf(sc->sc_dev,
1887 "NVM ACCESS response with invalid offset %d\n",
1888 offset_read);
1889 ret = EINVAL;
1890 goto exit;
1891 }
1892
1893 if (bytes_read > length) {
1894 device_printf(sc->sc_dev,
1895 "NVM ACCESS response with too much data "
1896 "(%d bytes requested, %d bytes received)\n",
1897 length, bytes_read);
1898 ret = EINVAL;
1899 goto exit;
1900 }
1901
1902 /* Append the chunk to the output buffer. */
1903 memcpy(data + offset, resp_data, bytes_read);
1904 *len = bytes_read;
1905
1906 exit:
1907 iwm_free_resp(sc, &cmd);
1908 return ret;
1909 }
1910
1911 /*
1912  * Reads an NVM section completely.
1913  * NICs prior to the 7000 family don't have a real NVM, but just read
1914  * section 0, which is the EEPROM.  Because EEPROM reads are not
1915  * bounded by the uCode, we must check ourselves that we don't
1916  * overflow by trying to read more than the EEPROM size.
1917  * For 7000-family NICs, we supply the maximal size we can read, and
1918  * the uCode fills the response with as much data as fits without
1919  * overflowing, so no check is needed.
1920  */
1921 static int
1922 iwm_nvm_read_section(struct iwm_softc *sc,
1923 uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1924 {
1925 uint16_t seglen, length, offset = 0;
1926 int ret;
1927
1928 /* Set nvm section read length */
1929 length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1930
1931 seglen = length;
1932
1933 /* Read the NVM until exhausted (reading less than requested) */
1934 while (seglen == length) {
1935 /* Make sure we don't overflow the destination buffer. */
1936 if ((size_read + offset + length) >
1937 sc->cfg->eeprom_size) {
1938 device_printf(sc->sc_dev,
1939 "EEPROM size is too small for NVM\n");
1940 return ENOBUFS;
1941 }
1942
1943 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1944 if (ret) {
1945 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1946 "Cannot read NVM from section %d offset %d, length %d\n",
1947 section, offset, length);
1948 return ret;
1949 }
1950 offset += seglen;
1951 }
1952
1953 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1954 "NVM section %d read completed\n", section);
1955 *len = offset;
1956 return 0;
1957 }
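/*
 * Illustrative sketch (not driver code): reduced to its core, the loop
 * above reads a section in 2K chunks until the firmware returns a short
 * read, which marks the end of the section.  The function and buffer
 * below are hypothetical.
 */
#if 0
static int
example_read_section(struct iwm_softc *sc, uint16_t section,
    uint8_t *buf /* assumed large enough for the section */)
{
	uint16_t off = 0, got = IWM_NVM_DEFAULT_CHUNK_SIZE;

	/* A short read (got < chunk size) terminates the loop. */
	while (got == IWM_NVM_DEFAULT_CHUNK_SIZE) {
		if (iwm_nvm_read_chunk(sc, section, off,
		    IWM_NVM_DEFAULT_CHUNK_SIZE, buf, &got) != 0)
			return EIO;
		off += got;	/* 'off' ends up as the section length */
	}
	return 0;
}
#endif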
1958
1959 /*
1960 * BEGIN IWM_NVM_PARSE
1961 */
1962
1963 /* iwlwifi/iwl-nvm-parse.c */
1964
1965 /*
1966 * Translate EEPROM flags to net80211.
1967 */
1968 static uint32_t
1969 iwm_eeprom_channel_flags(uint16_t ch_flags)
1970 {
1971 uint32_t nflags;
1972
1973 nflags = 0;
1974 if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1975 nflags |= IEEE80211_CHAN_PASSIVE;
1976 if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1977 nflags |= IEEE80211_CHAN_NOADHOC;
1978 if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1979 nflags |= IEEE80211_CHAN_DFS;
1980 /* Just in case. */
1981 nflags |= IEEE80211_CHAN_NOADHOC;
1982 }
1983
1984 return (nflags);
1985 }
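/*
 * Illustrative example: a channel marked RADAR but neither ACTIVE nor
 * IBSS-capable translates to PASSIVE | NOADHOC | DFS, i.e. it may only
 * be used after radar detection and never for adhoc.
 */
#if 0
	uint32_t nf = iwm_eeprom_channel_flags(IWM_NVM_CHANNEL_VALID |
	    IWM_NVM_CHANNEL_RADAR);
	/*
	 * nf == IEEE80211_CHAN_PASSIVE | IEEE80211_CHAN_NOADHOC |
	 *       IEEE80211_CHAN_DFS
	 */
#endif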
1986
1987 static void
1988 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1989 int maxchans, int *nchans, int ch_idx, size_t ch_num,
1990 const uint8_t bands[])
1991 {
1992 const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1993 uint32_t nflags;
1994 uint16_t ch_flags;
1995 uint8_t ieee;
1996 int error;
1997
1998 for (; ch_idx < ch_num; ch_idx++) {
1999 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2000 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2001 ieee = iwm_nvm_channels[ch_idx];
2002 else
2003 ieee = iwm_nvm_channels_8000[ch_idx];
2004
2005 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2006 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2007 "Ch. %d Flags %x [%sGHz] - No traffic\n",
2008 ieee, ch_flags,
2009 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2010 "5.2" : "2.4");
2011 continue;
2012 }
2013
2014 nflags = iwm_eeprom_channel_flags(ch_flags);
2015 error = ieee80211_add_channel(chans, maxchans, nchans,
2016 ieee, 0, 0, nflags, bands);
2017 if (error != 0)
2018 break;
2019
2020 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2021 "Ch. %d Flags %x [%sGHz] - Added\n",
2022 ieee, ch_flags,
2023 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2024 "5.2" : "2.4");
2025 }
2026 }
2027
2028 static void
2029 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2030 struct ieee80211_channel chans[])
2031 {
2032 struct iwm_softc *sc = ic->ic_softc;
2033 struct iwm_nvm_data *data = sc->nvm_data;
2034 uint8_t bands[IEEE80211_MODE_BYTES];
2035 size_t ch_num;
2036
2037 memset(bands, 0, sizeof(bands));
2038 /* 1-13: 11b/g channels. */
2039 setbit(bands, IEEE80211_MODE_11B);
2040 setbit(bands, IEEE80211_MODE_11G);
2041 iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2042 IWM_NUM_2GHZ_CHANNELS - 1, bands);
2043
2044 /* 14: 11b channel only. */
2045 clrbit(bands, IEEE80211_MODE_11G);
2046 iwm_add_channel_band(sc, chans, maxchans, nchans,
2047 IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2048
2049 if (data->sku_cap_band_52GHz_enable) {
2050 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2051 ch_num = nitems(iwm_nvm_channels);
2052 else
2053 ch_num = nitems(iwm_nvm_channels_8000);
2054 memset(bands, 0, sizeof(bands));
2055 setbit(bands, IEEE80211_MODE_11A);
2056 iwm_add_channel_band(sc, chans, maxchans, nchans,
2057 IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2058 }
2059 }
2060
2061 static void
2062 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2063 const uint16_t *mac_override, const uint16_t *nvm_hw)
2064 {
2065 const uint8_t *hw_addr;
2066
2067 if (mac_override) {
2068 static const uint8_t reserved_mac[] = {
2069 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2070 };
2071
2072 hw_addr = (const uint8_t *)(mac_override +
2073 IWM_MAC_ADDRESS_OVERRIDE_8000);
2074
2075 /*
2076  * Store the MAC address from the MAO section.
2077  * No byte swapping is required in the MAO section.
2078  */
2079 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2080
2081 /*
2082 * Force the use of the OTP MAC address in case of reserved MAC
2083 * address in the NVM, or if address is given but invalid.
2084 */
2085 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2086 !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2087 iwm_is_valid_ether_addr(data->hw_addr) &&
2088 !IEEE80211_IS_MULTICAST(data->hw_addr))
2089 return;
2090
2091 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2092 "%s: mac address from nvm override section invalid\n",
2093 __func__);
2094 }
2095
2096 if (nvm_hw) {
2097 /* read the mac address from WFMP registers */
2098 uint32_t mac_addr0 =
2099 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2100 uint32_t mac_addr1 =
2101 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2102
2103 hw_addr = (const uint8_t *)&mac_addr0;
2104 data->hw_addr[0] = hw_addr[3];
2105 data->hw_addr[1] = hw_addr[2];
2106 data->hw_addr[2] = hw_addr[1];
2107 data->hw_addr[3] = hw_addr[0];
2108
2109 hw_addr = (const uint8_t *)&mac_addr1;
2110 data->hw_addr[4] = hw_addr[1];
2111 data->hw_addr[5] = hw_addr[0];
2112
2113 return;
2114 }
2115
2116 device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2117 memset(data->hw_addr, 0, sizeof(data->hw_addr));
2118 }
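/*
 * Illustrative example of the WFMP fallback path above: the 6-byte
 * address is spread over two 32-bit registers, each consumed
 * most-significant byte first.  On a little-endian host (where the
 * htole32() above is a no-op), hypothetical register values
 * mac_addr0 = 0xddccbbaa and mac_addr1 = 0x0000ffee yield
 *
 *	hw_addr = dd:cc:bb:aa:ff:ee
 */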
2119
2120 static int
2121 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2122 const uint16_t *phy_sku)
2123 {
2124 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2125 return le16_to_cpup(nvm_sw + IWM_SKU);
2126
2127 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2128 }
2129
2130 static int
2131 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2132 {
2133 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2134 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2135 else
2136 return le32_to_cpup((const uint32_t *)(nvm_sw +
2137 IWM_NVM_VERSION_8000));
2138 }
2139
2140 static int
2141 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2142 const uint16_t *phy_sku)
2143 {
2144 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2145 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2146
2147 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2148 }
2149
2150 static int
2151 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2152 {
2153 int n_hw_addr;
2154
2155 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000)
2156 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2157
2158 n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2159
2160 return n_hw_addr & IWM_N_HW_ADDR_MASK;
2161 }
2162
2163 static void
2164 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2165 uint32_t radio_cfg)
2166 {
2167 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2168 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2169 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2170 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2171 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2172 return;
2173 }
2174
2175 /* set the radio configuration for family 8000 */
2176 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2177 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2178 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2179 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
2180 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2181 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2182 }
2183
2184 static int
2185 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2186 const uint16_t *nvm_hw, const uint16_t *mac_override)
2187 {
2188 #ifdef notyet /* for FAMILY 9000 */
2189 if (cfg->mac_addr_from_csr) {
2190 iwm_set_hw_address_from_csr(sc, data);
2191 } else
2192 #endif
2193 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2194 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2195
2196 /* Bytes are stored as little-endian 16-bit words, so the address reads 2-1-4-3-6-5. */
2197 data->hw_addr[0] = hw_addr[1];
2198 data->hw_addr[1] = hw_addr[0];
2199 data->hw_addr[2] = hw_addr[3];
2200 data->hw_addr[3] = hw_addr[2];
2201 data->hw_addr[4] = hw_addr[5];
2202 data->hw_addr[5] = hw_addr[4];
2203 } else {
2204 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2205 }
2206
2207 if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2208 device_printf(sc->sc_dev, "no valid mac address was found\n");
2209 return EINVAL;
2210 }
2211
2212 return 0;
2213 }
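/*
 * Illustrative example of the pre-8000 byte order handled above: the
 * NVM stores the address as three little-endian 16-bit words, so raw
 * bytes 01 02 03 04 05 06 yield the address 02:01:04:03:06:05.
 */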
2214
2215 static struct iwm_nvm_data *
2216 iwm_parse_nvm_data(struct iwm_softc *sc,
2217 const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2218 const uint16_t *nvm_calib, const uint16_t *mac_override,
2219 const uint16_t *phy_sku, const uint16_t *regulatory)
2220 {
2221 struct iwm_nvm_data *data;
2222 uint32_t sku, radio_cfg;
2223 uint16_t lar_config;
2224
2225 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2226 data = kmalloc(sizeof(*data) +
2227 IWM_NUM_CHANNELS * sizeof(uint16_t),
2228 M_DEVBUF, M_WAITOK | M_ZERO);
2229 } else {
2230 data = kmalloc(sizeof(*data) +
2231 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2232 M_DEVBUF, M_WAITOK | M_ZERO);
2233 }
2234 if (!data)
2235 return NULL;
2236
2237 data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2238
2239 radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2240 iwm_set_radio_cfg(sc, data, radio_cfg);
2241
2242 sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2243 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2244 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2245 data->sku_cap_11n_enable = 0;
2246
2247 data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2248
2249 if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2250 /* TODO: use IWL_NVM_EXT */
2251 uint16_t lar_offset = data->nvm_version < 0xE39 ?
2252 IWM_NVM_LAR_OFFSET_8000_OLD :
2253 IWM_NVM_LAR_OFFSET_8000;
2254
2255 lar_config = le16_to_cpup(regulatory + lar_offset);
2256 data->lar_enabled = !!(lar_config &
2257 IWM_NVM_LAR_ENABLED_8000);
2258 }
2259
2260 /* If no valid mac address was found - bail out */
2261 if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2262 kfree(data, M_DEVBUF);
2263 return NULL;
2264 }
2265
2266 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2267 memcpy(data->nvm_ch_flags, sc->cfg->nvm_type == IWM_NVM_SDP ?
2268 ®ulatory[0] : &nvm_sw[IWM_NVM_CHANNELS],
2269 IWM_NUM_CHANNELS * sizeof(uint16_t));
2270 } else {
2271 memcpy(data->nvm_ch_flags, ®ulatory[IWM_NVM_CHANNELS_8000],
2272 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2273 }
2274
2275 return data;
2276 }
2277
2278 static void
2279 iwm_free_nvm_data(struct iwm_nvm_data *data)
2280 {
2281 if (data != NULL)
2282 kfree(data, M_DEVBUF);
2283 }
2284
2285 static struct iwm_nvm_data *
2286 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2287 {
2288 const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2289
2290 /* Checking for required sections */
2291 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2292 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2293 !sections[sc->cfg->nvm_hw_section_num].data) {
2294 device_printf(sc->sc_dev,
2295 "Can't parse empty OTP/NVM sections\n");
2296 return NULL;
2297 }
2298 } else if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2299 /* SW and REGULATORY sections are mandatory */
2300 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2301 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2302 device_printf(sc->sc_dev,
2303 "Can't parse empty OTP/NVM sections\n");
2304 return NULL;
2305 }
2306 /* MAC_OVERRIDE or at least HW section must exist */
2307 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2308 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2309 device_printf(sc->sc_dev,
2310 "Can't parse mac_address, empty sections\n");
2311 return NULL;
2312 }
2313
2314 /* PHY_SKU section is mandatory in B0 */
2315 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2316 device_printf(sc->sc_dev,
2317 "Can't parse phy_sku in B0, empty sections\n");
2318 return NULL;
2319 }
2320 } else {
2321 panic("unknown device family %d\n", sc->cfg->device_family);
2322 }
2323
2324 hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2325 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2326 calib = (const uint16_t *)
2327 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2328 regulatory = sc->cfg->nvm_type == IWM_NVM_SDP ?
2329 (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data :
2330 (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2331 mac_override = (const uint16_t *)
2332 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2333 phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2334
2335 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2336 phy_sku, regulatory);
2337 }
2338
2339 static int
2340 iwm_nvm_init(struct iwm_softc *sc)
2341 {
2342 struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2343 int i, ret, section;
2344 uint32_t size_read = 0;
2345 uint8_t *nvm_buffer, *temp;
2346 uint16_t len;
2347
2348 memset(nvm_sections, 0, sizeof(nvm_sections));
2349
2350 if (sc->cfg->nvm_hw_section_num >= IWM_NVM_NUM_OF_SECTIONS)
2351 return EINVAL;
2352
2353 /* Load NVM values from the NIC, reading each section via the firmware. */
2355 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2356
2357 nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF, M_WAITOK | M_ZERO);
2358 if (!nvm_buffer)
2359 return ENOMEM;
2360 for (section = 0; section < IWM_NVM_NUM_OF_SECTIONS; section++) {
2361 /* we override the constness for initial read */
2362 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2363 &len, size_read);
2364 if (ret)
2365 continue;
2366 size_read += len;
2367 temp = kmalloc(len, M_DEVBUF, M_WAITOK);
2368 if (!temp) {
2369 ret = ENOMEM;
2370 break;
2371 }
2372 memcpy(temp, nvm_buffer, len);
2373
2374 nvm_sections[section].data = temp;
2375 nvm_sections[section].length = len;
2376 }
2377 if (!size_read)
2378 device_printf(sc->sc_dev, "OTP is blank\n");
2379 kfree(nvm_buffer, M_DEVBUF);
2380
2381 sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2382 if (!sc->nvm_data)
2383 return EINVAL;
2384 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2385 "nvm version = %x\n", sc->nvm_data->nvm_version);
2386
2387 for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2388 if (nvm_sections[i].data != NULL)
2389 kfree(nvm_sections[i].data, M_DEVBUF);
2390 }
2391
2392 return 0;
2393 }
2394
2395 static int
2396 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2397 const struct iwm_fw_desc *section)
2398 {
2399 struct iwm_dma_info *dma = &sc->fw_dma;
2400 uint8_t *v_addr;
2401 bus_addr_t p_addr;
2402 uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2403 int ret = 0;
2404
2405 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2406 "%s: [%d] uCode section being loaded...\n",
2407 __func__, section_num);
2408
2409 v_addr = dma->vaddr;
2410 p_addr = dma->paddr;
2411
2412 for (offset = 0; offset < section->len; offset += chunk_sz) {
2413 uint32_t copy_size, dst_addr;
2414 int extended_addr = FALSE;
2415
2416 copy_size = MIN(chunk_sz, section->len - offset);
2417 dst_addr = section->offset + offset;
2418
2419 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2420 dst_addr <= IWM_FW_MEM_EXTENDED_END)
2421 extended_addr = TRUE;
2422
2423 if (extended_addr)
2424 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2425 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2426
2427 memcpy(v_addr, (const uint8_t *)section->data + offset,
2428 copy_size);
2429 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2430 ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2431 copy_size);
2432
2433 if (extended_addr)
2434 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2435 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2436
2437 if (ret) {
2438 device_printf(sc->sc_dev,
2439 "%s: Could not load the [%d] uCode section\n",
2440 __func__, section_num);
2441 break;
2442 }
2443 }
2444
2445 return ret;
2446 }
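/*
 * Illustrative sketch (not driver code): iwm_pcie_load_section() is a
 * bounce-buffer loop.  Each pass copies one chunk of the image into the
 * pre-mapped DMA buffer and then points the device at that buffer's bus
 * address, so the firmware image itself never has to be DMA-mapped.
 * The variable names below (sec_len, sec_data, sec_dst, chunk_sz,
 * dma_vaddr, dma_paddr) are hypothetical stand-ins.
 */
#if 0
	uint32_t off, n;

	for (off = 0; off < sec_len; off += n) {
		n = MIN(chunk_sz, sec_len - off);
		memcpy(dma_vaddr, sec_data + off, n);	/* CPU copy... */
		/* ...then the device DMAs n bytes from dma_paddr */
		iwm_pcie_load_firmware_chunk(sc, sec_dst + off, dma_paddr, n);
	}
#endif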
2447
2448 /*
2449 * ucode
2450 */
2451 static int
2452 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2453 bus_addr_t phy_addr, uint32_t byte_cnt)
2454 {
2455 sc->sc_fw_chunk_done = 0;
2456
2457 if (!iwm_nic_lock(sc))
2458 return EBUSY;
2459
2460 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2461 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2462
2463 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2464 dst_addr);
2465
2466 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2467 phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2468
2469 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2470 (iwm_get_dma_hi_addr(phy_addr)
2471 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2472
2473 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2474 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2475 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2476 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2477
2478 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2479 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2480 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2481 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2482
2483 iwm_nic_unlock(sc);
2484
2485 /* wait up to 5s for this segment to load */
2486 lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz * 5);
2487
2488 if (!sc->sc_fw_chunk_done) {
2489 device_printf(sc->sc_dev,
2490 "fw chunk addr 0x%x len %d failed to load\n",
2491 dst_addr, byte_cnt);
2492 return ETIMEDOUT;
2493 }
2494
2495 return 0;
2496 }
2497
2498 static int
2499 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2500 const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2501 {
2502 int shift_param;
2503 int i, ret = 0, sec_num = 0x1;
2504 uint32_t val, last_read_idx = 0;
2505
2506 if (cpu == 1) {
2507 shift_param = 0;
2508 *first_ucode_section = 0;
2509 } else {
2510 shift_param = 16;
2511 (*first_ucode_section)++;
2512 }
2513
2514 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2515 last_read_idx = i;
2516
2517 /*
2518  * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the CPU1
2519  * sections from the CPU2 sections.
2520  * The PAGING_SEPARATOR_SECTION delimiter separates CPU2's
2521  * non-paged sections from CPU2's paging sections.
2522  */
2523 if (!image->sec[i].data ||
2524 image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2525 image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2526 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2527 "Break since Data not valid or Empty section, sec = %d\n",
2528 i);
2529 break;
2530 }
2531 ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2532 if (ret)
2533 return ret;
2534
2535 /* Notify the ucode of the loaded section number and status */
2536 if (iwm_nic_lock(sc)) {
2537 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2538 val = val | (sec_num << shift_param);
2539 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2540 sec_num = (sec_num << 1) | 0x1;
2541 iwm_nic_unlock(sc);
2542 }
2543 }
2544
2545 *first_ucode_section = last_read_idx;
2546
2547 iwm_enable_interrupts(sc);
2548
2549 if (iwm_nic_lock(sc)) {
2550 if (cpu == 1)
2551 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2552 else
2553 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2554 iwm_nic_unlock(sc);
2555 }
2556
2557 return 0;
2558 }
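/*
 * Illustrative example: the load-status handshake above builds a
 * contiguous bitmap, one bit per completed section.  sec_num starts at
 * 0x1 and is advanced as (sec_num << 1) | 1 after each section, so CPU1
 * reports 0x1, 0x3, 0x7, ... in IWM_FH_UCODE_LOAD_STATUS; for CPU2 the
 * same values are shifted left by 16.  Writing 0xFFFF (or 0xFFFFFFFF)
 * at the end marks the whole CPU image as complete.
 */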
2559
2560 static int
2561 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2562 const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2563 {
2564 int i, ret = 0;
2565 uint32_t last_read_idx = 0;
2566
2567 if (cpu == 1) {
2568 *first_ucode_section = 0;
2569 } else {
2570 (*first_ucode_section)++;
2571 }
2572
2573 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2574 last_read_idx = i;
2575
2576 /*
2577  * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the CPU1
2578  * sections from the CPU2 sections.
2579  * The PAGING_SEPARATOR_SECTION delimiter separates CPU2's
2580  * non-paged sections from CPU2's paging sections.
2581  */
2582 if (!image->sec[i].data ||
2583 image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2584 image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2585 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2586 "Break since Data not valid or Empty section, sec = %d\n",
2587 i);
2588 break;
2589 }
2590
2591 ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2592 if (ret)
2593 return ret;
2594 }
2595
2596 *first_ucode_section = last_read_idx;
2597
2598 return 0;
2599
2600 }
2601
2602 static int
2603 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2604 {
2605 int ret = 0;
2606 int first_ucode_section;
2607
2608 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2609 image->is_dual_cpus ? "Dual" : "Single");
2610
2611 /* load to FW the binary non secured sections of CPU1 */
2612 ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2613 if (ret)
2614 return ret;
2615
2616 if (image->is_dual_cpus) {
2617 /* set CPU2 header address */
2618 if (iwm_nic_lock(sc)) {
2619 iwm_write_prph(sc,
2620 IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2621 IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2622 iwm_nic_unlock(sc);
2623 }
2624
2625 /* load to FW the binary sections of CPU2 */
2626 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2627 &first_ucode_section);
2628 if (ret)
2629 return ret;
2630 }
2631
2632 iwm_enable_interrupts(sc);
2633
2634 /* release CPU reset */
2635 IWM_WRITE(sc, IWM_CSR_RESET, 0);
2636
2637 return 0;
2638 }
2639
2640 int
2641 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2642 const struct iwm_fw_img *image)
2643 {
2644 int ret = 0;
2645 int first_ucode_section;
2646
2647 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2648 image->is_dual_cpus ? "Dual" : "Single");
2649
2650 /* configure the ucode to be ready to get the secured image */
2651 /* release CPU reset */
2652 if (iwm_nic_lock(sc)) {
2653 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2654 IWM_RELEASE_CPU_RESET_BIT);
2655 iwm_nic_unlock(sc);
2656 }
2657
2658 /* load to FW the binary Secured sections of CPU1 */
2659 ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2660 &first_ucode_section);
2661 if (ret)
2662 return ret;
2663
2664 /* load to FW the binary sections of CPU2 */
2665 return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2666 &first_ucode_section);
2667 }
2668
2669 /* XXX Get rid of this definition */
2670 static inline void
2671 iwm_enable_fw_load_int(struct iwm_softc *sc)
2672 {
2673 IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2674 sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2675 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2676 }
2677
2678 /* XXX Add proper rfkill support code */
2679 static int
2680 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2681 {
2682 int ret;
2683
2684 /* This may fail if AMT took ownership of the device */
2685 if (iwm_prepare_card_hw(sc)) {
2686 device_printf(sc->sc_dev,
2687 "%s: Exit HW not ready\n", __func__);
2688 ret = EIO;
2689 goto out;
2690 }
2691
2692 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2693
2694 iwm_disable_interrupts(sc);
2695
2696 /* make sure rfkill handshake bits are cleared */
2697 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2698 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2699 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2700
2701 /* clear (again), then enable host interrupts */
2702 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2703
2704 ret = iwm_nic_init(sc);
2705 if (ret) {
2706 device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2707 goto out;
2708 }
2709
2710 /*
2711  * Now we load the firmware and don't want to be interrupted, even
2712  * by the RF-Kill interrupt (hence mask all interrupts except the
2713  * FH_TX interrupt, which is needed to load the firmware).  If the
2714  * RF-Kill switch is toggled, we will find out after having loaded
2715  * the firmware and return the proper value to the caller.
2716  */
2717 iwm_enable_fw_load_int(sc);
2718
2719 /* really make sure rfkill handshake bits are cleared */
2720 /* maybe we should write a few times more? just to make sure */
2721 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2722 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2723
2724 /* Load the given image to the HW */
2725 if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
2726 ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2727 else
2728 ret = iwm_pcie_load_given_ucode(sc, fw);
2729
2730 /* XXX re-check RF-Kill state */
2731
2732 out:
2733 return ret;
2734 }
2735
2736 static int
2737 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2738 {
2739 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2740 .valid = htole32(valid_tx_ant),
2741 };
2742
2743 return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2744 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2745 }
2746
2747 /* iwlwifi: mvm/fw.c */
2748 static int
2749 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2750 {
2751 struct iwm_phy_cfg_cmd phy_cfg_cmd;
2752 enum iwm_ucode_type ucode_type = sc->cur_ucode;
2753
2754 /* Set parameters */
2755 phy_cfg_cmd.phy_cfg = htole32(iwm_get_phy_config(sc));
2756 phy_cfg_cmd.calib_control.event_trigger =
2757 sc->sc_default_calib[ucode_type].event_trigger;
2758 phy_cfg_cmd.calib_control.flow_trigger =
2759 sc->sc_default_calib[ucode_type].flow_trigger;
2760
2761 IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2762 "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2763 return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2764 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2765 }
2766
2767 static int
2768 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2769 {
2770 struct iwm_alive_data *alive_data = data;
2771 struct iwm_alive_resp_v3 *palive3;
2772 struct iwm_alive_resp *palive;
2773 struct iwm_umac_alive *umac;
2774 struct iwm_lmac_alive *lmac1;
2775 struct iwm_lmac_alive *lmac2 = NULL;
2776 uint16_t status;
2777
2778 if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2779 palive = (void *)pkt->data;
2780 umac = &palive->umac_data;
2781 lmac1 = &palive->lmac_data[0];
2782 lmac2 = &palive->lmac_data[1];
2783 status = le16toh(palive->status);
2784 } else {
2785 palive3 = (void *)pkt->data;
2786 umac = &palive3->umac_data;
2787 lmac1 = &palive3->lmac_data;
2788 status = le16toh(palive3->status);
2789 }
2790
2791 sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
2792 if (lmac2)
2793 sc->error_event_table[1] =
2794 le32toh(lmac2->error_event_table_ptr);
2795 sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
2796 sc->umac_error_event_table = le32toh(umac->error_info_addr);
2797 alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
2798 alive_data->valid = status == IWM_ALIVE_STATUS_OK;
2799 if (sc->umac_error_event_table)
2800 sc->support_umac_log = TRUE;
2801
2802 IWM_DPRINTF(sc, IWM_DEBUG_FW,
2803 "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
2804 status, lmac1->ver_type, lmac1->ver_subtype);
2805
2806 if (lmac2)
2807 IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");
2808
2809 IWM_DPRINTF(sc, IWM_DEBUG_FW,
2810 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2811 le32toh(umac->umac_major),
2812 le32toh(umac->umac_minor));
2813
2814 return TRUE;
2815 }
2816
2817 static int
2818 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2819 struct iwm_rx_packet *pkt, void *data)
2820 {
2821 struct iwm_phy_db *phy_db = data;
2822
2823 if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2824 if (pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2825 device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2826 __func__, pkt->hdr.code);
2827 }
2828 return TRUE;
2829 }
2830
2831 if (iwm_phy_db_set_section(phy_db, pkt)) {
2832 device_printf(sc->sc_dev,
2833 "%s: iwm_phy_db_set_section failed\n", __func__);
2834 }
2835
2836 return FALSE;
2837 }
2838
2839 static int
2840 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
2841 enum iwm_ucode_type ucode_type)
2842 {
2843 struct iwm_notification_wait alive_wait;
2844 struct iwm_alive_data alive_data;
2845 const struct iwm_fw_img *fw;
2846 enum iwm_ucode_type old_type = sc->cur_ucode;
2847 int error;
2848 static const uint16_t alive_cmd[] = { IWM_ALIVE };
2849
2850 fw = &sc->sc_fw.img[ucode_type];
2851 sc->cur_ucode = ucode_type;
2852 sc->ucode_loaded = FALSE;
2853
2854 memset(&alive_data, 0, sizeof(alive_data));
2855 iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2856 alive_cmd, nitems(alive_cmd),
2857 iwm_alive_fn, &alive_data);
2858
2859 error = iwm_start_fw(sc, fw);
2860 if (error) {
2861 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2862 sc->cur_ucode = old_type;
2863 iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2864 return error;
2865 }
2866
2867 /*
2868 * Some things may run in the background now, but we
2869 * just wait for the ALIVE notification here.
2870 */
2871 IWM_UNLOCK(sc);
2872 error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2873 IWM_UCODE_ALIVE_TIMEOUT);
2874 IWM_LOCK(sc);
2875 if (error) {
2876 if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2877 uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2878 if (iwm_nic_lock(sc)) {
2879 a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2880 b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2881 iwm_nic_unlock(sc);
2882 }
2883 device_printf(sc->sc_dev,
2884 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2885 a, b);
2886 }
2887 sc->cur_ucode = old_type;
2888 return error;
2889 }
2890
2891 if (!alive_data.valid) {
2892 device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2893 __func__);
2894 sc->cur_ucode = old_type;
2895 return EIO;
2896 }
2897
2898 iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2899
2900 /*
2901  * Configure and operate the firmware paging mechanism.
2902  * The driver configures the paging flow only once; the CPU2 paging
2903  * image is included in the IWM_UCODE_INIT image.
2904  */
2905 if (fw->paging_mem_size) {
2906 error = iwm_save_fw_paging(sc, fw);
2907 if (error) {
2908 device_printf(sc->sc_dev,
2909 "%s: failed to save the FW paging image\n",
2910 __func__);
2911 return error;
2912 }
2913
2914 error = iwm_send_paging_cmd(sc, fw);
2915 if (error) {
2916 device_printf(sc->sc_dev,
2917 "%s: failed to send the paging cmd\n", __func__);
2918 iwm_free_fw_paging(sc);
2919 return error;
2920 }
2921 }
2922
2923 if (!error)
2924 sc->ucode_loaded = TRUE;
2925 return error;
2926 }
2927
2928 /*
2929 * mvm misc bits
2930 */
2931
2932 /*
2933 * follows iwlwifi/fw.c
2934 */
2935 static int
2936 iwm_run_init_ucode(struct iwm_softc *sc, int justnvm)
2937 {
2938 struct iwm_notification_wait calib_wait;
2939 static const uint16_t init_complete[] = {
2940 IWM_INIT_COMPLETE_NOTIF,
2941 IWM_CALIB_RES_NOTIF_PHY_DB
2942 };
2943 int ret;
2944
2945 /* do not operate with rfkill switch turned on */
2946 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2947 device_printf(sc->sc_dev,
2948 "radio is disabled by hardware switch\n");
2949 return EPERM;
2950 }
2951
2952 iwm_init_notification_wait(sc->sc_notif_wait,
2953 &calib_wait,
2954 init_complete,
2955 nitems(init_complete),
2956 iwm_wait_phy_db_entry,
2957 sc->sc_phy_db);
2958
2959 /* Will also start the device */
2960 ret = iwm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2961 if (ret) {
2962 device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2963 ret);
2964 goto error;
2965 }
2966
2967 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2968 ret = iwm_send_bt_init_conf(sc);
2969 if (ret) {
2970 device_printf(sc->sc_dev,
2971 "failed to send bt coex configuration: %d\n", ret);
2972 goto error;
2973 }
2974 }
2975
2976 if (justnvm) {
2977 /* Read nvm */
2978 ret = iwm_nvm_init(sc);
2979 if (ret) {
2980 device_printf(sc->sc_dev, "failed to read nvm\n");
2981 goto error;
2982 }
2983 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2984 goto error;	/* also drops the calib_wait notification */
2985 }
2986
2987 /* Send TX valid antennas before triggering calibrations */
2988 ret = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
2989 if (ret) {
2990 device_printf(sc->sc_dev,
2991 "failed to send antennas before calibration: %d\n", ret);
2992 goto error;
2993 }
2994
2995 /*
2996 * Send phy configurations command to init uCode
2997 * to start the 16.0 uCode init image internal calibrations.
2998 */
2999 ret = iwm_send_phy_cfg_cmd(sc);
3000 if (ret) {
3001 device_printf(sc->sc_dev,
3002 "%s: Failed to run INIT calibrations: %d\n",
3003 __func__, ret);
3004 goto error;
3005 }
3006
3007 /*
3008 * Nothing to do but wait for the init complete notification
3009 * from the firmware.
3010 */
3011 IWM_UNLOCK(sc);
3012 ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3013 IWM_UCODE_CALIB_TIMEOUT);
3014 IWM_LOCK(sc);
3015
3017 goto out;
3018
3019 error:
3020 iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3021 out:
3022 return ret;
3023 }
3024
3025 static int
3026 iwm_config_ltr(struct iwm_softc *sc)
3027 {
3028 struct iwm_ltr_config_cmd cmd = {
3029 .flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
3030 };
3031
3032 if (!sc->sc_ltr_enabled)
3033 return 0;
3034
3035 return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3036 }
3037
3038 /*
3039 * receive side
3040 */
3041
3042 /* (re)stock rx ring, called at init-time and at runtime */
3043 static int
3044 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3045 {
3046 struct iwm_rx_ring *ring = &sc->rxq;
3047 struct iwm_rx_data *data = &ring->data[idx];
3048 struct mbuf *m;
3049 bus_dmamap_t dmamap;
3050 bus_dma_segment_t seg;
3051 int nsegs, error;
3052
3053 m = m_getjcl(M_WAITOK, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3054 if (m == NULL)
3055 return ENOBUFS;
3056
3057 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3058 #if defined(__DragonFly__)
3059 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
3060 m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
3061 #else
3062 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3063 &seg, &nsegs, BUS_DMA_NOWAIT);
3064 #endif
3065 if (error != 0) {
3066 device_printf(sc->sc_dev,
3067 "%s: can't map mbuf, error %d\n", __func__, error);
3068 m_freem(m);
3069 return error;
3070 }
3071
3072 if (data->m != NULL)
3073 bus_dmamap_unload(ring->data_dmat, data->map);
3074
3075 /* Swap ring->spare_map with data->map */
3076 dmamap = data->map;
3077 data->map = ring->spare_map;
3078 ring->spare_map = dmamap;
3079
3080 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3081 data->m = m;
3082
3083 /* Update RX descriptor. */
3084 KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
3085 if (sc->cfg->mqrx_supported)
3086 ((uint64_t *)ring->desc)[idx] = htole64(seg.ds_addr);
3087 else
3088 ((uint32_t *)ring->desc)[idx] = htole32(seg.ds_addr >> 8);
3089 bus_dmamap_sync(ring->free_desc_dma.tag, ring->free_desc_dma.map,
3090 BUS_DMASYNC_PREWRITE);
3091
3092 return 0;
3093 }
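/*
 * Illustrative example of the two RX descriptor layouts written above:
 * multi-queue (mqrx) hardware takes the full 64-bit bus address, while
 * legacy hardware takes a 32-bit word holding the 256-byte-aligned
 * address shifted right by 8.  For a buffer at bus address 0x12345600:
 *
 *	mqrx:   desc64[idx] = htole64(0x12345600)
 *	legacy: desc32[idx] = htole32(0x00123456)
 */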
3094
3095 static void
3096 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3097 {
3098 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3099
3100 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3101
3102 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3103 }
3104
3105 /*
3106 * Retrieve the average noise (in dBm) among receivers.
3107 */
3108 static int
3109 iwm_get_noise(struct iwm_softc *sc,
3110 const struct iwm_statistics_rx_non_phy *stats)
3111 {
3112 int i, noise;
3113 #ifdef IWM_DEBUG
3114 int nbant, total;
3115 #else
3116 int nbant __unused, total __unused;
3117 #endif
3118
3119 total = nbant = noise = 0;
3120 for (i = 0; i < 3; i++) {
3121 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3122 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3123 __func__,
3124 i,
3125 noise);
3126
3127 if (noise) {
3128 total += noise;
3129 nbant++;
3130 }
3131 }
3132
3133 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3134 __func__, nbant, total);
3135 #if 0
3136 /* There should be at least one antenna but check anyway. */
3137 return (nbant == 0) ? -127 : (total / nbant) - 107;
3138 #else
3139 /* For now, just hard-code it to -96 to be safe */
3140 return (-96);
3141 #endif
3142 }
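/*
 * Illustrative example: with the disabled branch above, beacon-silence
 * RSSI readings of 60, 64 and 0 (third antenna absent) would yield
 * (60 + 64) / 2 - 107 = -45 dBm; the code instead plays it safe and
 * reports a flat -96 dBm.
 */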
3143
3144 static void
iwm_handle_rx_statistics(struct iwm_softc * sc,struct iwm_rx_packet * pkt)3145 iwm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3146 {
3147 struct iwm_notif_statistics *stats = (void *)&pkt->data;
3148
3149 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3150 sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3151 }
3152
3153 /* iwlwifi: mvm/rx.c */
3154 /*
3155  * iwm_rx_get_signal_strength - use new rx PHY INFO API
3156  * values are reported by the fw as positive values - need to negate
3157  * to obtain their dBm.  Account for missing antennas by replacing 0
3158  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3159 */
3160 static int
3161 iwm_rx_get_signal_strength(struct iwm_softc *sc,
3162 struct iwm_rx_phy_info *phy_info)
3163 {
3164 int energy_a, energy_b, energy_c, max_energy;
3165 uint32_t val;
3166
3167 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3168 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3169 IWM_RX_INFO_ENERGY_ANT_A_POS;
3170 energy_a = energy_a ? -energy_a : -256;
3171 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3172 IWM_RX_INFO_ENERGY_ANT_B_POS;
3173 energy_b = energy_b ? -energy_b : -256;
3174 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3175 IWM_RX_INFO_ENERGY_ANT_C_POS;
3176 energy_c = energy_c ? -energy_c : -256;
3177 max_energy = MAX(energy_a, energy_b);
3178 max_energy = MAX(max_energy, energy_c);
3179
3180 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3181 "energy In A %d B %d C %d , and max %d\n",
3182 energy_a, energy_b, energy_c, max_energy);
3183
3184 return max_energy;
3185 }
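/*
 * Illustrative example: an energy reading of 40 on antenna A with
 * antennas B and C absent (raw 0) becomes MAX(-40, -256, -256), so the
 * reported signal strength is -40 dBm.
 */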
3186
3187 static int
3188 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
3189 struct iwm_rx_mpdu_desc *desc)
3190 {
3191 int energy_a, energy_b;
3192
3193 energy_a = desc->v1.energy_a;
3194 energy_b = desc->v1.energy_b;
3195 energy_a = energy_a ? -energy_a : -256;
3196 energy_b = energy_b ? -energy_b : -256;
3197 return MAX(energy_a, energy_b);
3198 }
3199
3200 /*
3201 * iwm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3202 *
3203 * Handles the actual data of the Rx packet from the fw
3204 */
3205 static bool
3206 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3207 bool stolen)
3208 {
3209 struct ieee80211com *ic = &sc->sc_ic;
3210 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3211 struct ieee80211_rx_stats rxs;
3212 struct iwm_rx_phy_info *phy_info;
3213 struct iwm_rx_mpdu_res_start *rx_res;
3214 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3215 uint32_t len;
3216 uint32_t rx_pkt_status;
3217 int rssi;
3218
3219 phy_info = &sc->sc_last_phy_info;
3220 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3221 len = le16toh(rx_res->byte_count);
3222 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3223
3224 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3225 device_printf(sc->sc_dev,
3226 "dsp size out of range [0,20]: %d\n",
3227 phy_info->cfg_phy_cnt);
3228 return false;
3229 }
3230
3231 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3232 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3233 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3234 "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3235 return false;
3236 }
3237
3238 rssi = iwm_rx_get_signal_strength(sc, phy_info);
3239
3240 /* Map it to relative value */
3241 rssi = rssi - sc->sc_noise;
3242
3243 /* replenish ring for the buffer we're going to feed to the sharks */
3244 if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3245 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3246 __func__);
3247 return false;
3248 }
3249
3250 m->m_data = pkt->data + sizeof(*rx_res);
3251 m->m_pkthdr.len = m->m_len = len;
3252
3253 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3254 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3255
3256 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3257 "%s: phy_info: channel=%d, flags=0x%08x\n",
3258 __func__,
3259 le16toh(phy_info->channel),
3260 le16toh(phy_info->phy_flags));
3261
3262 /*
3263 * Populate an RX state struct with the provided information.
3264 */
3265 bzero(&rxs, sizeof(rxs));
3266 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3267 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3268 rxs.c_ieee = le16toh(phy_info->channel);
3269 if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3270 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3271 } else {
3272 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3273 }
3274
3275 /* rssi is in 1/2db units */
3276 rxs.c_rssi = rssi * 2;
3277 rxs.c_nf = sc->sc_noise;
3278 if (ieee80211_add_rx_params(m, &rxs) == 0)
3279 return false;
3280
3281 if (ieee80211_radiotap_active_vap(vap)) {
3282 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3283
3284 tap->wr_flags = 0;
3285 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3286 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3287 tap->wr_chan_freq = htole16(rxs.c_freq);
3288 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3289 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3290 tap->wr_dbm_antsignal = (int8_t)rssi;
3291 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3292 tap->wr_tsft = phy_info->system_timestamp;
3293 switch (phy_info->rate) {
3294 /* CCK rates. */
3295 case 10: tap->wr_rate = 2; break;
3296 case 20: tap->wr_rate = 4; break;
3297 case 55: tap->wr_rate = 11; break;
3298 case 110: tap->wr_rate = 22; break;
3299 /* OFDM rates. */
3300 case 0xd: tap->wr_rate = 12; break;
3301 case 0xf: tap->wr_rate = 18; break;
3302 case 0x5: tap->wr_rate = 24; break;
3303 case 0x7: tap->wr_rate = 36; break;
3304 case 0x9: tap->wr_rate = 48; break;
3305 case 0xb: tap->wr_rate = 72; break;
3306 case 0x1: tap->wr_rate = 96; break;
3307 case 0x3: tap->wr_rate = 108; break;
3308 /* Unknown rate: should not happen. */
3309 default: tap->wr_rate = 0;
3310 }
3311 }
3312
3313 return true;
3314 }
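/*
 * Illustrative sketch (an alternative to the rate switch above, not how
 * this driver does it): the fw-rate-to-radiotap mapping could equally be
 * expressed as a small lookup table.  plcp_to_radiotap() is a
 * hypothetical helper.
 */
#if 0
static const struct { uint8_t plcp, rate; } rate_map[] = {
	{ 10, 2 },   { 20, 4 },   { 55, 11 },  { 110, 22 },	/* CCK */
	{ 0xd, 12 }, { 0xf, 18 }, { 0x5, 24 }, { 0x7, 36 },	/* OFDM */
	{ 0x9, 48 }, { 0xb, 72 }, { 0x1, 96 }, { 0x3, 108 },
};

static uint8_t
plcp_to_radiotap(uint8_t plcp)
{
	size_t i;

	for (i = 0; i < nitems(rate_map); i++)
		if (rate_map[i].plcp == plcp)
			return rate_map[i].rate;
	return 0;	/* unknown rate: should not happen */
}
#endif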
3315
3316 static bool
3317 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3318 bool stolen)
3319 {
3320 struct ieee80211com *ic = &sc->sc_ic;
3321 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3322 struct ieee80211_frame *wh;
3323 struct ieee80211_rx_stats rxs;
3324 struct iwm_rx_mpdu_desc *desc;
3325 struct iwm_rx_packet *pkt;
3326 int rssi;
3327 uint32_t hdrlen, len, rate_n_flags;
3328 uint16_t phy_info;
3329 uint8_t channel;
3330
3331 pkt = mtodo(m, offset);
3332 desc = (void *)pkt->data;
3333
3334 if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
3335 !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
3336 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3337 "Bad CRC or FIFO: 0x%08X.\n", desc->status);
3338 return false;
3339 }
3340
3341 channel = desc->v1.channel;
3342 len = le16toh(desc->mpdu_len);
3343 phy_info = le16toh(desc->phy_info);
3344 rate_n_flags = desc->v1.rate_n_flags;
3345
3346 wh = mtodo(m, sizeof(*desc));
3347 m->m_data = pkt->data + sizeof(*desc);
3348 m->m_pkthdr.len = m->m_len = len;
3350
3351 /* Account for padding following the frame header. */
3352 if ((desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD)) {
3353 hdrlen = ieee80211_anyhdrsize(wh);
3354 memmove(mtodo(m, 2), mtodo(m, 0), hdrlen);
3355 m->m_data = mtodo(m, 2);
3356 wh = mtod(m, struct ieee80211_frame *);
3357 }
3358
3359 /* Map it to relative value */
3360 rssi = iwm_rxmq_get_signal_strength(sc, desc);
3361 rssi = rssi - sc->sc_noise;
3362
3363 /* replenish ring for the buffer we're going to feed to the sharks */
3364 if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3365 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3366 __func__);
3367 return false;
3368 }
3369
3370 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3371 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3372
3373 /*
3374 * Populate an RX state struct with the provided information.
3375 */
3376 bzero(&rxs, sizeof(rxs));
3377 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3378 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3379 rxs.c_ieee = channel;
3380 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
3381 channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
3382
3383 /* rssi is in 1/2db units */
3384 rxs.c_rssi = rssi * 2;
3385 rxs.c_nf = sc->sc_noise;
3386 if (ieee80211_add_rx_params(m, &rxs) == 0)
3387 return false;
3388
3389 if (ieee80211_radiotap_active_vap(vap)) {
3390 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3391
3392 tap->wr_flags = 0;
3393 if ((phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE) != 0)
3394 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3395 tap->wr_chan_freq = htole16(rxs.c_freq);
3396 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3397 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3398 tap->wr_dbm_antsignal = (int8_t)rssi;
3399 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3400 tap->wr_tsft = desc->v1.gp2_on_air_rise;
3401 switch ((rate_n_flags & 0xff)) {
3402 /* CCK rates. */
3403 case 10: tap->wr_rate = 2; break;
3404 case 20: tap->wr_rate = 4; break;
3405 case 55: tap->wr_rate = 11; break;
3406 case 110: tap->wr_rate = 22; break;
3407 /* OFDM rates. */
3408 case 0xd: tap->wr_rate = 12; break;
3409 case 0xf: tap->wr_rate = 18; break;
3410 case 0x5: tap->wr_rate = 24; break;
3411 case 0x7: tap->wr_rate = 36; break;
3412 case 0x9: tap->wr_rate = 48; break;
3413 case 0xb: tap->wr_rate = 72; break;
3414 case 0x1: tap->wr_rate = 96; break;
3415 case 0x3: tap->wr_rate = 108; break;
3416 /* Unknown rate: should not happen. */
3417 default: tap->wr_rate = 0;
3418 }
3419 }
3420
3421 return true;
3422 }
3423
3424 static bool
3425 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3426 bool stolen)
3427 {
3428 struct ieee80211com *ic;
3429 struct ieee80211_frame *wh;
3430 struct ieee80211_node *ni;
3431 bool ret;
3432
3433 ic = &sc->sc_ic;
3434
3435 ret = sc->cfg->mqrx_supported ?
3436 iwm_rx_mpdu_mq(sc, m, offset, stolen) :
3437 iwm_rx_rx_mpdu(sc, m, offset, stolen);
3438 if (!ret) {
3439 #if !defined(__DragonFly__)
3440 counter_u64_add(ic->ic_ierrors, 1);
3441 #else
3442 ++sc->sc_ic.ic_ierrors;
3443 #endif
3444 return (ret);
3445 }
3446
3447 wh = mtod(m, struct ieee80211_frame *);
3448 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3449
3450 IWM_UNLOCK(sc);
3451 if (ni != NULL) {
3452 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3453 #if !defined(__DragonFly__)
3454 ieee80211_input_mimo(ni, m);
3455 #else
3456 ieee80211_input_mimo(ni, m, NULL);
3457 #endif
3458 ieee80211_free_node(ni);
3459 } else {
3460 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3461 #if !defined(__DragonFly__)
3462 ieee80211_input_mimo_all(ic, m);
3463 #else
3464 ieee80211_input_mimo_all(ic, m, NULL);
3465 #endif
3466 }
3467 IWM_LOCK(sc);
3468
3469 return true;
3470 }
3471
3472 static int
3473 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3474 struct iwm_node *in)
3475 {
3476 struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3477 #if !defined(__DragonFly__)
3478 struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3479 #endif
3480 struct ieee80211_node *ni = &in->in_ni;
3481 struct ieee80211vap *vap = ni->ni_vap;
3482 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3483 int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3484 boolean_t rate_matched;
3485 uint8_t tx_resp_rate;
3486
3487 KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3488
3489 /* Update rate control statistics. */
3490 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3491 __func__,
3492 (int) le16toh(tx_resp->status.status),
3493 (int) le16toh(tx_resp->status.sequence),
3494 tx_resp->frame_count,
3495 tx_resp->bt_kill_count,
3496 tx_resp->failure_rts,
3497 tx_resp->failure_frame,
3498 le32toh(tx_resp->initial_rate),
3499 (int) le16toh(tx_resp->wireless_media_time));
3500
3501 tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3502
3503 	/* For rate control, ignore frames sent at a different initial rate */
3504 rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3505
3506 if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3507 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3508 "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3509 "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3510 }
3511
3512 #if !defined(__DragonFly__)
3513 txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3514 IEEE80211_RATECTL_STATUS_LONG_RETRY;
3515 txs->short_retries = tx_resp->failure_rts;
3516 txs->long_retries = tx_resp->failure_frame;
3517 if (status != IWM_TX_STATUS_SUCCESS &&
3518 status != IWM_TX_STATUS_DIRECT_DONE) {
3519 switch (status) {
3520 case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3521 txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3522 break;
3523 case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3524 txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3525 break;
3526 case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3527 txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3528 break;
3529 default:
3530 txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3531 break;
3532 }
3533 } else {
3534 txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3535 }
3536
3537 if (rate_matched) {
3538 ieee80211_ratectl_tx_complete(ni, txs);
3539
3540 int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3541 new_rate = vap->iv_bss->ni_txrate;
3542 if (new_rate != 0 && new_rate != cur_rate) {
3543 struct iwm_node *in = IWM_NODE(vap->iv_bss);
3544 iwm_setrates(sc, in, rix);
3545 iwm_send_lq_cmd(sc, &in->in_lq, FALSE);
3546 }
3547 }
3548
3549 return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3550 #else
3551 /*
3552 * XXX try to use old ieee80211 ABI, the new one isn't incorporated
3553 * into our ieee80211 yet.
3554 */
3555 int failack = tx_resp->failure_frame;
3556 int ret;
3557
3558 if (status != IWM_TX_STATUS_SUCCESS &&
3559 status != IWM_TX_STATUS_DIRECT_DONE) {
3560 if (rate_matched) {
3561 ieee80211_ratectl_tx_complete(vap, ni,
3562 IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3563 }
3564 ret = 1;
3565 } else {
3566 if (rate_matched) {
3567 ieee80211_ratectl_tx_complete(vap, ni,
3568 IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3569 }
3570 ret = 0;
3571 }
3572
3573 if (rate_matched) {
3574 int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3575 new_rate = vap->iv_bss->ni_txrate;
3576 if (new_rate != 0 && new_rate != cur_rate) {
3577 struct iwm_node *in = IWM_NODE(vap->iv_bss);
3578 iwm_setrates(sc, in, rix);
3579 }
3580 }
3581
3582 return ret;
3583
3584 #endif
3585 }
3586
3587 static void
3588 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3589 {
3590 struct iwm_cmd_header *cmd_hdr;
3591 struct iwm_tx_ring *ring;
3592 struct iwm_tx_data *txd;
3593 struct iwm_node *in;
3594 struct mbuf *m;
3595 int idx, qid, qmsk, status;
3596
3597 cmd_hdr = &pkt->hdr;
3598 idx = cmd_hdr->idx;
3599 qid = cmd_hdr->qid;
3600
3601 ring = &sc->txq[qid];
3602 txd = &ring->data[idx];
3603 in = txd->in;
3604 m = txd->m;
3605
3606 KASSERT(txd->done == 0, ("txd not done"));
3607 KASSERT(txd->in != NULL, ("txd without node"));
3608 KASSERT(txd->m != NULL, ("txd without mbuf"));
3609
3610 sc->sc_tx_timer = 0;
3611
3612 status = iwm_rx_tx_cmd_single(sc, pkt, in);
3613
3614 /* Unmap and free mbuf. */
3615 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3616 bus_dmamap_unload(ring->data_dmat, txd->map);
3617
3618 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3619 "free txd %p, in %p\n", txd, txd->in);
3620 txd->done = 1;
3621 txd->m = NULL;
3622 txd->in = NULL;
3623
3624 ieee80211_tx_complete(&in->in_ni, m, status);
3625
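	/*
	 * If this queue has drained below the low watermark, clear its bit
	 * in qfullmsk; once no queue is marked full, restart transmission.
	 */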
3626 qmsk = 1 << qid;
3627 if (--ring->queued < IWM_TX_RING_LOMARK && (sc->qfullmsk & qmsk) != 0) {
3628 sc->qfullmsk &= ~qmsk;
3629 if (sc->qfullmsk == 0)
3630 iwm_start(sc);
3631 }
3632 }
3633
3634 /*
3635 * transmit side
3636 */
3637
3638 /*
3639  * Process a "command done" firmware notification.  This is where we wake up
3640  * processes waiting for a synchronous command completion.
3641  * Taken from if_iwn.
3642 */
3643 static void
3644 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3645 {
3646 struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3647 struct iwm_tx_data *data;
3648
3649 if (pkt->hdr.qid != IWM_CMD_QUEUE) {
3650 return; /* Not a command ack. */
3651 }
3652
3653 /* XXX wide commands? */
3654 IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3655 "cmd notification type 0x%x qid %d idx %d\n",
3656 pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3657
3658 data = &ring->data[pkt->hdr.idx];
3659
3660 /* If the command was mapped in an mbuf, free it. */
3661 if (data->m != NULL) {
3662 bus_dmamap_sync(ring->data_dmat, data->map,
3663 BUS_DMASYNC_POSTWRITE);
3664 bus_dmamap_unload(ring->data_dmat, data->map);
3665 m_freem(data->m);
3666 data->m = NULL;
3667 }
3668 wakeup(&ring->desc[pkt->hdr.idx]);
3669
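	/*
	 * Consistency check: the index of this completion plus the number
	 * of still-queued commands should line up with the ring's write
	 * pointer; if not, the firmware skipped one or more host commands.
	 */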
3670 if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3671 device_printf(sc->sc_dev,
3672 "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3673 __func__, pkt->hdr.idx, ring->queued, ring->cur);
3674 /* XXX call iwm_force_nmi() */
3675 }
3676
3677 KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3678 ring->queued--;
3679 if (ring->queued == 0)
3680 iwm_pcie_clear_cmd_in_flight(sc);
3681 }
3682
3683 #if 0
3684 /*
3685 * necessary only for block ack mode
3686 */
3687 void
3688 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3689 uint16_t len)
3690 {
3691 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3692 uint16_t w_val;
3693
3694 scd_bc_tbl = sc->sched_dma.vaddr;
3695
3696 len += 8; /* magic numbers came naturally from paris */
3697 len = roundup(len, 4) / 4;
3698
3699 w_val = htole16(sta_id << 12 | len);
3700
3701 /* Update TX scheduler. */
3702 scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3703 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3704 BUS_DMASYNC_PREWRITE);
3705
3706 /* I really wonder what this is ?!? */
3707 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3708 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3709 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3710 BUS_DMASYNC_PREWRITE);
3711 }
3712 }
3713 #endif
3714
3715 static int
3716 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3717 {
3718 int i;
3719
3720 for (i = 0; i < nitems(iwm_rates); i++) {
3721 if (iwm_rates[i].rate == rate)
3722 return (i);
3723 }
3724 /* XXX error? */
3725 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3726 "%s: couldn't find an entry for rate=%d\n",
3727 __func__,
3728 rate);
3729 return (0);
3730 }
3731
3732 /*
3733 * Fill in the rate related information for a transmit command.
3734 */
3735 static const struct iwm_rate *
3736 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3737 struct mbuf *m, struct iwm_tx_cmd *tx)
3738 {
3739 struct ieee80211_node *ni = &in->in_ni;
3740 struct ieee80211_frame *wh;
3741 const struct ieee80211_txparam *tp = ni->ni_txparms;
3742 const struct iwm_rate *rinfo;
3743 int type;
3744 int ridx, rate_flags;
3745
3746 wh = mtod(m, struct ieee80211_frame *);
3747 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3748
3749 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3750 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3751
3752 if (type == IEEE80211_FC0_TYPE_MGT ||
3753 type == IEEE80211_FC0_TYPE_CTL ||
3754 (m->m_flags & M_EAPOL) != 0) {
3755 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3756 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3757 "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3758 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3759 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3760 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3761 "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3762 } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3763 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3764 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3765 "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3766 } else {
3767 /* for data frames, use RS table */
3768 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3769 ridx = iwm_rate2ridx(sc, ni->ni_txrate);
3770 if (ridx == -1)
3771 ridx = 0;
3772
3773 /* This is the index into the programmed table */
3774 tx->initial_rate_index = 0;
3775 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3776 }
3777
3778 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3779 "%s: frame type=%d txrate %d\n",
3780 __func__, type, iwm_rates[ridx].rate);
3781
3782 rinfo = &iwm_rates[ridx];
3783
3784 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3785 __func__, ridx,
3786 rinfo->rate,
3787 !! (IWM_RIDX_IS_CCK(ridx))
3788 );
3789
3790 /* XXX TODO: hard-coded TX antenna? */
3791 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_9000)
3792 rate_flags = IWM_RATE_MCS_ANT_B_MSK;
3793 else
3794 rate_flags = IWM_RATE_MCS_ANT_A_MSK;
3795 if (IWM_RIDX_IS_CCK(ridx))
3796 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3797 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3798
3799 return rinfo;
3800 }
3801
3802 #define TB0_SIZE 16
3803 static int
3804 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3805 {
3806 struct ieee80211com *ic = &sc->sc_ic;
3807 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3808 struct iwm_node *in = IWM_NODE(ni);
3809 struct iwm_tx_ring *ring;
3810 struct iwm_tx_data *data;
3811 struct iwm_tfd *desc;
3812 struct iwm_device_cmd *cmd;
3813 struct iwm_tx_cmd *tx;
3814 struct ieee80211_frame *wh;
3815 struct ieee80211_key *k = NULL;
3816 struct mbuf *m1;
3817 const struct iwm_rate *rinfo;
3818 uint32_t flags;
3819 u_int hdrlen;
3820 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3821 int nsegs;
3822 uint8_t tid, type;
3823 int i, totlen, error, pad;
3824
3825 wh = mtod(m, struct ieee80211_frame *);
3826 hdrlen = ieee80211_anyhdrsize(wh);
3827 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3828 tid = 0;
3829 ring = &sc->txq[ac];
3830 desc = &ring->desc[ring->cur];
3831 data = &ring->data[ring->cur];
3832
3833 /* Fill out iwm_tx_cmd to send to the firmware */
3834 cmd = &ring->cmd[ring->cur];
3835 cmd->hdr.code = IWM_TX_CMD;
3836 cmd->hdr.flags = 0;
3837 cmd->hdr.qid = ring->qid;
3838 cmd->hdr.idx = ring->cur;
3839
3840 tx = (void *)cmd->data;
3841 memset(tx, 0, sizeof(*tx));
3842
3843 rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3844
3845 /* Encrypt the frame if need be. */
3846 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3847 /* Retrieve key for TX && do software encryption. */
3848 k = ieee80211_crypto_encap(ni, m);
3849 if (k == NULL) {
3850 m_freem(m);
3851 return (ENOBUFS);
3852 }
3853 /* 802.11 header may have moved. */
3854 wh = mtod(m, struct ieee80211_frame *);
3855 }
3856
3857 if (ieee80211_radiotap_active_vap(vap)) {
3858 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3859
3860 tap->wt_flags = 0;
3861 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3862 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3863 tap->wt_rate = rinfo->rate;
3864 if (k != NULL)
3865 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3866 ieee80211_radiotap_tx(vap, m);
3867 }
3868
3869 flags = 0;
3870 totlen = m->m_pkthdr.len;
3871 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3872 flags |= IWM_TX_CMD_FLG_ACK;
3873 }
3874
3875 if (type == IEEE80211_FC0_TYPE_DATA &&
3876 totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold &&
3877 !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3878 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3879 }
3880
3881 tx->sta_id = IWM_STATION_ID;
3882
3883 if (type == IEEE80211_FC0_TYPE_MGT) {
3884 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3885
3886 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3887 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3888 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3889 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3890 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3891 } else {
3892 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3893 }
3894 } else {
3895 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3896 }
3897
3898 if (hdrlen & 3) {
3899 /* First segment length must be a multiple of 4. */
3900 flags |= IWM_TX_CMD_FLG_MH_PAD;
3901 tx->offload_assist |= htole16(IWM_TX_CMD_OFFLD_PAD);
3902 pad = 4 - (hdrlen & 3);
3903 } else {
3904 tx->offload_assist = 0;
3905 pad = 0;
3906 }
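	/*
	 * Example: a 26-byte QoS data header yields pad = 2, giving the
	 * firmware a 4-byte aligned first segment.
	 */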
3907
3908 tx->len = htole16(totlen);
3909 tx->tid_tspec = tid;
3910 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3911
3912 /* Set physical address of "scratch area". */
3913 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3914 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3915
3916 /* Copy 802.11 header in TX command. */
3917 memcpy((uint8_t *)tx + sizeof(*tx), wh, hdrlen);
3918
3919 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3920
3921 tx->sec_ctl = 0;
3922 tx->tx_flags |= htole32(flags);
3923
3924 /* Trim 802.11 header. */
3925 m_adj(m, hdrlen);
3926 #if !defined(__DragonFly__)
3927 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3928 segs, &nsegs, BUS_DMA_NOWAIT);
3929 #else
3930 error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
3931 segs, IWM_MAX_SCATTER - 2,
3932 &nsegs, BUS_DMA_NOWAIT);
3933 #endif
3934 if (error != 0) {
3935 if (error != EFBIG) {
3936 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3937 error);
3938 m_freem(m);
3939 return error;
3940 }
3941 /* Too many DMA segments, linearize mbuf. */
3942 #if !defined(__DragonFly__)
3943 m1 = m_collapse(m, M_WAITOK, IWM_MAX_SCATTER - 2);
3944 #else
3945 m1 = m_defrag(m, M_NOWAIT);
3946 #endif
3947 if (m1 == NULL) {
3948 device_printf(sc->sc_dev,
3949 "%s: could not defrag mbuf\n", __func__);
3950 m_freem(m);
3951 return (ENOBUFS);
3952 }
3953 m = m1;
3954
3955 #if !defined(__DragonFly__)
3956 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3957 segs, &nsegs, BUS_DMA_NOWAIT);
3958 #else
3959 error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map,
3960 &m, segs, IWM_MAX_SCATTER - 2, &nsegs, BUS_DMA_NOWAIT);
3961 #endif
3962 if (error != 0) {
3963 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3964 error);
3965 m_freem(m);
3966 return error;
3967 }
3968 }
3969 data->m = m;
3970 data->in = in;
3971 data->done = 0;
3972
3973 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3974 "sending txd %p, in %p\n", data, data->in);
3975 KASSERT(data->in != NULL, ("node is NULL"));
3976
3977 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3978 "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3979 ring->qid, ring->cur, totlen, nsegs,
3980 le32toh(tx->tx_flags),
3981 le32toh(tx->rate_n_flags),
3982 tx->initial_rate_index
3983 );
3984
3985 /* Fill TX descriptor. */
3986 memset(desc, 0, sizeof(*desc));
3987 desc->num_tbs = 2 + nsegs;
3988
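	/*
	 * TB0 covers the first 16 bytes of the command header; TB1 covers
	 * the rest of the Tx command plus the (padded) 802.11 header; the
	 * remaining TBs point at the payload segments mapped above.
	 */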
3989 desc->tbs[0].lo = htole32(data->cmd_paddr);
3990 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3991 (TB0_SIZE << 4));
3992 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3993 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3994 ((sizeof(struct iwm_cmd_header) + sizeof(*tx) +
3995 hdrlen + pad - TB0_SIZE) << 4));
3996
3997 /* Other DMA segments are for data payload. */
3998 for (i = 0; i < nsegs; i++) {
3999 seg = &segs[i];
4000 desc->tbs[i + 2].lo = htole32(seg->ds_addr);
4001 desc->tbs[i + 2].hi_n_len =
4002 	    htole16(iwm_get_dma_hi_addr(seg->ds_addr) |
4003 	    (seg->ds_len << 4));
4004 }
4005
4006 bus_dmamap_sync(ring->data_dmat, data->map,
4007 BUS_DMASYNC_PREWRITE);
4008 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
4009 BUS_DMASYNC_PREWRITE);
4010 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4011 BUS_DMASYNC_PREWRITE);
4012
4013 #if 0
4014 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
4015 #endif
4016
4017 /* Kick TX ring. */
4018 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4019 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4020
4021 /* Mark TX ring as full if we reach a certain threshold. */
4022 if (++ring->queued > IWM_TX_RING_HIMARK) {
4023 sc->qfullmsk |= 1 << ring->qid;
4024 }
4025
4026 return 0;
4027 }
4028
4029 static int
4030 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
4031 const struct ieee80211_bpf_params *params)
4032 {
4033 struct ieee80211com *ic = ni->ni_ic;
4034 struct iwm_softc *sc = ic->ic_softc;
4035 int error = 0;
4036
4037 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4038 "->%s begin\n", __func__);
4039
4040 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4041 m_freem(m);
4042 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
4043 "<-%s not RUNNING\n", __func__);
4044 return (ENETDOWN);
4045 }
4046
4047 IWM_LOCK(sc);
4048 /* XXX fix this */
4049 if (params == NULL) {
4050 error = iwm_tx(sc, m, ni, 0);
4051 } else {
4052 error = iwm_tx(sc, m, ni, 0);
4053 }
4054 if (sc->sc_tx_timer == 0)
4055 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4056 sc->sc_tx_timer = 5;
4057 IWM_UNLOCK(sc);
4058
4059 return (error);
4060 }
4061
4062 /*
4063 * mvm/tx.c
4064 */
4065
4066 /*
4067 * Note that there are transports that buffer frames before they reach
4068 * the firmware. This means that after flush_tx_path is called, the
4069 * queue might not be empty. The race-free way to handle this is to:
4070 * 1) set the station as draining
4071 * 2) flush the Tx path
4072 * 3) wait for the transport queues to be empty
4073 */
4074 int
4075 iwm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
4076 {
4077 int ret;
4078 struct iwm_tx_path_flush_cmd_v1 flush_cmd = {
4079 .queues_ctl = htole32(tfd_msk),
4080 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4081 };
4082
4083 ret = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
4084 sizeof(flush_cmd), &flush_cmd);
4085 if (ret)
4086 device_printf(sc->sc_dev,
4087 "Flushing tx queue failed: %d\n", ret);
4088 return ret;
4089 }
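
/*
 * A minimal sketch of the race-free drain sequence described above.
 * iwm_drain_sta() and iwm_wait_tx_queues_empty() are hypothetical
 * helpers, named here only for illustration; this driver does not
 * provide them.
 */
#if 0
static int
iwm_drain_tx_sketch(struct iwm_softc *sc, uint32_t tfd_msk)
{
	int error;

	/* 1) Mark the station as draining so no new frames are queued. */
	error = iwm_drain_sta(sc, TRUE);
	if (error)
		return error;

	/* 2) Flush the Tx path; transports may still hold buffered frames. */
	error = iwm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
	if (error)
		return error;

	/* 3) Wait until the transport queues are actually empty. */
	return iwm_wait_tx_queues_empty(sc, tfd_msk);
}
#endif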
4090
4091 /*
4092 * BEGIN mvm/quota.c
4093 */
4094
4095 static int
4096 iwm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
4097 {
4098 struct iwm_time_quota_cmd_v1 cmd;
4099 int i, idx, ret, num_active_macs, quota, quota_rem;
4100 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4101 int n_ifs[IWM_MAX_BINDINGS] = {0, };
4102 uint16_t id;
4103
4104 memset(&cmd, 0, sizeof(cmd));
4105
4106 /* currently, PHY ID == binding ID */
4107 if (ivp) {
4108 id = ivp->phy_ctxt->id;
4109 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4110 colors[id] = ivp->phy_ctxt->color;
4111
4112 if (1)
4113 n_ifs[id] = 1;
4114 }
4115
4116 /*
4117 * The FW's scheduling session consists of
4118 * IWM_MAX_QUOTA fragments. Divide these fragments
4119 	 * equally between all the bindings that require quota.
4120 */
4121 num_active_macs = 0;
4122 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4123 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4124 num_active_macs += n_ifs[i];
4125 }
4126
4127 quota = 0;
4128 quota_rem = 0;
4129 if (num_active_macs) {
4130 quota = IWM_MAX_QUOTA / num_active_macs;
4131 quota_rem = IWM_MAX_QUOTA % num_active_macs;
4132 }
4133
4134 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4135 if (colors[i] < 0)
4136 continue;
4137
4138 cmd.quotas[idx].id_and_color =
4139 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4140
4141 if (n_ifs[i] <= 0) {
4142 cmd.quotas[idx].quota = htole32(0);
4143 cmd.quotas[idx].max_duration = htole32(0);
4144 } else {
4145 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4146 cmd.quotas[idx].max_duration = htole32(0);
4147 }
4148 idx++;
4149 }
4150
4151 /* Give the remainder of the session to the first binding */
4152 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4153
4154 ret = iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4155 sizeof(cmd), &cmd);
4156 if (ret)
4157 device_printf(sc->sc_dev,
4158 "%s: Failed to send quota: %d\n", __func__, ret);
4159 return ret;
4160 }
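
/*
 * Worked example, assuming IWM_MAX_QUOTA == 128: with two active
 * bindings each receives 128 / 2 == 64 fragments and nothing is left
 * over; with three, each receives 42, and the first binding gets the
 * 128 % 3 == 2 leftover fragments on top.
 */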
4161
4162 /*
4163 * END mvm/quota.c
4164 */
4165
4166 /*
4167 * ieee80211 routines
4168 */
4169
4170 /*
4171 * Change to AUTH state in 80211 state machine. Roughly matches what
4172 * Linux does in bss_info_changed().
4173 */
4174 static int
4175 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4176 {
4177 struct ieee80211_node *ni;
4178 struct iwm_node *in;
4179 struct iwm_vap *iv = IWM_VAP(vap);
4180 uint32_t duration;
4181 int error;
4182
4183 /*
4184 * XXX i have a feeling that the vap node is being
4185 * freed from underneath us. Grr.
4186 */
4187 ni = ieee80211_ref_node(vap->iv_bss);
4188 in = IWM_NODE(ni);
4189 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4190 "%s: called; vap=%p, bss ni=%p\n",
4191 __func__,
4192 vap,
4193 ni);
4194 IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
4195 __func__, ether_sprintf(ni->ni_bssid));
4196
4197 in->in_assoc = 0;
4198 iv->iv_auth = 1;
4199
4200 /*
4201 * Firmware bug - it'll crash if the beacon interval is less
4202 * than 16. We can't avoid connecting at all, so refuse the
4203 * station state change, this will cause net80211 to abandon
4204 * attempts to connect to this AP, and eventually wpa_s will
4205 * blacklist the AP...
4206 */
4207 if (ni->ni_intval < 16) {
4208 device_printf(sc->sc_dev,
4209 "AP %s beacon interval is %d, refusing due to firmware bug!\n",
4210 ether_sprintf(ni->ni_bssid), ni->ni_intval);
4211 error = EINVAL;
4212 goto out;
4213 }
4214
4215 error = iwm_allow_mcast(vap, sc);
4216 if (error) {
4217 device_printf(sc->sc_dev,
4218 "%s: failed to set multicast\n", __func__);
4219 goto out;
4220 }
4221
4222 /*
4223 * This is where it deviates from what Linux does.
4224 *
4225 * Linux iwlwifi doesn't reset the nic each time, nor does it
4226 * call ctxt_add() here. Instead, it adds it during vap creation,
4227 * and always does a mac_ctx_changed().
4228 *
4229 * The openbsd port doesn't attempt to do that - it reset things
4230 * at odd states and does the add here.
4231 *
4232 * So, until the state handling is fixed (ie, we never reset
4233 * the NIC except for a firmware failure, which should drag
4234 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4235 * contexts that are required), let's do a dirty hack here.
4236 */
4237 if (iv->is_uploaded) {
4238 if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
4239 device_printf(sc->sc_dev,
4240 "%s: failed to update MAC\n", __func__);
4241 goto out;
4242 }
4243 } else {
4244 if ((error = iwm_mac_ctxt_add(sc, vap)) != 0) {
4245 device_printf(sc->sc_dev,
4246 "%s: failed to add MAC\n", __func__);
4247 goto out;
4248 }
4249 }
4250 sc->sc_firmware_state = 1;
4251
4252 if ((error = iwm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4253 in->in_ni.ni_chan, 1, 1)) != 0) {
4254 device_printf(sc->sc_dev,
4255 "%s: failed update phy ctxt\n", __func__);
4256 goto out;
4257 }
4258 iv->phy_ctxt = &sc->sc_phyctxt[0];
4259
4260 if ((error = iwm_binding_add_vif(sc, iv)) != 0) {
4261 device_printf(sc->sc_dev,
4262 "%s: binding update cmd\n", __func__);
4263 goto out;
4264 }
4265 sc->sc_firmware_state = 2;
4266 /*
4267 * Authentication becomes unreliable when powersaving is left enabled
4268 * here. Powersaving will be activated again when association has
4269 * finished or is aborted.
4270 */
4271 iv->ps_disabled = TRUE;
4272 error = iwm_power_update_mac(sc);
4273 iv->ps_disabled = FALSE;
4274 if (error != 0) {
4275 device_printf(sc->sc_dev,
4276 "%s: failed to update power management\n",
4277 __func__);
4278 goto out;
4279 }
4280 if ((error = iwm_add_sta(sc, in)) != 0) {
4281 device_printf(sc->sc_dev,
4282 "%s: failed to add sta\n", __func__);
4283 goto out;
4284 }
4285 sc->sc_firmware_state = 3;
4286
4287 /*
4288 * Prevent the FW from wandering off channel during association
4289 * by "protecting" the session with a time event.
4290 */
4291 /* XXX duration is in units of TU, not MS */
4292 duration = IWM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4293 iwm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);
4294
4295 error = 0;
4296 out:
4297 if (error != 0)
4298 iv->iv_auth = 0;
4299 ieee80211_free_node(ni);
4300 return (error);
4301 }
4302
4303 static struct ieee80211_node *
4304 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4305 {
4306 return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4307 M_WAITOK | M_ZERO);
4308 }
4309
4310 static uint8_t
4311 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4312 {
4313 uint8_t plcp = rate_n_flags & 0xff;
4314 int i;
4315
4316 for (i = 0; i <= IWM_RIDX_MAX; i++) {
4317 if (iwm_rates[i].plcp == plcp)
4318 return iwm_rates[i].rate;
4319 }
4320 return 0;
4321 }
4322
4323 uint8_t
4324 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4325 {
4326 int i;
4327 uint8_t rval;
4328
4329 for (i = 0; i < rs->rs_nrates; i++) {
4330 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4331 if (rval == iwm_rates[ridx].rate)
4332 return rs->rs_rates[i];
4333 }
4334
4335 return 0;
4336 }
4337
4338 static int
4339 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4340 {
4341 int i;
4342
4343 for (i = 0; i <= IWM_RIDX_MAX; i++) {
4344 if (iwm_rates[i].rate == rate)
4345 return i;
4346 }
4347
4348 device_printf(sc->sc_dev,
4349 "%s: WARNING: device rate for %u not found!\n",
4350 __func__, rate);
4351
4352 return -1;
4353 }
4354
4355
4356 static void
4357 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4358 {
4359 struct ieee80211_node *ni = &in->in_ni;
4360 struct iwm_lq_cmd *lq = &in->in_lq;
4361 struct ieee80211_rateset *rs = &ni->ni_rates;
4362 int nrates = rs->rs_nrates;
4363 int i, ridx, tab = 0;
4364 // int txant = 0;
4365
4366 KASSERT(rix >= 0 && rix < nrates, ("invalid rix"));
4367
4368 if (nrates > nitems(lq->rs_table)) {
4369 device_printf(sc->sc_dev,
4370 "%s: node supports %d rates, driver handles "
4371 "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4372 return;
4373 }
4374 if (nrates == 0) {
4375 device_printf(sc->sc_dev,
4376 "%s: node supports 0 rates, odd!\n", __func__);
4377 return;
4378 }
4379 nrates = imin(rix + 1, nrates);
4380
4381 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4382 "%s: nrates=%d\n", __func__, nrates);
4383
4384 /* then construct a lq_cmd based on those */
4385 memset(lq, 0, sizeof(*lq));
4386 lq->sta_id = IWM_STATION_ID;
4387
4388 /* For HT, always enable RTS/CTS to avoid excessive retries. */
4389 if (ni->ni_flags & IEEE80211_NODE_HT)
4390 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4391
4392 /*
4393 	 * Are these used? (We don't do SISO or MIMO.)  They
4394 	 * need to be set to non-zero, though, or we get an error.
4395 */
4396 lq->single_stream_ant_msk = 1;
4397 lq->dual_stream_ant_msk = 1;
4398
4399 /*
4400 * Build the actual rate selection table.
4401 * The lowest bits are the rates. Additionally,
4402 * CCK needs bit 9 to be set. The rest of the bits
4403 	 * we add to the table select the tx antenna.
4404 	 * Note that we add the rates with the highest rate first
4405 	 * (the opposite of the ni_rates ordering).
4406 */
4407 for (i = 0; i < nrates; i++) {
4408 int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4409 int nextant;
4410
4411 /* Map 802.11 rate to HW rate index. */
4412 ridx = iwm_rate2ridx(sc, rate);
4413 if (ridx == -1)
4414 continue;
4415
4416 #if 0
4417 if (txant == 0)
4418 txant = iwm_get_valid_tx_ant(sc);
4419 nextant = 1<<(ffs(txant)-1);
4420 txant &= ~nextant;
4421 #else
4422 nextant = iwm_get_valid_tx_ant(sc);
4423 #endif
4424 tab = iwm_rates[ridx].plcp;
4425 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4426 if (IWM_RIDX_IS_CCK(ridx))
4427 tab |= IWM_RATE_MCS_CCK_MSK;
4428 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4429 "station rate i=%d, rate=%d, hw=%x\n",
4430 i, iwm_rates[ridx].rate, tab);
4431 lq->rs_table[i] = htole32(tab);
4432 }
4433 /* then fill the rest with the lowest possible rate */
4434 for (i = nrates; i < nitems(lq->rs_table); i++) {
4435 KASSERT(tab != 0, ("invalid tab"));
4436 lq->rs_table[i] = htole32(tab);
4437 }
4438 }
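
/*
 * Example encoding, using the PLCP values seen elsewhere in this file
 * and assuming antenna A: an 11 Mb/s CCK entry is
 * 110 | (1 << IWM_RATE_MCS_ANT_POS) | IWM_RATE_MCS_CCK_MSK, while a
 * 54 Mb/s OFDM entry is 3 | (1 << IWM_RATE_MCS_ANT_POS) with the CCK
 * bit clear.
 */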
4439
4440 static void
4441 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
4442 {
4443 struct iwm_vap *ivp = IWM_VAP(vap);
4444 int error;
4445
4446 /* Avoid Tx watchdog triggering, when transfers get dropped here. */
4447 sc->sc_tx_timer = 0;
4448
4449 ivp->iv_auth = 0;
4450 if (sc->sc_firmware_state == 3) {
4451 iwm_xmit_queue_drain(sc);
4452 // iwm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
4453 error = iwm_rm_sta(sc, vap, TRUE);
4454 if (error) {
4455 device_printf(sc->sc_dev,
4456 "%s: Failed to remove station: %d\n",
4457 __func__, error);
4458 }
4459 }
4460 if (sc->sc_firmware_state == 3) {
4461 error = iwm_mac_ctxt_changed(sc, vap);
4462 if (error) {
4463 device_printf(sc->sc_dev,
4464 "%s: Failed to change mac context: %d\n",
4465 __func__, error);
4466 }
4467 }
4468 if (sc->sc_firmware_state == 3) {
4469 error = iwm_sf_update(sc, vap, FALSE);
4470 if (error) {
4471 device_printf(sc->sc_dev,
4472 "%s: Failed to update smart FIFO: %d\n",
4473 __func__, error);
4474 }
4475 }
4476 if (sc->sc_firmware_state == 3) {
4477 error = iwm_rm_sta_id(sc, vap);
4478 if (error) {
4479 device_printf(sc->sc_dev,
4480 "%s: Failed to remove station id: %d\n",
4481 __func__, error);
4482 }
4483 }
4484 if (sc->sc_firmware_state == 3) {
4485 error = iwm_update_quotas(sc, NULL);
4486 if (error) {
4487 device_printf(sc->sc_dev,
4488 "%s: Failed to update PHY quota: %d\n",
4489 __func__, error);
4490 }
4491 }
4492 if (sc->sc_firmware_state == 3) {
4493 /* XXX Might need to specify bssid correctly. */
4494 error = iwm_mac_ctxt_changed(sc, vap);
4495 if (error) {
4496 device_printf(sc->sc_dev,
4497 "%s: Failed to change mac context: %d\n",
4498 __func__, error);
4499 }
4500 }
4501 if (sc->sc_firmware_state == 3) {
4502 sc->sc_firmware_state = 2;
4503 }
4504 if (sc->sc_firmware_state > 1) {
4505 error = iwm_binding_remove_vif(sc, ivp);
4506 if (error) {
4507 device_printf(sc->sc_dev,
4508 "%s: Failed to remove channel ctx: %d\n",
4509 __func__, error);
4510 }
4511 }
4512 if (sc->sc_firmware_state > 1) {
4513 sc->sc_firmware_state = 1;
4514 }
4515 ivp->phy_ctxt = NULL;
4516 if (sc->sc_firmware_state > 0) {
4517 error = iwm_mac_ctxt_changed(sc, vap);
4518 if (error) {
4519 device_printf(sc->sc_dev,
4520 "%s: Failed to change mac context: %d\n",
4521 __func__, error);
4522 }
4523 }
4524 if (sc->sc_firmware_state > 0) {
4525 error = iwm_power_update_mac(sc);
4526 if (error != 0) {
4527 device_printf(sc->sc_dev,
4528 "%s: failed to update power management\n",
4529 __func__);
4530 }
4531 }
4532 sc->sc_firmware_state = 0;
4533 }
4534
4535 static int
4536 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4537 {
4538 struct iwm_vap *ivp = IWM_VAP(vap);
4539 struct ieee80211com *ic = vap->iv_ic;
4540 struct iwm_softc *sc = ic->ic_softc;
4541 struct iwm_node *in;
4542 int error;
4543
4544 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4545 "switching state %s -> %s arg=0x%x\n",
4546 ieee80211_state_name[vap->iv_state],
4547 ieee80211_state_name[nstate],
4548 arg);
4549
4550 IEEE80211_UNLOCK(ic);
4551 IWM_LOCK(sc);
4552
4553 if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
4554 (nstate == IEEE80211_S_AUTH ||
4555 nstate == IEEE80211_S_ASSOC ||
4556 nstate == IEEE80211_S_RUN)) {
4557 /* Stop blinking for a scan, when authenticating. */
4558 iwm_led_blink_stop(sc);
4559 }
4560
4561 if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
4562 iwm_led_disable(sc);
4563 /* disable beacon filtering if we're hopping out of RUN */
4564 iwm_disable_beacon_filter(sc);
4565 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4566 in->in_assoc = 0;
4567 }
4568
4569 if ((vap->iv_state == IEEE80211_S_AUTH ||
4570 vap->iv_state == IEEE80211_S_ASSOC ||
4571 vap->iv_state == IEEE80211_S_RUN) &&
4572 (nstate == IEEE80211_S_INIT ||
4573 nstate == IEEE80211_S_SCAN ||
4574 nstate == IEEE80211_S_AUTH)) {
4575 iwm_stop_session_protection(sc, ivp);
4576 }
4577
4578 if ((vap->iv_state == IEEE80211_S_RUN ||
4579 vap->iv_state == IEEE80211_S_ASSOC) &&
4580 nstate == IEEE80211_S_INIT) {
4581 /*
4582 * In this case, iv_newstate() wants to send an 80211 frame on
4583 * the network that we are leaving. So we need to call it,
4584 * before tearing down all the firmware state.
4585 */
4586 IWM_UNLOCK(sc);
4587 IEEE80211_LOCK(ic);
4588 ivp->iv_newstate(vap, nstate, arg);
4589 IEEE80211_UNLOCK(ic);
4590 IWM_LOCK(sc);
4591 iwm_bring_down_firmware(sc, vap);
4592 IWM_UNLOCK(sc);
4593 IEEE80211_LOCK(ic);
4594 return 0;
4595 }
4596
4597 switch (nstate) {
4598 case IEEE80211_S_INIT:
4599 case IEEE80211_S_SCAN:
4600 break;
4601
4602 case IEEE80211_S_AUTH:
4603 iwm_bring_down_firmware(sc, vap);
4604 if ((error = iwm_auth(vap, sc)) != 0) {
4605 device_printf(sc->sc_dev,
4606 "%s: could not move to auth state: %d\n",
4607 __func__, error);
4608 iwm_bring_down_firmware(sc, vap);
4609 IWM_UNLOCK(sc);
4610 IEEE80211_LOCK(ic);
4611 return 1;
4612 }
4613 break;
4614
4615 case IEEE80211_S_ASSOC:
4616 /*
4617 * EBS may be disabled due to previous failures reported by FW.
4618 * Reset EBS status here assuming environment has been changed.
4619 */
4620 sc->last_ebs_successful = TRUE;
4621 break;
4622
4623 case IEEE80211_S_RUN:
4624 in = IWM_NODE(vap->iv_bss);
4625 /* Update the association state, now we have it all */
4626 		/* (e.g. the associd comes in at this point) */
4627 error = iwm_update_sta(sc, in);
4628 if (error != 0) {
4629 device_printf(sc->sc_dev,
4630 "%s: failed to update STA\n", __func__);
4631 IWM_UNLOCK(sc);
4632 IEEE80211_LOCK(ic);
4633 return error;
4634 }
4635 in->in_assoc = 1;
4636 error = iwm_mac_ctxt_changed(sc, vap);
4637 if (error != 0) {
4638 device_printf(sc->sc_dev,
4639 "%s: failed to update MAC: %d\n", __func__, error);
4640 }
4641
4642 iwm_sf_update(sc, vap, FALSE);
4643 iwm_enable_beacon_filter(sc, ivp);
4644 iwm_power_update_mac(sc);
4645 iwm_update_quotas(sc, ivp);
4646 int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4647 iwm_setrates(sc, in, rix);
4648
4649 if ((error = iwm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4650 device_printf(sc->sc_dev,
4651 "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4652 }
4653
4654 iwm_led_enable(sc);
4655 break;
4656
4657 default:
4658 break;
4659 }
4660 IWM_UNLOCK(sc);
4661 IEEE80211_LOCK(ic);
4662
4663 return (ivp->iv_newstate(vap, nstate, arg));
4664 }
4665
4666 void
4667 iwm_endscan_cb(void *arg, int pending)
4668 {
4669 struct iwm_softc *sc = arg;
4670 struct ieee80211com *ic = &sc->sc_ic;
4671
4672 IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4673 "%s: scan ended\n",
4674 __func__);
4675
4676 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4677 }
4678
4679 static int
4680 iwm_send_bt_init_conf(struct iwm_softc *sc)
4681 {
4682 struct iwm_bt_coex_cmd bt_cmd;
4683
4684 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4685 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4686
4687 return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4688 &bt_cmd);
4689 }
4690
4691 static boolean_t
4692 iwm_is_lar_supported(struct iwm_softc *sc)
4693 {
4694 boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4695 boolean_t tlv_lar = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4696
4697 if (iwm_lar_disable)
4698 return FALSE;
4699
4700 /*
4701 * Enable LAR only if it is supported by the FW (TLV) &&
4702 * enabled in the NVM
4703 */
4704 if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
4705 return nvm_lar && tlv_lar;
4706 else
4707 return tlv_lar;
4708 }
4709
4710 static boolean_t
4711 iwm_is_wifi_mcc_supported(struct iwm_softc *sc)
4712 {
4713 return iwm_fw_has_api(sc, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4714 iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4715 }
4716
4717 static int
4718 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4719 {
4720 struct iwm_mcc_update_cmd mcc_cmd;
4721 struct iwm_host_cmd hcmd = {
4722 .id = IWM_MCC_UPDATE_CMD,
4723 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4724 .data = { &mcc_cmd },
4725 };
4726 int ret;
4727 #ifdef IWM_DEBUG
4728 struct iwm_rx_packet *pkt;
4729 struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4730 struct iwm_mcc_update_resp_v2 *mcc_resp;
4731 int n_channels;
4732 uint16_t mcc;
4733 #endif
4734 int resp_v2 = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4735
4736 if (!iwm_is_lar_supported(sc)) {
4737 IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4738 __func__);
4739 return 0;
4740 }
4741
4742 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4743 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4744 if (iwm_is_wifi_mcc_supported(sc))
4745 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4746 else
4747 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4748
4749 if (resp_v2)
4750 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4751 else
4752 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4753
4754 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4755 "send MCC update to FW with '%c%c' src = %d\n",
4756 alpha2[0], alpha2[1], mcc_cmd.source_id);
4757
4758 ret = iwm_send_cmd(sc, &hcmd);
4759 if (ret)
4760 return ret;
4761
4762 #ifdef IWM_DEBUG
4763 pkt = hcmd.resp_pkt;
4764
4765 /* Extract MCC response */
4766 if (resp_v2) {
4767 mcc_resp = (void *)pkt->data;
4768 mcc = mcc_resp->mcc;
4769 n_channels = le32toh(mcc_resp->n_channels);
4770 } else {
4771 mcc_resp_v1 = (void *)pkt->data;
4772 mcc = mcc_resp_v1->mcc;
4773 n_channels = le32toh(mcc_resp_v1->n_channels);
4774 }
4775
4776 /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4777 if (mcc == 0)
4778 mcc = 0x3030; /* "00" - world */
4779
4780 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4781 "regulatory domain '%c%c' (%d channels available)\n",
4782 mcc >> 8, mcc & 0xff, n_channels);
4783 #endif
4784 iwm_free_resp(sc, &hcmd);
4785
4786 return 0;
4787 }
4788
4789 static void
4790 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4791 {
4792 struct iwm_host_cmd cmd = {
4793 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4794 .len = { sizeof(uint32_t), },
4795 .data = { &backoff, },
4796 };
4797
4798 if (iwm_send_cmd(sc, &cmd) != 0) {
4799 device_printf(sc->sc_dev,
4800 "failed to change thermal tx backoff\n");
4801 }
4802 }
4803
4804 static int
4805 iwm_init_hw(struct iwm_softc *sc)
4806 {
4807 struct ieee80211com *ic = &sc->sc_ic;
4808 int error, i, ac;
4809
4810 sc->sf_state = IWM_SF_UNINIT;
4811
4812 if ((error = iwm_start_hw(sc)) != 0) {
4813 kprintf("iwm_start_hw: failed %d\n", error);
4814 return error;
4815 }
4816
4817 if ((error = iwm_run_init_ucode(sc, 0)) != 0) {
4818 kprintf("iwm_run_init_ucode: failed %d\n", error);
4819 return error;
4820 }
4821
4822 /*
4823 	 * We should stop and restart the HW since the INIT
4824 	 * image has just been loaded.
4825 */
4826 iwm_stop_device(sc);
4827 sc->sc_ps_disabled = FALSE;
4828 if ((error = iwm_start_hw(sc)) != 0) {
4829 device_printf(sc->sc_dev, "could not initialize hardware\n");
4830 return error;
4831 }
4832
4833 	/* Restart, this time with the regular firmware. */
4834 error = iwm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4835 if (error) {
4836 device_printf(sc->sc_dev, "could not load firmware\n");
4837 goto error;
4838 }
4839
4840 error = iwm_sf_update(sc, NULL, FALSE);
4841 if (error)
4842 device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4843
4844 if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4845 device_printf(sc->sc_dev, "bt init conf failed\n");
4846 goto error;
4847 }
4848
4849 error = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
4850 if (error != 0) {
4851 device_printf(sc->sc_dev, "antenna config failed\n");
4852 goto error;
4853 }
4854
4855 /* Send phy db control command and then phy db calibration */
4856 if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4857 goto error;
4858
4859 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4860 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4861 goto error;
4862 }
4863
4864 /* Add auxiliary station for scanning */
4865 if ((error = iwm_add_aux_sta(sc)) != 0) {
4866 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4867 goto error;
4868 }
4869
4870 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4871 /*
4872 * The channel used here isn't relevant as it's
4873 * going to be overwritten in the other flows.
4874 * For now use the first channel we have.
4875 */
4876 if ((error = iwm_phy_ctxt_add(sc,
4877 &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4878 goto error;
4879 }
4880
4881 /* Initialize tx backoffs to the minimum. */
4882 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4883 iwm_tt_tx_backoff(sc, 0);
4884
4885 if (iwm_config_ltr(sc) != 0)
4886 device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
4887
4888 error = iwm_power_update_device(sc);
4889 if (error)
4890 goto error;
4891
4892 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4893 goto error;
4894
4895 if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4896 if ((error = iwm_config_umac_scan(sc)) != 0)
4897 goto error;
4898 }
4899
4900 /* Enable Tx queues. */
4901 for (ac = 0; ac < WME_NUM_AC; ac++) {
4902 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4903 iwm_ac_to_tx_fifo[ac]);
4904 if (error)
4905 goto error;
4906 }
4907
4908 if ((error = iwm_disable_beacon_filter(sc)) != 0) {
4909 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4910 goto error;
4911 }
4912
4913 return 0;
4914
4915 error:
4916 iwm_stop_device(sc);
4917 return error;
4918 }
4919
4920 /* Allow multicast from our BSSID. */
4921 static int
4922 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4923 {
4924 struct ieee80211_node *ni = vap->iv_bss;
4925 struct iwm_mcast_filter_cmd *cmd;
4926 size_t size;
4927 int error;
4928
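	/* The command length is rounded up to a 4-byte boundary for the firmware. */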
4929 size = roundup(sizeof(*cmd), 4);
4930 cmd = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
4931 if (cmd == NULL)
4932 return ENOMEM;
4933 cmd->filter_own = 1;
4934 cmd->port_id = 0;
4935 cmd->count = 0;
4936 cmd->pass_all = 1;
4937 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4938
4939 error = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4940 IWM_CMD_SYNC, size, cmd);
4941 kfree(cmd, M_DEVBUF);
4942
4943 return (error);
4944 }
4945
4946 /*
4947 * ifnet interfaces
4948 */
4949
4950 static void
4951 iwm_init(struct iwm_softc *sc)
4952 {
4953 int error;
4954
4955 if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4956 return;
4957 }
4958 sc->sc_generation++;
4959 sc->sc_flags &= ~IWM_FLAG_STOPPED;
4960
4961 if ((error = iwm_init_hw(sc)) != 0) {
4962 kprintf("iwm_init_hw failed %d\n", error);
4963 iwm_stop(sc);
4964 return;
4965 }
4966
4967 /*
4968 * Ok, firmware loaded and we are jogging
4969 */
4970 sc->sc_flags |= IWM_FLAG_HW_INITED;
4971 }
4972
4973 static int
4974 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4975 {
4976 struct iwm_softc *sc;
4977 int error;
4978
4979 sc = ic->ic_softc;
4980
4981 IWM_LOCK(sc);
4982 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4983 IWM_UNLOCK(sc);
4984 return (ENXIO);
4985 }
4986 error = mbufq_enqueue(&sc->sc_snd, m);
4987 if (error) {
4988 IWM_UNLOCK(sc);
4989 return (error);
4990 }
4991 iwm_start(sc);
4992 IWM_UNLOCK(sc);
4993 return (0);
4994 }
4995
4996 /*
4997 * Dequeue packets from sendq and call send.
4998 */
4999 static void
5000 iwm_start(struct iwm_softc *sc)
5001 {
5002 struct ieee80211_node *ni;
5003 struct mbuf *m;
5004 int ac = 0;
5005
5006 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5007 while (sc->qfullmsk == 0 &&
5008 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5009 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5010 if (iwm_tx(sc, m, ni, ac) != 0) {
5011 if_inc_counter(ni->ni_vap->iv_ifp,
5012 IFCOUNTER_OERRORS, 1);
5013 ieee80211_free_node(ni);
5014 continue;
5015 }
5016 if (sc->sc_tx_timer == 0) {
5017 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
5018 sc);
5019 }
5020 sc->sc_tx_timer = 15;
5021 }
5022 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5023 }
5024
5025 static void
5026 iwm_stop(struct iwm_softc *sc)
5027 {
5028
5029 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
5030 sc->sc_flags |= IWM_FLAG_STOPPED;
5031 sc->sc_generation++;
5032 iwm_led_blink_stop(sc);
5033 sc->sc_tx_timer = 0;
5034 iwm_stop_device(sc);
5035 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5036 }
5037
5038 static void
5039 iwm_watchdog(void *arg)
5040 {
5041 struct iwm_softc *sc = arg;
5042 struct ieee80211com *ic = &sc->sc_ic;
5043
5044 if (sc->sc_attached == 0)
5045 return;
5046
5047 if (sc->sc_tx_timer > 0) {
5048 if (--sc->sc_tx_timer == 0) {
5049 device_printf(sc->sc_dev, "device timeout\n");
5050 #ifdef IWM_DEBUG
5051 iwm_nic_error(sc);
5052 #endif
5053 ieee80211_restart_all(ic);
5054 #if defined(__DragonFly__)
5055 ++sc->sc_ic.ic_oerrors;
5056 #else
5057 counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5058 #endif
5059 return;
5060 }
5061 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5062 }
5063 }
5064
5065 static void
5066 iwm_parent(struct ieee80211com *ic)
5067 {
5068 struct iwm_softc *sc = ic->ic_softc;
5069 int startall = 0;
5070 int rfkill = 0;
5071
5072 IWM_LOCK(sc);
5073 if (ic->ic_nrunning > 0) {
5074 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5075 iwm_init(sc);
5076 rfkill = iwm_check_rfkill(sc);
5077 if (!rfkill)
5078 startall = 1;
5079 }
5080 } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5081 iwm_stop(sc);
5082 IWM_UNLOCK(sc);
5083 if (startall)
5084 ieee80211_start_all(ic);
5085 else if (rfkill)
5086 taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
5087 }
5088
5089 static void
5090 iwm_rftoggle_task(void *arg, int npending __unused)
5091 {
5092 struct iwm_softc *sc = arg;
5093 struct ieee80211com *ic = &sc->sc_ic;
5094 int rfkill;
5095
5096 IWM_LOCK(sc);
5097 rfkill = iwm_check_rfkill(sc);
5098 IWM_UNLOCK(sc);
5099 if (rfkill) {
5100 device_printf(sc->sc_dev,
5101 "%s: rfkill switch, disabling interface\n", __func__);
5102 ieee80211_suspend_all(ic);
5103 ieee80211_notify_radio(ic, 0);
5104 } else {
5105 device_printf(sc->sc_dev,
5106 "%s: rfkill cleared, re-enabling interface\n", __func__);
5107 ieee80211_resume_all(ic);
5108 ieee80211_notify_radio(ic, 1);
5109 }
5110 }
5111
5112 /*
5113 * The interrupt side of things
5114 */
5115
5116 /*
5117 * error dumping routines are from iwlwifi/mvm/utils.c
5118 */
5119
5120 /*
5121 * Note: This structure is read from the device with IO accesses,
5122 * and the reading already does the endian conversion. As it is
5123 * read with uint32_t-sized accesses, any members with a different size
5124 * need to be ordered correctly though!
5125 */
5126 struct iwm_error_event_table {
5127 uint32_t valid; /* (nonzero) valid, (0) log is empty */
5128 uint32_t error_id; /* type of error */
5129 uint32_t trm_hw_status0; /* TRM HW status */
5130 uint32_t trm_hw_status1; /* TRM HW status */
5131 uint32_t blink2; /* branch link */
5132 uint32_t ilink1; /* interrupt link */
5133 uint32_t ilink2; /* interrupt link */
5134 uint32_t data1; /* error-specific data */
5135 uint32_t data2; /* error-specific data */
5136 uint32_t data3; /* error-specific data */
5137 uint32_t bcon_time; /* beacon timer */
5138 uint32_t tsf_low; /* network timestamp function timer */
5139 uint32_t tsf_hi; /* network timestamp function timer */
5140 uint32_t gp1; /* GP1 timer register */
5141 uint32_t gp2; /* GP2 timer register */
5142 uint32_t fw_rev_type; /* firmware revision type */
5143 uint32_t major; /* uCode version major */
5144 uint32_t minor; /* uCode version minor */
5145 uint32_t hw_ver; /* HW Silicon version */
5146 uint32_t brd_ver; /* HW board version */
5147 uint32_t log_pc; /* log program counter */
5148 uint32_t frame_ptr; /* frame pointer */
5149 uint32_t stack_ptr; /* stack pointer */
5150 uint32_t hcmd; /* last host command header */
5151 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
5152 * rxtx_flag */
5153 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
5154 * host_flag */
5155 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
5156 * enc_flag */
5157 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
5158 * time_flag */
5159 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
5160 * wico interrupt */
5161 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
5162 uint32_t wait_event; /* wait event() caller address */
5163 uint32_t l2p_control; /* L2pControlField */
5164 uint32_t l2p_duration; /* L2pDurationField */
5165 uint32_t l2p_mhvalid; /* L2pMhValidBits */
5166 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
5167 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
5168 * (LMPM_PMG_SEL) */
5169 	uint32_t u_timestamp;	/* date and time of the
5170 				 * compilation */
5171 uint32_t flow_handler; /* FH read/write pointers, RX credit */
5172 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5173
5174 /*
5175 * UMAC error struct - relevant starting from family 8000 chip.
5176 * Note: This structure is read from the device with IO accesses,
5177 * and the reading already does the endian conversion. As it is
5178 * read with u32-sized accesses, any members with a different size
5179 * need to be ordered correctly though!
5180 */
5181 struct iwm_umac_error_event_table {
5182 uint32_t valid; /* (nonzero) valid, (0) log is empty */
5183 uint32_t error_id; /* type of error */
5184 uint32_t blink1; /* branch link */
5185 uint32_t blink2; /* branch link */
5186 uint32_t ilink1; /* interrupt link */
5187 uint32_t ilink2; /* interrupt link */
5188 uint32_t data1; /* error-specific data */
5189 uint32_t data2; /* error-specific data */
5190 uint32_t data3; /* error-specific data */
5191 uint32_t umac_major;
5192 uint32_t umac_minor;
5193 uint32_t frame_pointer; /* core register 27*/
5194 uint32_t stack_pointer; /* core register 28 */
5195 uint32_t cmd_header; /* latest host cmd sent to UMAC */
5196 uint32_t nic_isr_pref; /* ISR status register */
5197 } __packed;
5198
5199 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
5200 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
5201
5202 #ifdef IWM_DEBUG
5203 struct {
5204 const char *name;
5205 uint8_t num;
5206 } advanced_lookup[] = {
5207 { "NMI_INTERRUPT_WDG", 0x34 },
5208 { "SYSASSERT", 0x35 },
5209 { "UCODE_VERSION_MISMATCH", 0x37 },
5210 { "BAD_COMMAND", 0x38 },
5211 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5212 { "FATAL_ERROR", 0x3D },
5213 { "NMI_TRM_HW_ERR", 0x46 },
5214 { "NMI_INTERRUPT_TRM", 0x4C },
5215 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5216 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5217 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5218 { "NMI_INTERRUPT_HOST", 0x66 },
5219 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
5220 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
5221 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5222 { "ADVANCED_SYSASSERT", 0 },
5223 };
5224
5225 static const char *
5226 iwm_desc_lookup(uint32_t num)
5227 {
5228 int i;
5229
5230 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5231 if (advanced_lookup[i].num == num)
5232 return advanced_lookup[i].name;
5233
5234 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5235 return advanced_lookup[i].name;
5236 }
5237
5238 static void
5239 iwm_nic_umac_error(struct iwm_softc *sc)
5240 {
5241 struct iwm_umac_error_event_table table;
5242 uint32_t base;
5243
5244 base = sc->umac_error_event_table;
5245
5246 if (base < 0x800000) {
5247 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5248 base);
5249 return;
5250 }
5251
5252 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5253 device_printf(sc->sc_dev, "reading errlog failed\n");
5254 return;
5255 }
5256
5257 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5258 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5259 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5260 sc->sc_flags, table.valid);
5261 }
5262
5263 device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5264 iwm_desc_lookup(table.error_id));
5265 device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5266 device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5267 device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5268 table.ilink1);
5269 device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5270 table.ilink2);
5271 device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5272 device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5273 device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5274 device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5275 device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5276 device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5277 table.frame_pointer);
5278 device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5279 table.stack_pointer);
5280 device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5281 device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5282 table.nic_isr_pref);
5283 }
5284
5285 /*
5286 * Support for dumping the error log seemed like a good idea ...
5287 * but it's mostly hex junk and the only sensible thing is the
5288 * hw/ucode revision (which we know anyway). Since it's here,
5289 * I'll just leave it in, just in case e.g. the Intel guys want to
5290 * help us decipher some "ADVANCED_SYSASSERT" later.
5291 */
5292 static void
5293 iwm_nic_error(struct iwm_softc *sc)
5294 {
5295 struct iwm_error_event_table table;
5296 uint32_t base;
5297
5298 device_printf(sc->sc_dev, "dumping device error log\n");
5299 base = sc->error_event_table[0];
5300 if (base < 0x800000) {
5301 device_printf(sc->sc_dev,
5302 "Invalid error log pointer 0x%08x\n", base);
5303 return;
5304 }
5305
5306 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5307 device_printf(sc->sc_dev, "reading errlog failed\n");
5308 return;
5309 }
5310
5311 if (!table.valid) {
5312 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5313 return;
5314 }
5315
5316 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5317 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5318 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5319 sc->sc_flags, table.valid);
5320 }
5321
5322 device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5323 iwm_desc_lookup(table.error_id));
5324 device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5325 table.trm_hw_status0);
5326 device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5327 table.trm_hw_status1);
5328 device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5329 device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5330 device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5331 device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5332 device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5333 device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5334 device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5335 device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5336 device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5337 device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5338 device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5339 device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5340 table.fw_rev_type);
5341 device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5342 device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5343 device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5344 device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5345 device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5346 device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5347 device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5348 device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5349 device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5350 device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5351 device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5352 device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5353 device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5354 device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5355 device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5356 device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5357 device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5358 device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5359 device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5360
5361 if (sc->umac_error_event_table)
5362 iwm_nic_umac_error(sc);
5363 }
5364 #endif
5365
5366 static void
5367 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5368 {
5369 struct ieee80211com *ic = &sc->sc_ic;
5370 struct iwm_cmd_response *cresp;
5371 struct mbuf *m1;
5372 uint32_t offset = 0;
5373 uint32_t maxoff = IWM_RBUF_SIZE;
5374 uint32_t nextoff;
5375 boolean_t stolen = FALSE;
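/*
 * "stolen" tracks whether a sub-frame from this RX buffer has already
 * been handed up the stack via a copy; in that case the original mbuf
 * still belongs to us and is freed after the loop.  When the final
 * sub-frame is passed up directly, stolen is cleared again so the
 * mbuf is not freed twice.
 */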
5376
5377 #define HAVEROOM(a) \
5378 ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
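/*
 * A single RX buffer can carry several firmware packets laid out back
 * to back; HAVEROOM() checks whether another packet header could still
 * fit between the current offset and the end of the buffer.
 */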
5379
5380 while (HAVEROOM(offset)) {
5381 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5382 offset);
5383 int qid, idx, code, len;
5384
5385 qid = pkt->hdr.qid;
5386 idx = pkt->hdr.idx;
5387
5388 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5389
5390 /*
5391 * randomly get these from the firmware, no idea why.
5392 * they at least seem harmless, so just ignore them for now
5393 */
5394 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5395 pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5396 break;
5397 }
5398
5399 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5400 "rx packet qid=%d idx=%d type=%x\n",
5401 qid & ~0x80, pkt->hdr.idx, code);
5402
5403 len = iwm_rx_packet_len(pkt);
5404 len += sizeof(uint32_t); /* account for status word */
5405 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
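/*
 * Packets within the buffer are aligned to IWM_FH_RSCSR_FRAME_ALIGN
 * (0x40 bytes in the Linux driver), so round the consumed length up
 * to find the start of the next one; e.g. a 52-byte packet still
 * occupies 64 bytes of the buffer.
 */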
5406
5407 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5408
5409 switch (code) {
5410 case IWM_REPLY_RX_PHY_CMD:
5411 iwm_rx_rx_phy_cmd(sc, pkt);
5412 break;
5413
5414 case IWM_REPLY_RX_MPDU_CMD: {
5415 /*
5416 * If this is the last frame in the RX buffer, we
5417 * can directly feed the mbuf to the sharks here.
5418 */
5419 struct iwm_rx_packet *nextpkt = mtodoff(m,
5420 struct iwm_rx_packet *, nextoff);
5421 if (!HAVEROOM(nextoff) ||
5422 (nextpkt->hdr.code == 0 &&
5423 (nextpkt->hdr.qid & ~0x80) == 0 &&
5424 nextpkt->hdr.idx == 0) ||
5425 (nextpkt->len_n_flags ==
5426 htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5427 if (iwm_rx_mpdu(sc, m, offset, stolen)) {
5428 stolen = FALSE;
5429 /* Make sure we abort the loop */
5430 nextoff = maxoff;
5431 }
5432 break;
5433 }
5434
5435 /*
5436 * Use m_copym instead of m_split, because that
5437 * makes it easier to keep a valid rx buffer in
5438 * the ring, when iwm_rx_mpdu() fails.
5439 *
5440 * We need to start m_copym() at offset 0, to get the
5441 * M_PKTHDR flag preserved.
5442 */
5443 m1 = m_copym(m, 0, M_COPYALL, M_WAITOK);
5444 if (m1) {
5445 if (iwm_rx_mpdu(sc, m1, offset, stolen))
5446 stolen = TRUE;
5447 else
5448 m_freem(m1);
5449 }
5450 break;
5451 }
5452
5453 case IWM_TX_CMD:
5454 iwm_rx_tx_cmd(sc, pkt);
5455 break;
5456
5457 case IWM_MISSED_BEACONS_NOTIFICATION: {
5458 struct iwm_missed_beacons_notif *resp;
5459 int missed;
5460
5461 /* XXX look at mac_id to determine interface ID */
5462 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5463
5464 resp = (void *)pkt->data;
5465 missed = le32toh(resp->consec_missed_beacons);
5466
5467 IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5468 "%s: MISSED_BEACON: mac_id=%d, "
5469 "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5470 "num_rx=%d\n",
5471 __func__,
5472 le32toh(resp->mac_id),
5473 le32toh(resp->consec_missed_beacons_since_last_rx),
5474 le32toh(resp->consec_missed_beacons),
5475 le32toh(resp->num_expected_beacons),
5476 le32toh(resp->num_recvd_beacons));
5477
5478 /* Be paranoid */
5479 if (vap == NULL)
5480 break;
5481
5482 /* XXX no net80211 locking? */
5483 if (vap->iv_state == IEEE80211_S_RUN &&
5484 (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5485 if (missed > vap->iv_bmissthreshold) {
5486 /* XXX bad locking; turn into task */
5487 IWM_UNLOCK(sc);
5488 ieee80211_beacon_miss(ic);
5489 IWM_LOCK(sc);
5490 }
5491 }
5492
5493 break;
5494 }
5495
5496 case IWM_MFUART_LOAD_NOTIFICATION:
5497 break;
5498
5499 case IWM_ALIVE:
5500 break;
5501
5502 case IWM_CALIB_RES_NOTIF_PHY_DB:
5503 break;
5504
5505 case IWM_STATISTICS_NOTIFICATION:
5506 iwm_handle_rx_statistics(sc, pkt);
5507 break;
5508
5509 case IWM_NVM_ACCESS_CMD:
5510 case IWM_MCC_UPDATE_CMD:
5511 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5512 memcpy(sc->sc_cmd_resp,
5513 pkt, sizeof(sc->sc_cmd_resp));
5514 }
5515 break;
5516
5517 case IWM_MCC_CHUB_UPDATE_CMD: {
5518 struct iwm_mcc_chub_notif *notif;
5519 notif = (void *)pkt->data;
5520
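/*
 * The 16-bit MCC field packs a two-character ASCII country code
 * ("US", "DE", ...) with the high byte first; it is NUL-terminated
 * below for printing.
 */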
5521 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5522 sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5523 sc->sc_fw_mcc[2] = '\0';
5524 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5525 "fw source %d sent CC '%s'\n",
5526 notif->source_id, sc->sc_fw_mcc);
5527 break;
5528 }
5529
5530 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5531 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5532 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5533 struct iwm_dts_measurement_notif_v1 *notif;
5534
5535 if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5536 device_printf(sc->sc_dev,
5537 "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5538 break;
5539 }
5540 notif = (void *)pkt->data;
5541 IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5542 "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5543 notif->temp);
5544 break;
5545 }
5546
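/*
 * The remaining commands elicit only a small status response.  If a
 * waiter in the command path has registered interest in this qid/idx
 * pair (sc_wantresp), stash the packet in sc_cmd_resp so it can pick
 * up the result.
 */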
5547 case IWM_PHY_CONFIGURATION_CMD:
5548 case IWM_TX_ANT_CONFIGURATION_CMD:
5549 case IWM_ADD_STA:
5550 case IWM_MAC_CONTEXT_CMD:
5551 case IWM_REPLY_SF_CFG_CMD:
5552 case IWM_POWER_TABLE_CMD:
5553 case IWM_LTR_CONFIG:
5554 case IWM_PHY_CONTEXT_CMD:
5555 case IWM_BINDING_CONTEXT_CMD:
5556 case IWM_TIME_EVENT_CMD:
5557 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5558 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5559 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5560 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5561 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5562 case IWM_REPLY_BEACON_FILTERING_CMD:
5563 case IWM_MAC_PM_POWER_TABLE:
5564 case IWM_TIME_QUOTA_CMD:
5565 case IWM_REMOVE_STA:
5566 case IWM_TXPATH_FLUSH:
5567 case IWM_LQ_CMD:
5568 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5569 IWM_FW_PAGING_BLOCK_CMD):
5570 case IWM_BT_CONFIG:
5571 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5572 cresp = (void *)pkt->data;
5573 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5574 memcpy(sc->sc_cmd_resp,
5575 pkt, sizeof(*pkt)+sizeof(*cresp));
5576 }
5577 break;
5578
5579 /* ignore */
5580 case IWM_PHY_DB_CMD:
5581 break;
5582
5583 case IWM_INIT_COMPLETE_NOTIF:
5584 break;
5585
5586 case IWM_SCAN_OFFLOAD_COMPLETE:
5587 iwm_rx_lmac_scan_complete_notif(sc, pkt);
5588 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5589 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5590 ieee80211_runtask(ic, &sc->sc_es_task);
5591 }
5592 break;
5593
5594 case IWM_SCAN_ITERATION_COMPLETE: {
5595 break;
5596 }
5597
5598 case IWM_SCAN_COMPLETE_UMAC:
5599 iwm_rx_umac_scan_complete_notif(sc, pkt);
5600 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5601 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5602 ieee80211_runtask(ic, &sc->sc_es_task);
5603 }
5604 break;
5605
5606 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5607 #ifdef IWM_DEBUG
5608 struct iwm_umac_scan_iter_complete_notif *notif;
5609 notif = (void *)pkt->data;
5610
5611 IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5612 "complete, status=0x%x, %d channels scanned\n",
5613 notif->status, notif->scanned_channels);
5614 #endif
5615 break;
5616 }
5617
5618 case IWM_REPLY_ERROR: {
5619 struct iwm_error_resp *resp;
5620 resp = (void *)pkt->data;
5621
5622 device_printf(sc->sc_dev,
5623 "firmware error 0x%x, cmd 0x%x\n",
5624 le32toh(resp->error_type),
5625 resp->cmd_id);
5626 break;
5627 }
5628
5629 case IWM_TIME_EVENT_NOTIFICATION:
5630 iwm_rx_time_event_notif(sc, pkt);
5631 break;
5632
5633 /*
5634 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5635 * messages. Just ignore them for now.
5636 */
5637 case IWM_DEBUG_LOG_MSG:
5638 break;
5639
5640 case IWM_MCAST_FILTER_CMD:
5641 break;
5642
5643 case IWM_SCD_QUEUE_CFG: {
5644 #ifdef IWM_DEBUG
5645 struct iwm_scd_txq_cfg_rsp *rsp;
5646 rsp = (void *)pkt->data;
5647
5648 IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5649 "queue cfg token=0x%x sta_id=%d "
5650 "tid=%d scd_queue=%d\n",
5651 rsp->token, rsp->sta_id, rsp->tid,
5652 rsp->scd_queue);
5653 #endif
5654 break;
5655 }
5656
5657 default:
5658 device_printf(sc->sc_dev,
5659 "code %x, frame %d/%d %x unhandled\n",
5660 code, qid & ~0x80, idx, pkt->len_n_flags);
5661 break;
5662 }
5663
5664 /*
5665 * Why test bit 0x80? The Linux driver:
5666 *
5667 * There is one exception: uCode sets bit 15 when it
5668 * originates the response/notification, i.e. when the
5669 * response/notification is not a direct response to a
5670 * command sent by the driver. For example, uCode issues
5671 * IWM_REPLY_RX when it sends a received frame to the driver;
5672 * it is not a direct response to any driver command.
5673 *
5674 * Ok, so since when is 7 == 15? Well, the Linux driver
5675 * uses a slightly different format for pkt->hdr, and "qid"
5676 * is actually the upper byte of a two-byte field.
5677 */
5678 if (!(qid & (1 << 7)))
5679 iwm_cmd_done(sc, pkt);
5680
5681 offset = nextoff;
5682 }
5683 if (stolen)
5684 m_freem(m);
5685 #undef HAVEROOM
5686 }
5687
5688 /*
5689 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5690 * Basic structure from if_iwn
5691 */
5692 static void
5693 iwm_notif_intr(struct iwm_softc *sc)
5694 {
5695 int count;
5696 uint32_t wreg;
5697 uint16_t hw;
5698
5699 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5700 BUS_DMASYNC_POSTREAD);
5701
5702 if (sc->cfg->mqrx_supported) {
5703 count = IWM_RX_MQ_RING_COUNT;
5704 wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
5705 } else {
5706 count = IWM_RX_LEGACY_RING_COUNT;
5707 wreg = IWM_FH_RSCSR_CHNL0_WPTR;
5708 }
5709
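/*
 * closed_rb_num is the index of the most recently closed receive
 * buffer; only the low 12 bits are valid, hence the mask.
 */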
5710 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5711
5712 /*
5713 * Process responses
5714 */
5715 while (sc->rxq.cur != hw) {
5716 struct iwm_rx_ring *ring = &sc->rxq;
5717 struct iwm_rx_data *data = &ring->data[ring->cur];
5718
5719 bus_dmamap_sync(ring->data_dmat, data->map,
5720 BUS_DMASYNC_POSTREAD);
5721
5722 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5723 "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5724 iwm_handle_rxb(sc, data->m);
5725
5726 ring->cur = (ring->cur + 1) % count;
5727 }
5728
5729 /*
5730 * Tell the firmware that it can reuse the ring entries that
5731 * we have just processed.
5732 * Seems like the hardware gets upset unless we align
5733 * the write by 8??
5734 */
5735 hw = (hw == 0) ? count - 1 : hw - 1;
5736 IWM_WRITE(sc, wreg, rounddown2(hw, 8));
5737 }
5738
5739 static void
5740 iwm_intr(void *arg)
5741 {
5742 struct iwm_softc *sc = arg;
5743 int handled = 0;
5744 int r1, r2;
5745 int isperiodic = 0;
5746
5747 #if defined(__DragonFly__)
5748 if (sc->sc_mem == NULL) {
5749 kprintf("iwm_intr: detached\n");
5750 return;
5751 }
5752 #endif
5753
5754 IWM_LOCK(sc);
5755 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5756
5757 if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5758 uint32_t *ict = sc->ict_dma.vaddr;
5759 int tmp;
5760
5761 tmp = htole32(ict[sc->ict_cur]);
5762 if (!tmp)
5763 goto out_ena;
5764
5765 /*
5766 * ok, there was something. keep plowing until we have all.
5767 */
5768 r1 = r2 = 0;
5769 while (tmp) {
5770 r1 |= tmp;
5771 ict[sc->ict_cur] = 0;
5772 sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5773 tmp = htole32(ict[sc->ict_cur]);
5774 }
5775
5776 /* this is where the fun begins. don't ask */
5777 if (r1 == 0xffffffff)
5778 r1 = 0;
5779
5780 /* i am not expected to understand this */
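/*
 * Per the Linux driver: each ICT entry packs the 32-bit interrupt
 * cause into two bytes; the expansion below maps byte 0 to CSR_INT
 * bits 0-7 and byte 1 to bits 24-31.  The 0xc0000 test works around
 * a hardware bug where the RX bit (bit 15 here, bit 31 once expanded)
 * can clear under interrupt coalescing while bits 18/19 remain set.
 */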
5781 if (r1 & 0xc0000)
5782 r1 |= 0x8000;
5783 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5784 } else {
5785 r1 = IWM_READ(sc, IWM_CSR_INT);
5786 /* "hardware gone" (where, fishing?) */
5787 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5788 goto out;
5789 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5790 }
5791 if (r1 == 0 && r2 == 0) {
5792 goto out_ena;
5793 }
5794
5795 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5796
5797 /* Safely ignore these bits for debug checks below */
5798 r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5799
5800 if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5801 int i;
5802 struct ieee80211com *ic = &sc->sc_ic;
5803 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5804
5805 #ifdef IWM_DEBUG
5806 iwm_nic_error(sc);
5807 #endif
5808 /* Dump driver status (TX and RX rings) while we're here. */
5809 device_printf(sc->sc_dev, "driver status:\n");
5810 for (i = 0; i < IWM_MAX_QUEUES; i++) {
5811 struct iwm_tx_ring *ring = &sc->txq[i];
5812 device_printf(sc->sc_dev,
5813 " tx ring %2d: qid=%-2d cur=%-3d "
5814 "queued=%-3d\n",
5815 i, ring->qid, ring->cur, ring->queued);
5816 }
5817 device_printf(sc->sc_dev,
5818 " rx ring: cur=%d\n", sc->rxq.cur);
5819 device_printf(sc->sc_dev,
5820 " 802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5821
5822 /* Reset our firmware state tracking. */
5823 sc->sc_firmware_state = 0;
5824 /* Don't stop the device; just do a VAP restart */
5825 IWM_UNLOCK(sc);
5826
5827 if (vap == NULL) {
5828 kprintf("%s: null vap\n", __func__);
5829 return;
5830 }
5831
5832 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5833 "restarting\n", __func__, vap->iv_state);
5834
5835 ieee80211_restart_all(ic);
5836 return;
5837 }
5838
5839 if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5840 handled |= IWM_CSR_INT_BIT_HW_ERR;
5841 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5842 iwm_stop(sc);
5843 goto out;
5844 }
5845
5846 /* firmware chunk loaded */
5847 if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5848 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5849 handled |= IWM_CSR_INT_BIT_FH_TX;
5850 sc->sc_fw_chunk_done = 1;
5851 wakeup(&sc->sc_fw);
5852 }
5853
5854 if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5855 handled |= IWM_CSR_INT_BIT_RF_KILL;
5856 taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
5857 }
5858
5859 /*
5860 * The Linux driver uses periodic interrupts to avoid races.
5861 * We cargo-cult like it's going out of fashion.
5862 */
5863 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5864 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5865 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5866 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5867 IWM_WRITE_1(sc,
5868 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5869 isperiodic = 1;
5870 }
5871
5872 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5873 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5874 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5875
5876 iwm_notif_intr(sc);
5877
5878 /* enable periodic interrupt, see above */
5879 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5880 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5881 IWM_CSR_INT_PERIODIC_ENA);
5882 }
5883
5884 if (__predict_false(r1 & ~handled))
5885 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5886 "%s: unhandled interrupts: %x\n", __func__, r1);
5887 out_ena:
5888 iwm_restore_interrupts(sc);
5889 out:
5890 IWM_UNLOCK(sc);
5891 return;
5892 }
5893
5894 /*
5895 * Autoconf glue-sniffing
5896 */
5897 #define PCI_VENDOR_INTEL 0x8086
5898 #define PCI_PRODUCT_INTEL_WL_3160_1 0x08b3
5899 #define PCI_PRODUCT_INTEL_WL_3160_2 0x08b4
5900 #define PCI_PRODUCT_INTEL_WL_3165_1 0x3165
5901 #define PCI_PRODUCT_INTEL_WL_3165_2 0x3166
5902 #define PCI_PRODUCT_INTEL_WL_3168_1 0x24fb
5903 #define PCI_PRODUCT_INTEL_WL_7260_1 0x08b1
5904 #define PCI_PRODUCT_INTEL_WL_7260_2 0x08b2
5905 #define PCI_PRODUCT_INTEL_WL_7265_1 0x095a
5906 #define PCI_PRODUCT_INTEL_WL_7265_2 0x095b
5907 #define PCI_PRODUCT_INTEL_WL_8260_1 0x24f3
5908 #define PCI_PRODUCT_INTEL_WL_8260_2 0x24f4
5909 #define PCI_PRODUCT_INTEL_WL_8265_1 0x24fd
5910 #define PCI_PRODUCT_INTEL_WL_9560_1 0x9df0
5911 #define PCI_PRODUCT_INTEL_WL_9560_2 0xa370
5912 #define PCI_PRODUCT_INTEL_WL_9560_3 0x31dc
5913 #define PCI_PRODUCT_INTEL_WL_9260_1 0x2526
5914
5915 static const struct iwm_devices {
5916 uint16_t device;
5917 const struct iwm_cfg *cfg;
5918 } iwm_devices[] = {
5919 { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5920 { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5921 { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5922 { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5923 { PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg },
5924 { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5925 { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5926 { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5927 { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5928 { PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5929 { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5930 { PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
5931 { PCI_PRODUCT_INTEL_WL_9560_1, &iwm9560_cfg },
5932 { PCI_PRODUCT_INTEL_WL_9560_2, &iwm9560_cfg },
5933 { PCI_PRODUCT_INTEL_WL_9560_3, &iwm9560_cfg },
5934 { PCI_PRODUCT_INTEL_WL_9260_1, &iwm9260_cfg },
5935 };
5936
5937 static int
5938 iwm_probe(device_t dev)
5939 {
5940 int i;
5941
5942 for (i = 0; i < nitems(iwm_devices); i++) {
5943 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5944 pci_get_device(dev) == iwm_devices[i].device) {
5945 device_set_desc(dev, iwm_devices[i].cfg->name);
5946 return (BUS_PROBE_DEFAULT);
5947 }
5948 }
5949
5950 return (ENXIO);
5951 }
5952
5953 static int
5954 iwm_dev_check(device_t dev)
5955 {
5956 struct iwm_softc *sc;
5957 uint16_t devid;
5958 int i;
5959
5960 sc = device_get_softc(dev);
5961
5962 devid = pci_get_device(dev);
5963 for (i = 0; i < nitems(iwm_devices); i++) {
5964 if (iwm_devices[i].device == devid) {
5965 sc->cfg = iwm_devices[i].cfg;
5966 return (0);
5967 }
5968 }
5969 device_printf(dev, "unknown adapter type\n");
5970 return ENXIO;
5971 }
5972
5973 /* PCI registers */
5974 #define PCI_CFG_RETRY_TIMEOUT 0x041
5975
5976 static int
5977 iwm_pci_attach(device_t dev)
5978 {
5979 struct iwm_softc *sc;
5980 int count, error, rid;
5981 uint16_t reg;
5982 #if defined(__DragonFly__)
5983 int irq_flags;
5984 #endif
5985
5986 sc = device_get_softc(dev);
5987
5988 /* We disable the RETRY_TIMEOUT register (0x41) to keep
5989 * PCI Tx retries from interfering with C3 CPU state */
5990 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5991
5992 /* Enable bus-mastering and hardware bug workaround. */
5993 pci_enable_busmaster(dev);
5994 reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5995 /* if !MSI */
5996 if (reg & PCIM_STATUS_INTxSTATE) {
5997 reg &= ~PCIM_STATUS_INTxSTATE;
5998 }
5999 pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
6000
6001 rid = PCIR_BAR(0);
6002 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
6003 RF_ACTIVE);
6004 if (sc->sc_mem == NULL) {
6005 device_printf(sc->sc_dev, "can't map mem space\n");
6006 return (ENXIO);
6007 }
6008 sc->sc_st = rman_get_bustag(sc->sc_mem);
6009 sc->sc_sh = rman_get_bushandle(sc->sc_mem);
6010
6011 /* Install interrupt handler. */
6012 count = 1;
6013 rid = 0;
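/*
 * rid 0 selects the legacy INTx resource, which must be shareable;
 * if an MSI message can be allocated, rid 1 selects it instead.
 */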
6014 #if defined(__DragonFly__)
6015 pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
6016 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
6017 #else
6018 if (pci_alloc_msi(dev, &count) == 0)
6019 rid = 1;
6020 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
6021 (rid != 0 ? 0 : RF_SHAREABLE));
6022 #endif
6023 if (sc->sc_irq == NULL) {
6024 device_printf(dev, "can't map interrupt\n");
6025 return (ENXIO);
6026 }
6027 #if defined(__DragonFly__)
6028 error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
6029 iwm_intr, sc, &sc->sc_ih,
6030 &wlan_global_serializer);
6031 #else
6032 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
6033 NULL, iwm_intr, sc, &sc->sc_ih);
6034 #endif
6035 if (error != 0) {
6036 device_printf(dev, "can't establish interrupt\n");
6037 #if defined(__DragonFly__)
6038 pci_release_msi(dev);
6039 #endif
6040 return (error);
6041 }
6042 sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
6043
6044 return (0);
6045 }
6046
6047 static void
6048 iwm_pci_detach(device_t dev)
6049 {
6050 struct iwm_softc *sc = device_get_softc(dev);
6051
6052 if (sc->sc_irq != NULL) {
6053 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
6054 bus_release_resource(dev, SYS_RES_IRQ,
6055 rman_get_rid(sc->sc_irq), sc->sc_irq);
6056 pci_release_msi(dev);
6057 #if defined(__DragonFly__)
6058 sc->sc_irq = NULL;
6059 #endif
6060 }
6061 if (sc->sc_mem != NULL) {
6062 bus_release_resource(dev, SYS_RES_MEMORY,
6063 rman_get_rid(sc->sc_mem), sc->sc_mem);
6064 #if defined(__DragonFly__)
6065 sc->sc_mem = NULL;
6066 #endif
6067 }
6068 }
6069
6070 static int
6071 iwm_attach(device_t dev)
6072 {
6073 struct iwm_softc *sc = device_get_softc(dev);
6074 struct ieee80211com *ic = &sc->sc_ic;
6075 int error;
6076 int txq_i, i;
6077
6078 sc->sc_dev = dev;
6079 sc->sc_attached = 1;
6080 IWM_LOCK_INIT(sc);
6081 mbufq_init(&sc->sc_snd, ifqmaxlen);
6082 callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
6083 callout_init_lk(&sc->sc_led_blink_to, &sc->sc_lk);
6084 TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
6085 TASK_INIT(&sc->sc_rftoggle_task, 0, iwm_rftoggle_task, sc);
6086
6087 sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
6088 taskqueue_thread_enqueue, &sc->sc_tq);
6089 error = taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON, -1, "iwm_taskq");
6090 if (error != 0) {
6091 device_printf(dev, "can't start taskq thread, error %d\n",
6092 error);
6093 goto fail;
6094 }
6095
6096 error = iwm_dev_check(dev);
6097 if (error != 0)
6098 goto fail;
6099
6100 sc->sc_notif_wait = iwm_notification_wait_init(sc);
6101 if (sc->sc_notif_wait == NULL) {
6102 device_printf(dev, "failed to init notification wait struct\n");
6103 goto fail;
6104 }
6105
6106 sc->sf_state = IWM_SF_UNINIT;
6107
6108 /* Init phy db */
6109 sc->sc_phy_db = iwm_phy_db_init(sc);
6110 if (!sc->sc_phy_db) {
6111 device_printf(dev, "Cannot init phy_db\n");
6112 goto fail;
6113 }
6114
6115 /* Set EBS as successful as long as not stated otherwise by the FW. */
6116 sc->last_ebs_successful = TRUE;
6117
6118 /* PCI attach */
6119 error = iwm_pci_attach(dev);
6120 if (error != 0)
6121 goto fail;
6122
6123 sc->sc_wantresp = -1;
6124
6125 sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6126 /*
6127 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
6128 * changed, and now the revision step also includes bit 0-1 (no more
6129 * "dash" value). To keep hw_rev backwards compatible - we'll store it
6130 * in the old format.
6131 */
6132 if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
6133 int ret;
6134 uint32_t hw_step;
6135
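/*
 * The step lives in bits 0-1 of the new format; shifting left by two
 * moves it into the bits IWM_CSR_HW_REV_STEP() extracts (bits 2-3 in
 * the Linux CSR layout), and the result is folded back into the old
 * "dash" format kept in sc_hw_rev.
 */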
6136 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
6137 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
6138
6139 if (iwm_prepare_card_hw(sc) != 0) {
6140 device_printf(dev, "could not initialize hardware\n");
6141 goto fail;
6142 }
6143
6144 /*
6145 * In order to recognize C step the driver should read the
6146 * chip version id located at the AUX bus MISC address.
6147 */
6148 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6149 IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6150 DELAY(2);
6151
6152 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6153 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6154 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6155 25000);
6156 if (!ret) {
6157 device_printf(sc->sc_dev,
6158 "Failed to wake up the nic\n");
6159 goto fail;
6160 }
6161
6162 if (iwm_nic_lock(sc)) {
6163 hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6164 hw_step |= IWM_ENABLE_WFPM;
6165 iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6166 hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6167 hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6168 if (hw_step == 0x3)
6169 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6170 (IWM_SILICON_C_STEP << 2);
6171 iwm_nic_unlock(sc);
6172 } else {
6173 device_printf(sc->sc_dev, "Failed to lock the nic\n");
6174 goto fail;
6175 }
6176 }
6177
6178 /* special-case 7265D, it has the same PCI IDs. */
6179 if (sc->cfg == &iwm7265_cfg &&
6180 (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6181 sc->cfg = &iwm7265d_cfg;
6182 }
6183
6184 /* Allocate DMA memory for firmware transfers. */
6185 if ((error = iwm_alloc_fwmem(sc)) != 0) {
6186 device_printf(dev, "could not allocate memory for firmware\n");
6187 goto fail;
6188 }
6189
6190 /* Allocate "Keep Warm" page. */
6191 if ((error = iwm_alloc_kw(sc)) != 0) {
6192 device_printf(dev, "could not allocate keep warm page\n");
6193 goto fail;
6194 }
6195
6196 /* We use ICT interrupts */
6197 if ((error = iwm_alloc_ict(sc)) != 0) {
6198 device_printf(dev, "could not allocate ICT table\n");
6199 goto fail;
6200 }
6201
6202 /* Allocate TX scheduler "rings". */
6203 if ((error = iwm_alloc_sched(sc)) != 0) {
6204 device_printf(dev, "could not allocate TX scheduler rings\n");
6205 goto fail;
6206 }
6207
6208 /* Allocate TX rings */
6209 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6210 if ((error = iwm_alloc_tx_ring(sc,
6211 &sc->txq[txq_i], txq_i)) != 0) {
6212 device_printf(dev,
6213 "could not allocate TX ring %d\n",
6214 txq_i);
6215 goto fail;
6216 }
6217 }
6218
6219 /* Allocate RX ring. */
6220 if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6221 device_printf(dev, "could not allocate RX ring\n");
6222 goto fail;
6223 }
6224
6225 /* Clear pending interrupts. */
6226 IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6227
6228 ic->ic_softc = sc;
6229 ic->ic_name = device_get_nameunit(sc->sc_dev);
6230 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
6231 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
6232
6233 /* Set device capabilities. */
6234 ic->ic_caps =
6235 IEEE80211_C_STA |
6236 IEEE80211_C_WPA | /* WPA/RSN */
6237 IEEE80211_C_WME |
6238 IEEE80211_C_PMGT |
6239 IEEE80211_C_SHSLOT | /* short slot time supported */
6240 IEEE80211_C_SHPREAMBLE /* short preamble supported */
6241 // IEEE80211_C_BGSCAN /* capable of bg scanning */
6242 ;
6243 /* Advertise full-offload scanning */
6244 ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6245 for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6246 sc->sc_phyctxt[i].id = i;
6247 sc->sc_phyctxt[i].color = 0;
6248 sc->sc_phyctxt[i].ref = 0;
6249 sc->sc_phyctxt[i].channel = NULL;
6250 }
6251
6252 /* Default noise floor */
6253 sc->sc_noise = -96;
6254
6255 /* Max RSSI */
6256 sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6257
6258 #ifdef IWM_DEBUG
6259 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6260 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6261 CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6262 #endif
6263
6264 error = iwm_read_firmware(sc);
6265 if (error) {
6266 goto fail;
6267 } else if (sc->sc_fw.fw_fp == NULL) {
6268 /*
6269 * XXX Add a solution for properly deferring firmware load
6270 * during bootup.
6271 */
6272 goto fail;
6273 } else {
6274 sc->sc_preinit_hook.ich_func = iwm_preinit;
6275 sc->sc_preinit_hook.ich_arg = sc;
6276 if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6277 device_printf(dev,
6278 "config_intrhook_establish failed\n");
6279 goto fail;
6280 }
6281 }
6282
6283 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6284 "<-%s\n", __func__);
6285
6286 return 0;
6287
6288 /* Free allocated memory if something failed during attachment. */
6289 fail:
6290 iwm_detach_local(sc, 0);
6291
6292 return ENXIO;
6293 }
6294
6295 static int
6296 iwm_is_valid_ether_addr(uint8_t *addr)
6297 {
6298 char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6299
6300 if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6301 return (FALSE);
6302
6303 return (TRUE);
6304 }
6305
6306 static int
6307 iwm_wme_update(struct ieee80211com *ic)
6308 {
6309 #define IWM_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
6310 struct iwm_softc *sc = ic->ic_softc;
6311 #if !defined(__DragonFly__)
6312 struct chanAccParams chp;
6313 #endif
6314 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6315 struct iwm_vap *ivp = IWM_VAP(vap);
6316 struct iwm_node *in;
6317 struct wmeParams tmp[WME_NUM_AC];
6318 int aci, error;
6319
6320 if (vap == NULL)
6321 return (0);
6322
6323 #if !defined(__DragonFly__)
6324 ieee80211_wme_ic_getparams(ic, &chp);
6325
6326 IEEE80211_LOCK(ic);
6327 for (aci = 0; aci < WME_NUM_AC; aci++)
6328 tmp[aci] = chp.cap_wmeParams[aci];
6329 IEEE80211_UNLOCK(ic);
6330 #else
6331 IEEE80211_LOCK(ic);
6332 for (aci = 0; aci < WME_NUM_AC; aci++)
6333 tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6334 IEEE80211_UNLOCK(ic);
6335 #endif
6336
6337 IWM_LOCK(sc);
6338 for (aci = 0; aci < WME_NUM_AC; aci++) {
6339 const struct wmeParams *ac = &tmp[aci];
6340 ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6341 ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6342 ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6343 ivp->queue_params[aci].edca_txop =
6344 IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6345 }
6346 ivp->have_wme = TRUE;
6347 if (ivp->is_uploaded && vap->iv_bss != NULL) {
6348 in = IWM_NODE(vap->iv_bss);
6349 if (in->in_assoc) {
6350 if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
6351 device_printf(sc->sc_dev,
6352 "%s: failed to update MAC\n", __func__);
6353 }
6354 }
6355 }
6356 IWM_UNLOCK(sc);
6357
6358 return (0);
6359 #undef IWM_EXP2
6360 }
6361
6362 static void
6363 iwm_preinit(void *arg)
6364 {
6365 struct iwm_softc *sc = arg;
6366 device_t dev = sc->sc_dev;
6367 struct ieee80211com *ic = &sc->sc_ic;
6368 int error;
6369
6370 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6371 "->%s\n", __func__);
6372
6373 IWM_LOCK(sc);
6374 if ((error = iwm_start_hw(sc)) != 0) {
6375 device_printf(dev, "could not initialize hardware\n");
6376 IWM_UNLOCK(sc);
6377 goto fail;
6378 }
6379
6380 error = iwm_run_init_ucode(sc, 1);
6381 iwm_stop_device(sc);
6382 if (error) {
6383 IWM_UNLOCK(sc);
6384 goto fail;
6385 }
6386 device_printf(dev,
6387 "hw rev 0x%x, fw ver %s, address %s\n",
6388 sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6389 sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6390
6391 /* not all hardware can do 5GHz band */
6392 if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6393 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6394 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6395 IWM_UNLOCK(sc);
6396
6397 iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6398 ic->ic_channels);
6399
6400 /*
6401 * At this point we've committed - if we fail to do setup,
6402 * we now also have to tear down the net80211 state.
6403 */
6404 ieee80211_ifattach(ic);
6405 ic->ic_vap_create = iwm_vap_create;
6406 ic->ic_vap_delete = iwm_vap_delete;
6407 ic->ic_raw_xmit = iwm_raw_xmit;
6408 ic->ic_node_alloc = iwm_node_alloc;
6409 ic->ic_scan_start = iwm_scan_start;
6410 ic->ic_scan_end = iwm_scan_end;
6411 ic->ic_update_mcast = iwm_update_mcast;
6412 ic->ic_getradiocaps = iwm_init_channel_map;
6413 ic->ic_set_channel = iwm_set_channel;
6414 ic->ic_scan_curchan = iwm_scan_curchan;
6415 ic->ic_scan_mindwell = iwm_scan_mindwell;
6416 ic->ic_wme.wme_update = iwm_wme_update;
6417 ic->ic_parent = iwm_parent;
6418 ic->ic_transmit = iwm_transmit;
6419 iwm_radiotap_attach(sc);
6420 if (bootverbose)
6421 ieee80211_announce(ic);
6422
6423 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6424 "<-%s\n", __func__);
6425 config_intrhook_disestablish(&sc->sc_preinit_hook);
6426
6427 return;
6428 fail:
6429 config_intrhook_disestablish(&sc->sc_preinit_hook);
6430 iwm_detach_local(sc, 0);
6431 }
6432
6433 /*
6434 * Attach the interface to 802.11 radiotap.
6435 */
6436 static void
6437 iwm_radiotap_attach(struct iwm_softc *sc)
6438 {
6439 struct ieee80211com *ic = &sc->sc_ic;
6440
6441 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6442 "->%s begin\n", __func__);
6443 ieee80211_radiotap_attach(ic,
6444 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6445 IWM_TX_RADIOTAP_PRESENT,
6446 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6447 IWM_RX_RADIOTAP_PRESENT);
6448 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6449 "->%s end\n", __func__);
6450 }
6451
6452 static struct ieee80211vap *
6453 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6454 enum ieee80211_opmode opmode, int flags,
6455 const uint8_t bssid[IEEE80211_ADDR_LEN],
6456 const uint8_t mac[IEEE80211_ADDR_LEN])
6457 {
6458 struct iwm_vap *ivp;
6459 struct ieee80211vap *vap;
6460
6461 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
6462 return NULL;
6463 ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6464 vap = &ivp->iv_vap;
6465 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6466 vap->iv_bmissthreshold = 10; /* override default */
6467 /* Override with driver methods. */
6468 ivp->iv_newstate = vap->iv_newstate;
6469 vap->iv_newstate = iwm_newstate;
6470
6471 ivp->id = IWM_DEFAULT_MACID;
6472 ivp->color = IWM_DEFAULT_COLOR;
6473
6474 ivp->have_wme = FALSE;
6475 ivp->ps_disabled = FALSE;
6476
6477 ieee80211_ratectl_init(vap);
6478 /* Complete setup. */
6479 ieee80211_vap_attach(vap, ieee80211_media_change,
6480 ieee80211_media_status, mac);
6481 ic->ic_opmode = opmode;
6482
6483 return vap;
6484 }
6485
6486 static void
6487 iwm_vap_delete(struct ieee80211vap *vap)
6488 {
6489 struct iwm_vap *ivp = IWM_VAP(vap);
6490
6491 ieee80211_ratectl_deinit(vap);
6492 ieee80211_vap_detach(vap);
6493 kfree(ivp, M_80211_VAP);
6494 }
6495
6496 static void
6497 iwm_xmit_queue_drain(struct iwm_softc *sc)
6498 {
6499 struct mbuf *m;
6500 struct ieee80211_node *ni;
6501
6502 while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6503 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6504 ieee80211_free_node(ni);
6505 m_freem(m);
6506 }
6507 }
6508
6509 static void
6510 iwm_scan_start(struct ieee80211com *ic)
6511 {
6512 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6513 struct iwm_softc *sc = ic->ic_softc;
6514 int error;
6515
6516 IWM_LOCK(sc);
6517 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6518 /* This should not be possible */
6519 device_printf(sc->sc_dev,
6520 "%s: Previous scan not completed yet\n", __func__);
6521 }
6522 if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6523 error = iwm_umac_scan(sc);
6524 else
6525 error = iwm_lmac_scan(sc);
6526 if (error != 0) {
6527 device_printf(sc->sc_dev, "could not initiate scan\n");
6528 IWM_UNLOCK(sc);
6529 ieee80211_cancel_scan(vap);
6530 } else {
6531 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6532 iwm_led_blink_start(sc);
6533 IWM_UNLOCK(sc);
6534 }
6535 }
6536
6537 static void
6538 iwm_scan_end(struct ieee80211com *ic)
6539 {
6540 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6541 struct iwm_softc *sc = ic->ic_softc;
6542
6543 IWM_LOCK(sc);
6544 iwm_led_blink_stop(sc);
6545 if (vap->iv_state == IEEE80211_S_RUN)
6546 iwm_led_enable(sc);
6547 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6548 /*
6549 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6550 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6551 * taskqueue.
6552 */
6553 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6554 iwm_scan_stop_wait(sc);
6555 }
6556 IWM_UNLOCK(sc);
6557
6558 /*
6559 * Make sure we don't race if sc_es_task is still enqueued here.
6560 * This is to make sure that it won't call ieee80211_scan_done
6561 * when we have already started the next scan.
6562 */
6563 taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6564 }
6565
6566 static void
6567 iwm_update_mcast(struct ieee80211com *ic)
6568 {
6569 }
6570
6571 static void
6572 iwm_set_channel(struct ieee80211com *ic)
6573 {
6574 }
6575
6576 static void
6577 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6578 {
6579 }
6580
6581 static void
6582 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6583 {
6584 }
6585
6586 void
6587 iwm_init_task(void *arg1)
6588 {
6589 struct iwm_softc *sc = arg1;
6590
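/*
 * IWM_FLAG_BUSY serves as a hand-rolled sleepable lock serializing
 * init/stop transitions; waiters are woken through
 * wakeup(&sc->sc_flags) once the flag is cleared.
 */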
6591 IWM_LOCK(sc);
6592 while (sc->sc_flags & IWM_FLAG_BUSY)
6593 lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6594 sc->sc_flags |= IWM_FLAG_BUSY;
6595 iwm_stop(sc);
6596 if (sc->sc_ic.ic_nrunning > 0)
6597 iwm_init(sc);
6598 sc->sc_flags &= ~IWM_FLAG_BUSY;
6599 wakeup(&sc->sc_flags);
6600 IWM_UNLOCK(sc);
6601 }
6602
6603 static int
6604 iwm_resume(device_t dev)
6605 {
6606 struct iwm_softc *sc = device_get_softc(dev);
6607 int do_reinit = 0;
6608
6609 /*
6610 * We disable the RETRY_TIMEOUT register (0x41) to keep
6611 * PCI Tx retries from interfering with C3 CPU state.
6612 */
6613 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6614
6615 if (!sc->sc_attached)
6616 return 0;
6617
6618 iwm_init_task(device_get_softc(dev));
6619
6620 IWM_LOCK(sc);
6621 if (sc->sc_flags & IWM_FLAG_SCANNING) {
6622 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6623 do_reinit = 1;
6624 }
6625 IWM_UNLOCK(sc);
6626
6627 if (do_reinit)
6628 ieee80211_resume_all(&sc->sc_ic);
6629
6630 return 0;
6631 }
6632
6633 static int
6634 iwm_suspend(device_t dev)
6635 {
6636 int do_stop = 0;
6637 struct iwm_softc *sc = device_get_softc(dev);
6638
6639 do_stop = !!(sc->sc_ic.ic_nrunning > 0);
6640
6641 if (!sc->sc_attached)
6642 return (0);
6643
6644 ieee80211_suspend_all(&sc->sc_ic);
6645
6646 if (do_stop) {
6647 IWM_LOCK(sc);
6648 iwm_stop(sc);
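/*
 * Setting IWM_FLAG_SCANNING here makes iwm_resume() trigger a full
 * reinit via ieee80211_resume_all().
 */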
6649 sc->sc_flags |= IWM_FLAG_SCANNING;
6650 IWM_UNLOCK(sc);
6651 }
6652
6653 return (0);
6654 }
6655
6656 static int
6657 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6658 {
6659 struct iwm_fw_info *fw = &sc->sc_fw;
6660 device_t dev = sc->sc_dev;
6661 int i;
6662
6663 if (!sc->sc_attached)
6664 return 0;
6665 sc->sc_attached = 0;
6666 if (do_net80211) {
6667 ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6668 }
6669 iwm_stop_device(sc);
6670 #if defined(__DragonFly__)
6671 /* doesn't exist for DFly, DFly drains tasks on free */
6672 #else
6673 taskqueue_drain_all(sc->sc_tq);
6674 #endif
6675 taskqueue_free(sc->sc_tq);
6676 if (do_net80211) {
6677 IWM_LOCK(sc);
6678 iwm_xmit_queue_drain(sc);
6679 IWM_UNLOCK(sc);
6680 ieee80211_ifdetach(&sc->sc_ic);
6681 }
6682 callout_drain(&sc->sc_led_blink_to);
6683 callout_drain(&sc->sc_watchdog_to);
6684
6685 iwm_phy_db_free(sc->sc_phy_db);
6686 sc->sc_phy_db = NULL;
6687
6688 iwm_free_nvm_data(sc->nvm_data);
6689
6690 /* Free descriptor rings */
6691 iwm_free_rx_ring(sc, &sc->rxq);
6692 for (i = 0; i < nitems(sc->txq); i++)
6693 iwm_free_tx_ring(sc, &sc->txq[i]);
6694
6695 /* Free firmware */
6696 if (fw->fw_fp != NULL)
6697 iwm_fw_info_free(fw);
6698
6699 /* Free scheduler */
6700 iwm_dma_contig_free(&sc->sched_dma);
6701 iwm_dma_contig_free(&sc->ict_dma);
6702 iwm_dma_contig_free(&sc->kw_dma);
6703 iwm_dma_contig_free(&sc->fw_dma);
6704
6705 iwm_free_fw_paging(sc);
6706
6707 /* Finished with the hardware - detach things */
6708 iwm_pci_detach(dev);
6709
6710 if (sc->sc_notif_wait != NULL) {
6711 iwm_notification_wait_free(sc->sc_notif_wait);
6712 sc->sc_notif_wait = NULL;
6713 }
6714
6715 IWM_LOCK_DESTROY(sc);
6716
6717 return (0);
6718 }
6719
6720 static int
6721 iwm_detach(device_t dev)
6722 {
6723 struct iwm_softc *sc = device_get_softc(dev);
6724
6725 return (iwm_detach_local(sc, 1));
6726 }
6727
6728 static device_method_t iwm_pci_methods[] = {
6729 /* Device interface */
6730 DEVMETHOD(device_probe, iwm_probe),
6731 DEVMETHOD(device_attach, iwm_attach),
6732 DEVMETHOD(device_detach, iwm_detach),
6733 DEVMETHOD(device_suspend, iwm_suspend),
6734 DEVMETHOD(device_resume, iwm_resume),
6735
6736 DEVMETHOD_END
6737 };
6738
6739 static driver_t iwm_pci_driver = {
6740 "iwm",
6741 iwm_pci_methods,
6742 sizeof (struct iwm_softc)
6743 };
6744
6745 static devclass_t iwm_devclass;
6746
6747 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6748 #if !defined(__DragonFly__)
6749 MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver,
6750 iwm_devices, nitems(iwm_devices));
6751 #endif
6752 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6753 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6754 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6755