1 /* $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $ */ 2 3 /* 4 * Copyright (c) 2014 genua mbh <info@genua.de> 5 * Copyright (c) 2014 Fixup Software Ltd. 6 * 7 * Permission to use, copy, modify, and distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 /*- 21 * Based on BSD-licensed source modules in the Linux iwlwifi driver, 22 * which were used as the reference documentation for this implementation. 23 * 24 * Driver version we are currently based off of is 25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd) 26 * 27 *********************************************************************** 28 * 29 * This file is provided under a dual BSD/GPLv2 license. When using or 30 * redistributing this file, you may do so under either license. 31 * 32 * GPL LICENSE SUMMARY 33 * 34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. 35 * 36 * This program is free software; you can redistribute it and/or modify 37 * it under the terms of version 2 of the GNU General Public License as 38 * published by the Free Software Foundation. 39 * 40 * This program is distributed in the hope that it will be useful, but 41 * WITHOUT ANY WARRANTY; without even the implied warranty of 42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 43 * General Public License for more details. 44 * 45 * You should have received a copy of the GNU General Public License 46 * along with this program; if not, write to the Free Software 47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, 48 * USA 49 * 50 * The full GNU General Public License is included in this distribution 51 * in the file called COPYING. 52 * 53 * Contact Information: 54 * Intel Linux Wireless <ilw@linux.intel.com> 55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 56 * 57 * 58 * BSD LICENSE 59 * 60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. 61 * All rights reserved. 62 * 63 * Redistribution and use in source and binary forms, with or without 64 * modification, are permitted provided that the following conditions 65 * are met: 66 * 67 * * Redistributions of source code must retain the above copyright 68 * notice, this list of conditions and the following disclaimer. 69 * * Redistributions in binary form must reproduce the above copyright 70 * notice, this list of conditions and the following disclaimer in 71 * the documentation and/or other materials provided with the 72 * distribution. 73 * * Neither the name Intel Corporation nor the names of its 74 * contributors may be used to endorse or promote products derived 75 * from this software without specific prior written permission. 76 * 77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 88 */ 89 90 /*- 91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr> 92 * 93 * Permission to use, copy, modify, and distribute this software for any 94 * purpose with or without fee is hereby granted, provided that the above 95 * copyright notice and this permission notice appear in all copies. 96 * 97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 104 */ 105 /* 106 * DragonFly work 107 * 108 * NOTE: Relative to roughly August 8th sources, does not include FreeBSD 109 * changes to remove per-device network interface (DragonFly has not 110 * caught up to that yet on the WLAN side). 111 * 112 * Comprehensive list of adjustments for DragonFly not #ifdef'd: 113 * malloc -> kmalloc (in particular, changing improper M_NOWAIT 114 * specifications to M_INTWAIT. We still don't 115 * understand why FreeBSD uses M_NOWAIT for 116 * critical must-not-fail kmalloc()s). 117 * free -> kfree 118 * printf -> kprintf 119 * (bug fix) memset in iwm_reset_rx_ring. 120 * (debug) added several kprintf()s on error 121 * 122 * header file paths (DFly allows localized path specifications). 123 * minor header file differences. 124 * 125 * Comprehensive list of adjustments for DragonFly #ifdef'd: 126 * (safety) added register read-back serialization in iwm_reset_rx_ring(). 127 * packet counters 128 * msleep -> lksleep 129 * mtx -> lk (mtx functions -> lockmgr functions) 130 * callout differences 131 * taskqueue differences 132 * MSI differences 133 * bus_setup_intr() differences 134 * minor PCI config register naming differences 135 */ 136 #include <sys/cdefs.h> 137 __FBSDID("$FreeBSD$"); 138 139 #include <sys/param.h> 140 #include <sys/bus.h> 141 #include <sys/endian.h> 142 #include <sys/firmware.h> 143 #include <sys/kernel.h> 144 #include <sys/malloc.h> 145 #include <sys/mbuf.h> 146 #include <sys/module.h> 147 #include <sys/rman.h> 148 #include <sys/sysctl.h> 149 #include <sys/linker.h> 150 151 #include <machine/endian.h> 152 153 #include <bus/pci/pcivar.h> 154 #include <bus/pci/pcireg.h> 155 156 #include <net/bpf.h> 157 158 #include <net/if.h> 159 #include <net/if_var.h> 160 #include <net/if_arp.h> 161 #include <net/if_dl.h> 162 #include <net/if_media.h> 163 #include <net/if_types.h> 164 165 #include <netinet/in.h> 166 #include <netinet/in_systm.h> 167 #include <netinet/if_ether.h> 168 #include <netinet/ip.h> 169 170 #include <netproto/802_11/ieee80211_var.h> 171 #include <netproto/802_11/ieee80211_regdomain.h> 172 #include <netproto/802_11/ieee80211_ratectl.h> 173 #include <netproto/802_11/ieee80211_radiotap.h> 174 175 #include "if_iwmreg.h" 176 #include "if_iwmvar.h" 177 #include "if_iwm_config.h" 178 #include "if_iwm_debug.h" 179 #include "if_iwm_notif_wait.h" 180 #include "if_iwm_util.h" 181 #include "if_iwm_binding.h" 182 #include "if_iwm_phy_db.h" 183 #include "if_iwm_mac_ctxt.h" 184 #include "if_iwm_phy_ctxt.h" 185 #include "if_iwm_time_event.h" 186 #include "if_iwm_power.h" 187 #include "if_iwm_scan.h" 188 #include "if_iwm_sf.h" 189 #include "if_iwm_sta.h" 190 #include "if_iwm_pcie_trans.h" 191 #include "if_iwm_led.h" 192 #include "if_iwm_fw.h" 193 194 const uint8_t iwm_nvm_channels[] = { 195 /* 2.4 GHz */ 196 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 197 /* 5 GHz */ 198 36, 40, 44, 48, 52, 56, 60, 64, 199 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 200 149, 153, 157, 161, 165 201 }; 202 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS, 203 "IWM_NUM_CHANNELS is too small"); 204 205 const uint8_t iwm_nvm_channels_8000[] = { 206 /* 2.4 GHz */ 207 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 208 /* 5 GHz */ 209 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 210 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 211 149, 153, 157, 161, 165, 169, 173, 177, 181 212 }; 213 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000, 214 "IWM_NUM_CHANNELS_8000 is too small"); 215 216 #define IWM_NUM_2GHZ_CHANNELS 14 217 #define IWM_N_HW_ADDR_MASK 0xF 218 219 /* 220 * XXX For now, there's simply a fixed set of rate table entries 221 * that are populated. 222 */ 223 const struct iwm_rate { 224 uint8_t rate; 225 uint8_t plcp; 226 } iwm_rates[] = { 227 { 2, IWM_RATE_1M_PLCP }, 228 { 4, IWM_RATE_2M_PLCP }, 229 { 11, IWM_RATE_5M_PLCP }, 230 { 22, IWM_RATE_11M_PLCP }, 231 { 12, IWM_RATE_6M_PLCP }, 232 { 18, IWM_RATE_9M_PLCP }, 233 { 24, IWM_RATE_12M_PLCP }, 234 { 36, IWM_RATE_18M_PLCP }, 235 { 48, IWM_RATE_24M_PLCP }, 236 { 72, IWM_RATE_36M_PLCP }, 237 { 96, IWM_RATE_48M_PLCP }, 238 { 108, IWM_RATE_54M_PLCP }, 239 }; 240 #define IWM_RIDX_CCK 0 241 #define IWM_RIDX_OFDM 4 242 #define IWM_RIDX_MAX (nitems(iwm_rates)-1) 243 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM) 244 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM) 245 246 struct iwm_nvm_section { 247 uint16_t length; 248 uint8_t *data; 249 }; 250 251 #define IWM_MVM_UCODE_ALIVE_TIMEOUT hz 252 #define IWM_MVM_UCODE_CALIB_TIMEOUT (2*hz) 253 254 struct iwm_mvm_alive_data { 255 int valid; 256 uint32_t scd_base_addr; 257 }; 258 259 static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t); 260 static int iwm_firmware_store_section(struct iwm_softc *, 261 enum iwm_ucode_type, 262 const uint8_t *, size_t); 263 static int iwm_set_default_calib(struct iwm_softc *, const void *); 264 static void iwm_fw_info_free(struct iwm_fw_info *); 265 static int iwm_read_firmware(struct iwm_softc *); 266 #if !defined(__DragonFly__) 267 static void iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int); 268 #endif 269 static int iwm_alloc_fwmem(struct iwm_softc *); 270 static int iwm_alloc_sched(struct iwm_softc *); 271 static int iwm_alloc_kw(struct iwm_softc *); 272 static int iwm_alloc_ict(struct iwm_softc *); 273 static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *); 274 static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *); 275 static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *); 276 static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, 277 int); 278 static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *); 279 static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *); 280 static void iwm_enable_interrupts(struct iwm_softc *); 281 static void iwm_restore_interrupts(struct iwm_softc *); 282 static void iwm_disable_interrupts(struct iwm_softc *); 283 static void iwm_ict_reset(struct iwm_softc *); 284 static int iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *); 285 static void iwm_stop_device(struct iwm_softc *); 286 static void iwm_mvm_nic_config(struct iwm_softc *); 287 static int iwm_nic_rx_init(struct iwm_softc *); 288 static int iwm_nic_tx_init(struct iwm_softc *); 289 static int iwm_nic_init(struct iwm_softc *); 290 static int iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t); 291 static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, 292 uint16_t, uint8_t *, uint16_t *); 293 static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *, 294 uint16_t *, uint32_t); 295 static uint32_t iwm_eeprom_channel_flags(uint16_t); 296 static void iwm_add_channel_band(struct iwm_softc *, 297 struct ieee80211_channel[], int, int *, int, size_t, 298 const uint8_t[]); 299 static void iwm_init_channel_map(struct ieee80211com *, int, int *, 300 struct ieee80211_channel[]); 301 static struct iwm_nvm_data * 302 iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *, 303 const uint16_t *, const uint16_t *, 304 const uint16_t *, const uint16_t *, 305 const uint16_t *); 306 static void iwm_free_nvm_data(struct iwm_nvm_data *); 307 static void iwm_set_hw_address_family_8000(struct iwm_softc *, 308 struct iwm_nvm_data *, 309 const uint16_t *, 310 const uint16_t *); 311 static int iwm_get_sku(const struct iwm_softc *, const uint16_t *, 312 const uint16_t *); 313 static int iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *); 314 static int iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *, 315 const uint16_t *); 316 static int iwm_get_n_hw_addrs(const struct iwm_softc *, 317 const uint16_t *); 318 static void iwm_set_radio_cfg(const struct iwm_softc *, 319 struct iwm_nvm_data *, uint32_t); 320 static struct iwm_nvm_data * 321 iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *); 322 static int iwm_nvm_init(struct iwm_softc *); 323 static int iwm_pcie_load_section(struct iwm_softc *, uint8_t, 324 const struct iwm_fw_desc *); 325 static int iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t, 326 bus_addr_t, uint32_t); 327 static int iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc, 328 const struct iwm_fw_img *, 329 int, int *); 330 static int iwm_pcie_load_cpu_sections(struct iwm_softc *, 331 const struct iwm_fw_img *, 332 int, int *); 333 static int iwm_pcie_load_given_ucode_8000(struct iwm_softc *, 334 const struct iwm_fw_img *); 335 static int iwm_pcie_load_given_ucode(struct iwm_softc *, 336 const struct iwm_fw_img *); 337 static int iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *); 338 static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t); 339 static int iwm_send_phy_cfg_cmd(struct iwm_softc *); 340 static int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *, 341 enum iwm_ucode_type); 342 static int iwm_run_init_mvm_ucode(struct iwm_softc *, int); 343 static int iwm_mvm_config_ltr(struct iwm_softc *sc); 344 static int iwm_rx_addbuf(struct iwm_softc *, int, int); 345 static int iwm_mvm_get_signal_strength(struct iwm_softc *, 346 struct iwm_rx_phy_info *); 347 static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *, 348 struct iwm_rx_packet *); 349 static int iwm_get_noise(struct iwm_softc *, 350 const struct iwm_mvm_statistics_rx_non_phy *); 351 static void iwm_mvm_handle_rx_statistics(struct iwm_softc *, 352 struct iwm_rx_packet *); 353 static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *, 354 uint32_t, boolean_t); 355 static int iwm_mvm_rx_tx_cmd_single(struct iwm_softc *, 356 struct iwm_rx_packet *, 357 struct iwm_node *); 358 static void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *); 359 static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *); 360 #if 0 361 static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t, 362 uint16_t); 363 #endif 364 static uint8_t iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *, 365 struct mbuf *, struct iwm_tx_cmd *); 366 static int iwm_tx(struct iwm_softc *, struct mbuf *, 367 struct ieee80211_node *, int); 368 static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *, 369 const struct ieee80211_bpf_params *); 370 static int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *); 371 static int iwm_auth(struct ieee80211vap *, struct iwm_softc *); 372 static struct ieee80211_node * 373 iwm_node_alloc(struct ieee80211vap *, 374 const uint8_t[IEEE80211_ADDR_LEN]); 375 static uint8_t iwm_rate_from_ucode_rate(uint32_t); 376 static int iwm_rate2ridx(struct iwm_softc *, uint8_t); 377 static void iwm_setrates(struct iwm_softc *, struct iwm_node *, int); 378 static int iwm_media_change(struct ifnet *); 379 static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int); 380 static void iwm_endscan_cb(void *, int); 381 static int iwm_send_bt_init_conf(struct iwm_softc *); 382 static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *); 383 static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *); 384 static int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *); 385 static void iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t); 386 static int iwm_init_hw(struct iwm_softc *); 387 static void iwm_init(struct iwm_softc *); 388 static void iwm_start(struct iwm_softc *); 389 static void iwm_stop(struct iwm_softc *); 390 static void iwm_watchdog(void *); 391 static void iwm_parent(struct ieee80211com *); 392 #ifdef IWM_DEBUG 393 static const char * 394 iwm_desc_lookup(uint32_t); 395 static void iwm_nic_error(struct iwm_softc *); 396 static void iwm_nic_umac_error(struct iwm_softc *); 397 #endif 398 static void iwm_handle_rxb(struct iwm_softc *, struct mbuf *); 399 static void iwm_notif_intr(struct iwm_softc *); 400 static void iwm_intr(void *); 401 static int iwm_attach(device_t); 402 static int iwm_is_valid_ether_addr(uint8_t *); 403 static void iwm_preinit(void *); 404 static int iwm_detach_local(struct iwm_softc *sc, int); 405 static void iwm_init_task(void *); 406 static void iwm_radiotap_attach(struct iwm_softc *); 407 static struct ieee80211vap * 408 iwm_vap_create(struct ieee80211com *, 409 const char [IFNAMSIZ], int, 410 enum ieee80211_opmode, int, 411 const uint8_t [IEEE80211_ADDR_LEN], 412 const uint8_t [IEEE80211_ADDR_LEN]); 413 static void iwm_vap_delete(struct ieee80211vap *); 414 static void iwm_xmit_queue_drain(struct iwm_softc *); 415 static void iwm_scan_start(struct ieee80211com *); 416 static void iwm_scan_end(struct ieee80211com *); 417 static void iwm_update_mcast(struct ieee80211com *); 418 static void iwm_set_channel(struct ieee80211com *); 419 static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long); 420 static void iwm_scan_mindwell(struct ieee80211_scan_state *); 421 static int iwm_detach(device_t); 422 423 #if defined(__DragonFly__) 424 static int iwm_msi_enable = 1; 425 426 TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable); 427 #endif 428 429 static int iwm_lar_disable = 0; 430 TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable); 431 432 /* 433 * Firmware parser. 434 */ 435 436 static int 437 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen) 438 { 439 const struct iwm_fw_cscheme_list *l = (const void *)data; 440 441 if (dlen < sizeof(*l) || 442 dlen < sizeof(l->size) + l->size * sizeof(*l->cs)) 443 return EINVAL; 444 445 /* we don't actually store anything for now, always use s/w crypto */ 446 447 return 0; 448 } 449 450 static int 451 iwm_firmware_store_section(struct iwm_softc *sc, 452 enum iwm_ucode_type type, const uint8_t *data, size_t dlen) 453 { 454 struct iwm_fw_img *fws; 455 struct iwm_fw_desc *fwone; 456 457 if (type >= IWM_UCODE_TYPE_MAX) 458 return EINVAL; 459 if (dlen < sizeof(uint32_t)) 460 return EINVAL; 461 462 fws = &sc->sc_fw.img[type]; 463 if (fws->fw_count >= IWM_UCODE_SECTION_MAX) 464 return EINVAL; 465 466 fwone = &fws->sec[fws->fw_count]; 467 468 /* first 32bit are device load offset */ 469 memcpy(&fwone->offset, data, sizeof(uint32_t)); 470 471 /* rest is data */ 472 fwone->data = data + sizeof(uint32_t); 473 fwone->len = dlen - sizeof(uint32_t); 474 475 fws->fw_count++; 476 477 return 0; 478 } 479 480 #define IWM_DEFAULT_SCAN_CHANNELS 40 481 482 struct iwm_tlv_calib_data { 483 uint32_t ucode_type; 484 struct iwm_tlv_calib_ctrl calib; 485 } __packed; 486 487 static int 488 iwm_set_default_calib(struct iwm_softc *sc, const void *data) 489 { 490 const struct iwm_tlv_calib_data *def_calib = data; 491 uint32_t ucode_type = le32toh(def_calib->ucode_type); 492 493 if (ucode_type >= IWM_UCODE_TYPE_MAX) { 494 device_printf(sc->sc_dev, 495 "Wrong ucode_type %u for default " 496 "calibration.\n", ucode_type); 497 return EINVAL; 498 } 499 500 sc->sc_default_calib[ucode_type].flow_trigger = 501 def_calib->calib.flow_trigger; 502 sc->sc_default_calib[ucode_type].event_trigger = 503 def_calib->calib.event_trigger; 504 505 return 0; 506 } 507 508 static int 509 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data, 510 struct iwm_ucode_capabilities *capa) 511 { 512 const struct iwm_ucode_api *ucode_api = (const void *)data; 513 uint32_t api_index = le32toh(ucode_api->api_index); 514 uint32_t api_flags = le32toh(ucode_api->api_flags); 515 int i; 516 517 if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) { 518 device_printf(sc->sc_dev, 519 "api flags index %d larger than supported by driver\n", 520 api_index); 521 /* don't return an error so we can load FW that has more bits */ 522 return 0; 523 } 524 525 for (i = 0; i < 32; i++) { 526 if (api_flags & (1U << i)) 527 setbit(capa->enabled_api, i + 32 * api_index); 528 } 529 530 return 0; 531 } 532 533 static int 534 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data, 535 struct iwm_ucode_capabilities *capa) 536 { 537 const struct iwm_ucode_capa *ucode_capa = (const void *)data; 538 uint32_t api_index = le32toh(ucode_capa->api_index); 539 uint32_t api_flags = le32toh(ucode_capa->api_capa); 540 int i; 541 542 if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) { 543 device_printf(sc->sc_dev, 544 "capa flags index %d larger than supported by driver\n", 545 api_index); 546 /* don't return an error so we can load FW that has more bits */ 547 return 0; 548 } 549 550 for (i = 0; i < 32; i++) { 551 if (api_flags & (1U << i)) 552 setbit(capa->enabled_capa, i + 32 * api_index); 553 } 554 555 return 0; 556 } 557 558 static void 559 iwm_fw_info_free(struct iwm_fw_info *fw) 560 { 561 firmware_put(fw->fw_fp, FIRMWARE_UNLOAD); 562 fw->fw_fp = NULL; 563 memset(fw->img, 0, sizeof(fw->img)); 564 } 565 566 static int 567 iwm_read_firmware(struct iwm_softc *sc) 568 { 569 struct iwm_fw_info *fw = &sc->sc_fw; 570 const struct iwm_tlv_ucode_header *uhdr; 571 const struct iwm_ucode_tlv *tlv; 572 struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa; 573 enum iwm_ucode_tlv_type tlv_type; 574 const struct firmware *fwp; 575 const uint8_t *data; 576 uint32_t tlv_len; 577 uint32_t usniffer_img; 578 const uint8_t *tlv_data; 579 uint32_t paging_mem_size; 580 int num_of_cpus; 581 int error = 0; 582 size_t len; 583 584 /* 585 * Load firmware into driver memory. 586 * fw_fp will be set. 587 */ 588 fwp = firmware_get(sc->cfg->fw_name); 589 if (fwp == NULL) { 590 device_printf(sc->sc_dev, 591 "could not read firmware %s (error %d)\n", 592 sc->cfg->fw_name, error); 593 goto out; 594 } 595 fw->fw_fp = fwp; 596 597 /* (Re-)Initialize default values. */ 598 capa->flags = 0; 599 capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH; 600 capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS; 601 memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa)); 602 memset(capa->enabled_api, 0, sizeof(capa->enabled_api)); 603 memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc)); 604 605 /* 606 * Parse firmware contents 607 */ 608 609 uhdr = (const void *)fw->fw_fp->data; 610 if (*(const uint32_t *)fw->fw_fp->data != 0 611 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) { 612 device_printf(sc->sc_dev, "invalid firmware %s\n", 613 sc->cfg->fw_name); 614 error = EINVAL; 615 goto out; 616 } 617 618 ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)", 619 IWM_UCODE_MAJOR(le32toh(uhdr->ver)), 620 IWM_UCODE_MINOR(le32toh(uhdr->ver)), 621 IWM_UCODE_API(le32toh(uhdr->ver))); 622 data = uhdr->data; 623 len = fw->fw_fp->datasize - sizeof(*uhdr); 624 625 while (len >= sizeof(*tlv)) { 626 len -= sizeof(*tlv); 627 tlv = (const void *)data; 628 629 tlv_len = le32toh(tlv->length); 630 tlv_type = le32toh(tlv->type); 631 tlv_data = tlv->data; 632 633 if (len < tlv_len) { 634 device_printf(sc->sc_dev, 635 "firmware too short: %zu bytes\n", 636 len); 637 error = EINVAL; 638 goto parse_out; 639 } 640 len -= roundup2(tlv_len, 4); 641 data += sizeof(tlv) + roundup2(tlv_len, 4); 642 643 switch ((int)tlv_type) { 644 case IWM_UCODE_TLV_PROBE_MAX_LEN: 645 if (tlv_len != sizeof(uint32_t)) { 646 device_printf(sc->sc_dev, 647 "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n", 648 __func__, tlv_len); 649 error = EINVAL; 650 goto parse_out; 651 } 652 capa->max_probe_length = 653 le32_to_cpup((const uint32_t *)tlv_data); 654 /* limit it to something sensible */ 655 if (capa->max_probe_length > 656 IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) { 657 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV, 658 "%s: IWM_UCODE_TLV_PROBE_MAX_LEN " 659 "ridiculous\n", __func__); 660 error = EINVAL; 661 goto parse_out; 662 } 663 break; 664 case IWM_UCODE_TLV_PAN: 665 if (tlv_len) { 666 device_printf(sc->sc_dev, 667 "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n", 668 __func__, tlv_len); 669 error = EINVAL; 670 goto parse_out; 671 } 672 capa->flags |= IWM_UCODE_TLV_FLAGS_PAN; 673 break; 674 case IWM_UCODE_TLV_FLAGS: 675 if (tlv_len < sizeof(uint32_t)) { 676 device_printf(sc->sc_dev, 677 "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n", 678 __func__, tlv_len); 679 error = EINVAL; 680 goto parse_out; 681 } 682 if (tlv_len % sizeof(uint32_t)) { 683 device_printf(sc->sc_dev, 684 "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n", 685 __func__, tlv_len); 686 error = EINVAL; 687 goto parse_out; 688 } 689 /* 690 * Apparently there can be many flags, but Linux driver 691 * parses only the first one, and so do we. 692 * 693 * XXX: why does this override IWM_UCODE_TLV_PAN? 694 * Intentional or a bug? Observations from 695 * current firmware file: 696 * 1) TLV_PAN is parsed first 697 * 2) TLV_FLAGS contains TLV_FLAGS_PAN 698 * ==> this resets TLV_PAN to itself... hnnnk 699 */ 700 capa->flags = le32_to_cpup((const uint32_t *)tlv_data); 701 break; 702 case IWM_UCODE_TLV_CSCHEME: 703 if ((error = iwm_store_cscheme(sc, 704 tlv_data, tlv_len)) != 0) { 705 device_printf(sc->sc_dev, 706 "%s: iwm_store_cscheme(): returned %d\n", 707 __func__, error); 708 goto parse_out; 709 } 710 break; 711 case IWM_UCODE_TLV_NUM_OF_CPU: 712 if (tlv_len != sizeof(uint32_t)) { 713 device_printf(sc->sc_dev, 714 "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n", 715 __func__, tlv_len); 716 error = EINVAL; 717 goto parse_out; 718 } 719 num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data); 720 if (num_of_cpus == 2) { 721 fw->img[IWM_UCODE_REGULAR].is_dual_cpus = 722 TRUE; 723 fw->img[IWM_UCODE_INIT].is_dual_cpus = 724 TRUE; 725 fw->img[IWM_UCODE_WOWLAN].is_dual_cpus = 726 TRUE; 727 } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) { 728 device_printf(sc->sc_dev, 729 "%s: Driver supports only 1 or 2 CPUs\n", 730 __func__); 731 error = EINVAL; 732 goto parse_out; 733 } 734 break; 735 case IWM_UCODE_TLV_SEC_RT: 736 if ((error = iwm_firmware_store_section(sc, 737 IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) { 738 device_printf(sc->sc_dev, 739 "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n", 740 __func__, error); 741 goto parse_out; 742 } 743 break; 744 case IWM_UCODE_TLV_SEC_INIT: 745 if ((error = iwm_firmware_store_section(sc, 746 IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) { 747 device_printf(sc->sc_dev, 748 "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n", 749 __func__, error); 750 goto parse_out; 751 } 752 break; 753 case IWM_UCODE_TLV_SEC_WOWLAN: 754 if ((error = iwm_firmware_store_section(sc, 755 IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) { 756 device_printf(sc->sc_dev, 757 "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n", 758 __func__, error); 759 goto parse_out; 760 } 761 break; 762 case IWM_UCODE_TLV_DEF_CALIB: 763 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) { 764 device_printf(sc->sc_dev, 765 "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%u) < sizeof(iwm_tlv_calib_data) (%zu)\n", 766 __func__, tlv_len, 767 sizeof(struct iwm_tlv_calib_data)); 768 error = EINVAL; 769 goto parse_out; 770 } 771 if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) { 772 device_printf(sc->sc_dev, 773 "%s: iwm_set_default_calib() failed: %d\n", 774 __func__, error); 775 goto parse_out; 776 } 777 break; 778 case IWM_UCODE_TLV_PHY_SKU: 779 if (tlv_len != sizeof(uint32_t)) { 780 error = EINVAL; 781 device_printf(sc->sc_dev, 782 "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) < sizeof(uint32_t)\n", 783 __func__, tlv_len); 784 goto parse_out; 785 } 786 sc->sc_fw.phy_config = 787 le32_to_cpup((const uint32_t *)tlv_data); 788 sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config & 789 IWM_FW_PHY_CFG_TX_CHAIN) >> 790 IWM_FW_PHY_CFG_TX_CHAIN_POS; 791 sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config & 792 IWM_FW_PHY_CFG_RX_CHAIN) >> 793 IWM_FW_PHY_CFG_RX_CHAIN_POS; 794 break; 795 796 case IWM_UCODE_TLV_API_CHANGES_SET: { 797 if (tlv_len != sizeof(struct iwm_ucode_api)) { 798 error = EINVAL; 799 goto parse_out; 800 } 801 if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) { 802 error = EINVAL; 803 goto parse_out; 804 } 805 break; 806 } 807 808 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: { 809 if (tlv_len != sizeof(struct iwm_ucode_capa)) { 810 error = EINVAL; 811 goto parse_out; 812 } 813 if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) { 814 error = EINVAL; 815 goto parse_out; 816 } 817 break; 818 } 819 820 case 48: /* undocumented TLV */ 821 case IWM_UCODE_TLV_SDIO_ADMA_ADDR: 822 case IWM_UCODE_TLV_FW_GSCAN_CAPA: 823 /* ignore, not used by current driver */ 824 break; 825 826 case IWM_UCODE_TLV_SEC_RT_USNIFFER: 827 if ((error = iwm_firmware_store_section(sc, 828 IWM_UCODE_REGULAR_USNIFFER, tlv_data, 829 tlv_len)) != 0) 830 goto parse_out; 831 break; 832 833 case IWM_UCODE_TLV_PAGING: 834 if (tlv_len != sizeof(uint32_t)) { 835 error = EINVAL; 836 goto parse_out; 837 } 838 paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data); 839 840 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV, 841 "%s: Paging: paging enabled (size = %u bytes)\n", 842 __func__, paging_mem_size); 843 if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) { 844 device_printf(sc->sc_dev, 845 "%s: Paging: driver supports up to %u bytes for paging image\n", 846 __func__, IWM_MAX_PAGING_IMAGE_SIZE); 847 error = EINVAL; 848 goto out; 849 } 850 if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) { 851 device_printf(sc->sc_dev, 852 "%s: Paging: image isn't multiple %u\n", 853 __func__, IWM_FW_PAGING_SIZE); 854 error = EINVAL; 855 goto out; 856 } 857 858 sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size = 859 paging_mem_size; 860 usniffer_img = IWM_UCODE_REGULAR_USNIFFER; 861 sc->sc_fw.img[usniffer_img].paging_mem_size = 862 paging_mem_size; 863 break; 864 865 case IWM_UCODE_TLV_N_SCAN_CHANNELS: 866 if (tlv_len != sizeof(uint32_t)) { 867 error = EINVAL; 868 goto parse_out; 869 } 870 capa->n_scan_channels = 871 le32_to_cpup((const uint32_t *)tlv_data); 872 break; 873 874 case IWM_UCODE_TLV_FW_VERSION: 875 if (tlv_len != sizeof(uint32_t) * 3) { 876 error = EINVAL; 877 goto parse_out; 878 } 879 ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), 880 "%d.%d.%d", 881 le32toh(((const uint32_t *)tlv_data)[0]), 882 le32toh(((const uint32_t *)tlv_data)[1]), 883 le32toh(((const uint32_t *)tlv_data)[2])); 884 break; 885 886 case IWM_UCODE_TLV_FW_MEM_SEG: 887 break; 888 889 default: 890 device_printf(sc->sc_dev, 891 "%s: unknown firmware section %d, abort\n", 892 __func__, tlv_type); 893 error = EINVAL; 894 goto parse_out; 895 } 896 } 897 898 KASSERT(error == 0, ("unhandled error")); 899 900 parse_out: 901 if (error) { 902 device_printf(sc->sc_dev, "firmware parse error %d, " 903 "section type %d\n", error, tlv_type); 904 } 905 906 out: 907 if (error) { 908 if (fw->fw_fp != NULL) 909 iwm_fw_info_free(fw); 910 } 911 912 return error; 913 } 914 915 /* 916 * DMA resource routines 917 */ 918 919 /* fwmem is used to load firmware onto the card */ 920 static int 921 iwm_alloc_fwmem(struct iwm_softc *sc) 922 { 923 /* Must be aligned on a 16-byte boundary. */ 924 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, 925 IWM_FH_MEM_TB_MAX_LENGTH, 16); 926 } 927 928 /* tx scheduler rings. not used? */ 929 static int 930 iwm_alloc_sched(struct iwm_softc *sc) 931 { 932 /* TX scheduler rings must be aligned on a 1KB boundary. */ 933 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma, 934 nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024); 935 } 936 937 /* keep-warm page is used internally by the card. see iwl-fh.h for more info */ 938 static int 939 iwm_alloc_kw(struct iwm_softc *sc) 940 { 941 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096); 942 } 943 944 /* interrupt cause table */ 945 static int 946 iwm_alloc_ict(struct iwm_softc *sc) 947 { 948 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, 949 IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT); 950 } 951 952 static int 953 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring) 954 { 955 bus_size_t size; 956 int i, error; 957 958 ring->cur = 0; 959 960 /* Allocate RX descriptors (256-byte aligned). */ 961 size = IWM_RX_RING_COUNT * sizeof(uint32_t); 962 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256); 963 if (error != 0) { 964 device_printf(sc->sc_dev, 965 "could not allocate RX ring DMA memory\n"); 966 goto fail; 967 } 968 ring->desc = ring->desc_dma.vaddr; 969 970 /* Allocate RX status area (16-byte aligned). */ 971 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, 972 sizeof(*ring->stat), 16); 973 if (error != 0) { 974 device_printf(sc->sc_dev, 975 "could not allocate RX status DMA memory\n"); 976 goto fail; 977 } 978 ring->stat = ring->stat_dma.vaddr; 979 980 /* Create RX buffer DMA tag. */ 981 #if defined(__DragonFly__) 982 error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE, 983 0, 984 BUS_SPACE_MAXADDR_32BIT, 985 BUS_SPACE_MAXADDR, 986 NULL, NULL, 987 IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 988 BUS_DMA_NOWAIT, &ring->data_dmat); 989 #else 990 error = bus_dma_tag_create(sc->sc_dmat, 1, 0, 991 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 992 IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat); 993 #endif 994 if (error != 0) { 995 device_printf(sc->sc_dev, 996 "%s: could not create RX buf DMA tag, error %d\n", 997 __func__, error); 998 goto fail; 999 } 1000 1001 /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */ 1002 error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map); 1003 if (error != 0) { 1004 device_printf(sc->sc_dev, 1005 "%s: could not create RX buf DMA map, error %d\n", 1006 __func__, error); 1007 goto fail; 1008 } 1009 /* 1010 * Allocate and map RX buffers. 1011 */ 1012 for (i = 0; i < IWM_RX_RING_COUNT; i++) { 1013 struct iwm_rx_data *data = &ring->data[i]; 1014 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1015 if (error != 0) { 1016 device_printf(sc->sc_dev, 1017 "%s: could not create RX buf DMA map, error %d\n", 1018 __func__, error); 1019 goto fail; 1020 } 1021 data->m = NULL; 1022 1023 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) { 1024 goto fail; 1025 } 1026 } 1027 return 0; 1028 1029 fail: iwm_free_rx_ring(sc, ring); 1030 return error; 1031 } 1032 1033 static void 1034 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring) 1035 { 1036 /* Reset the ring state */ 1037 ring->cur = 0; 1038 1039 /* 1040 * The hw rx ring index in shared memory must also be cleared, 1041 * otherwise the discrepancy can cause reprocessing chaos. 1042 */ 1043 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat)); 1044 } 1045 1046 static void 1047 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring) 1048 { 1049 int i; 1050 1051 iwm_dma_contig_free(&ring->desc_dma); 1052 iwm_dma_contig_free(&ring->stat_dma); 1053 1054 for (i = 0; i < IWM_RX_RING_COUNT; i++) { 1055 struct iwm_rx_data *data = &ring->data[i]; 1056 1057 if (data->m != NULL) { 1058 bus_dmamap_sync(ring->data_dmat, data->map, 1059 BUS_DMASYNC_POSTREAD); 1060 bus_dmamap_unload(ring->data_dmat, data->map); 1061 m_freem(data->m); 1062 data->m = NULL; 1063 } 1064 if (data->map != NULL) { 1065 bus_dmamap_destroy(ring->data_dmat, data->map); 1066 data->map = NULL; 1067 } 1068 } 1069 if (ring->spare_map != NULL) { 1070 bus_dmamap_destroy(ring->data_dmat, ring->spare_map); 1071 ring->spare_map = NULL; 1072 } 1073 if (ring->data_dmat != NULL) { 1074 bus_dma_tag_destroy(ring->data_dmat); 1075 ring->data_dmat = NULL; 1076 } 1077 } 1078 1079 static int 1080 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid) 1081 { 1082 bus_addr_t paddr; 1083 bus_size_t size; 1084 size_t maxsize; 1085 int nsegments; 1086 int i, error; 1087 1088 ring->qid = qid; 1089 ring->queued = 0; 1090 ring->cur = 0; 1091 1092 /* Allocate TX descriptors (256-byte aligned). */ 1093 size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd); 1094 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256); 1095 if (error != 0) { 1096 device_printf(sc->sc_dev, 1097 "could not allocate TX ring DMA memory\n"); 1098 goto fail; 1099 } 1100 ring->desc = ring->desc_dma.vaddr; 1101 1102 /* 1103 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need 1104 * to allocate commands space for other rings. 1105 */ 1106 if (qid > IWM_MVM_CMD_QUEUE) 1107 return 0; 1108 1109 size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd); 1110 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4); 1111 if (error != 0) { 1112 device_printf(sc->sc_dev, 1113 "could not allocate TX cmd DMA memory\n"); 1114 goto fail; 1115 } 1116 ring->cmd = ring->cmd_dma.vaddr; 1117 1118 /* FW commands may require more mapped space than packets. */ 1119 if (qid == IWM_MVM_CMD_QUEUE) { 1120 maxsize = IWM_RBUF_SIZE; 1121 nsegments = 1; 1122 } else { 1123 maxsize = MCLBYTES; 1124 nsegments = IWM_MAX_SCATTER - 2; 1125 } 1126 1127 #if defined(__DragonFly__) 1128 error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE, 1129 0, 1130 BUS_SPACE_MAXADDR_32BIT, 1131 BUS_SPACE_MAXADDR, 1132 NULL, NULL, 1133 maxsize, nsegments, maxsize, 1134 BUS_DMA_NOWAIT, &ring->data_dmat); 1135 #else 1136 error = bus_dma_tag_create(sc->sc_dmat, 1, 0, 1137 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1138 nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat); 1139 #endif 1140 if (error != 0) { 1141 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n"); 1142 goto fail; 1143 } 1144 1145 paddr = ring->cmd_dma.paddr; 1146 for (i = 0; i < IWM_TX_RING_COUNT; i++) { 1147 struct iwm_tx_data *data = &ring->data[i]; 1148 1149 data->cmd_paddr = paddr; 1150 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header) 1151 + offsetof(struct iwm_tx_cmd, scratch); 1152 paddr += sizeof(struct iwm_device_cmd); 1153 1154 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1155 if (error != 0) { 1156 device_printf(sc->sc_dev, 1157 "could not create TX buf DMA map\n"); 1158 goto fail; 1159 } 1160 } 1161 KASSERT(paddr == ring->cmd_dma.paddr + size, 1162 ("invalid physical address")); 1163 return 0; 1164 1165 fail: iwm_free_tx_ring(sc, ring); 1166 return error; 1167 } 1168 1169 static void 1170 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring) 1171 { 1172 int i; 1173 1174 for (i = 0; i < IWM_TX_RING_COUNT; i++) { 1175 struct iwm_tx_data *data = &ring->data[i]; 1176 1177 if (data->m != NULL) { 1178 bus_dmamap_sync(ring->data_dmat, data->map, 1179 BUS_DMASYNC_POSTWRITE); 1180 bus_dmamap_unload(ring->data_dmat, data->map); 1181 m_freem(data->m); 1182 data->m = NULL; 1183 } 1184 } 1185 /* Clear TX descriptors. */ 1186 memset(ring->desc, 0, ring->desc_dma.size); 1187 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1188 BUS_DMASYNC_PREWRITE); 1189 sc->qfullmsk &= ~(1 << ring->qid); 1190 ring->queued = 0; 1191 ring->cur = 0; 1192 1193 if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake) 1194 iwm_pcie_clear_cmd_in_flight(sc); 1195 } 1196 1197 static void 1198 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring) 1199 { 1200 int i; 1201 1202 iwm_dma_contig_free(&ring->desc_dma); 1203 iwm_dma_contig_free(&ring->cmd_dma); 1204 1205 for (i = 0; i < IWM_TX_RING_COUNT; i++) { 1206 struct iwm_tx_data *data = &ring->data[i]; 1207 1208 if (data->m != NULL) { 1209 bus_dmamap_sync(ring->data_dmat, data->map, 1210 BUS_DMASYNC_POSTWRITE); 1211 bus_dmamap_unload(ring->data_dmat, data->map); 1212 m_freem(data->m); 1213 data->m = NULL; 1214 } 1215 if (data->map != NULL) { 1216 bus_dmamap_destroy(ring->data_dmat, data->map); 1217 data->map = NULL; 1218 } 1219 } 1220 if (ring->data_dmat != NULL) { 1221 bus_dma_tag_destroy(ring->data_dmat); 1222 ring->data_dmat = NULL; 1223 } 1224 } 1225 1226 /* 1227 * High-level hardware frobbing routines 1228 */ 1229 1230 static void 1231 iwm_enable_interrupts(struct iwm_softc *sc) 1232 { 1233 sc->sc_intmask = IWM_CSR_INI_SET_MASK; 1234 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask); 1235 } 1236 1237 static void 1238 iwm_restore_interrupts(struct iwm_softc *sc) 1239 { 1240 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask); 1241 } 1242 1243 static void 1244 iwm_disable_interrupts(struct iwm_softc *sc) 1245 { 1246 /* disable interrupts */ 1247 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0); 1248 1249 /* acknowledge all interrupts */ 1250 IWM_WRITE(sc, IWM_CSR_INT, ~0); 1251 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0); 1252 } 1253 1254 static void 1255 iwm_ict_reset(struct iwm_softc *sc) 1256 { 1257 iwm_disable_interrupts(sc); 1258 1259 /* Reset ICT table. */ 1260 memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE); 1261 sc->ict_cur = 0; 1262 1263 /* Set physical address of ICT table (4KB aligned). */ 1264 IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG, 1265 IWM_CSR_DRAM_INT_TBL_ENABLE 1266 | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER 1267 | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK 1268 | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT); 1269 1270 /* Switch to ICT interrupt mode in driver. */ 1271 sc->sc_flags |= IWM_FLAG_USE_ICT; 1272 1273 /* Re-enable interrupts. */ 1274 IWM_WRITE(sc, IWM_CSR_INT, ~0); 1275 iwm_enable_interrupts(sc); 1276 } 1277 1278 /* 1279 * Since this .. hard-resets things, it's time to actually 1280 * mark the first vap (if any) as having no mac context. 1281 * It's annoying, but since the driver is potentially being 1282 * stop/start'ed whilst active (thanks openbsd port!) we 1283 * have to correctly track this. 1284 */ 1285 static void 1286 iwm_stop_device(struct iwm_softc *sc) 1287 { 1288 struct ieee80211com *ic = &sc->sc_ic; 1289 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 1290 int chnl, qid; 1291 uint32_t mask = 0; 1292 1293 /* tell the device to stop sending interrupts */ 1294 iwm_disable_interrupts(sc); 1295 1296 /* 1297 * FreeBSD-local: mark the first vap as not-uploaded, 1298 * so the next transition through auth/assoc 1299 * will correctly populate the MAC context. 1300 */ 1301 if (vap) { 1302 struct iwm_vap *iv = IWM_VAP(vap); 1303 iv->phy_ctxt = NULL; 1304 iv->is_uploaded = 0; 1305 } 1306 sc->sc_firmware_state = 0; 1307 sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE; 1308 1309 /* device going down, Stop using ICT table */ 1310 sc->sc_flags &= ~IWM_FLAG_USE_ICT; 1311 1312 /* stop tx and rx. tx and rx bits, as usual, are from if_iwn */ 1313 1314 if (iwm_nic_lock(sc)) { 1315 iwm_write_prph(sc, IWM_SCD_TXFACT, 0); 1316 1317 /* Stop each Tx DMA channel */ 1318 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) { 1319 IWM_WRITE(sc, 1320 IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0); 1321 mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl); 1322 } 1323 1324 /* Wait for DMA channels to be idle */ 1325 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask, 1326 5000)) { 1327 device_printf(sc->sc_dev, 1328 "Failing on timeout while stopping DMA channel: [0x%08x]\n", 1329 IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG)); 1330 } 1331 iwm_nic_unlock(sc); 1332 } 1333 iwm_pcie_rx_stop(sc); 1334 1335 /* Stop RX ring. */ 1336 iwm_reset_rx_ring(sc, &sc->rxq); 1337 1338 /* Reset all TX rings. */ 1339 for (qid = 0; qid < nitems(sc->txq); qid++) 1340 iwm_reset_tx_ring(sc, &sc->txq[qid]); 1341 1342 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) { 1343 /* Power-down device's busmaster DMA clocks */ 1344 if (iwm_nic_lock(sc)) { 1345 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, 1346 IWM_APMG_CLK_VAL_DMA_CLK_RQT); 1347 iwm_nic_unlock(sc); 1348 } 1349 DELAY(5); 1350 } 1351 1352 /* Make sure (redundant) we've released our request to stay awake */ 1353 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL, 1354 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1355 1356 /* Stop the device, and put it in low power state */ 1357 iwm_apm_stop(sc); 1358 1359 /* stop and reset the on-board processor */ 1360 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET); 1361 DELAY(1000); 1362 1363 /* 1364 * Upon stop, the APM issues an interrupt if HW RF kill is set. 1365 * This is a bug in certain verions of the hardware. 1366 * Certain devices also keep sending HW RF kill interrupt all 1367 * the time, unless the interrupt is ACKed even if the interrupt 1368 * should be masked. Re-ACK all the interrupts here. 1369 */ 1370 iwm_disable_interrupts(sc); 1371 1372 /* 1373 * Even if we stop the HW, we still want the RF kill 1374 * interrupt 1375 */ 1376 iwm_enable_rfkill_int(sc); 1377 iwm_check_rfkill(sc); 1378 } 1379 1380 static void 1381 iwm_mvm_nic_config(struct iwm_softc *sc) 1382 { 1383 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash; 1384 uint32_t reg_val = 0; 1385 uint32_t phy_config = iwm_mvm_get_phy_config(sc); 1386 1387 radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >> 1388 IWM_FW_PHY_CFG_RADIO_TYPE_POS; 1389 radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >> 1390 IWM_FW_PHY_CFG_RADIO_STEP_POS; 1391 radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >> 1392 IWM_FW_PHY_CFG_RADIO_DASH_POS; 1393 1394 /* SKU control */ 1395 reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) << 1396 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP; 1397 reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) << 1398 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH; 1399 1400 /* radio configuration */ 1401 reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE; 1402 reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP; 1403 reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH; 1404 1405 IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val); 1406 1407 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 1408 "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, 1409 radio_cfg_step, radio_cfg_dash); 1410 1411 /* 1412 * W/A : NIC is stuck in a reset state after Early PCIe power off 1413 * (PCIe power is lost before PERST# is asserted), causing ME FW 1414 * to lose ownership and not being able to obtain it back. 1415 */ 1416 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) { 1417 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG, 1418 IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, 1419 ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); 1420 } 1421 } 1422 1423 static int 1424 iwm_nic_rx_init(struct iwm_softc *sc) 1425 { 1426 /* 1427 * Initialize RX ring. This is from the iwn driver. 1428 */ 1429 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat)); 1430 1431 /* Stop Rx DMA */ 1432 iwm_pcie_rx_stop(sc); 1433 1434 if (!iwm_nic_lock(sc)) 1435 return EBUSY; 1436 1437 /* reset and flush pointers */ 1438 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0); 1439 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0); 1440 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0); 1441 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); 1442 1443 /* Set physical address of RX ring (256-byte aligned). */ 1444 IWM_WRITE(sc, 1445 IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8); 1446 1447 /* Set physical address of RX status (16-byte aligned). */ 1448 IWM_WRITE(sc, 1449 IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4); 1450 1451 #if defined(__DragonFly__) 1452 /* Force serialization (probably not needed but don't trust the HW) */ 1453 IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG); 1454 #endif 1455 1456 /* Enable Rx DMA 1457 * XXX 5000 HW isn't supported by the iwm(4) driver. 1458 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in 1459 * the credit mechanism in 5000 HW RX FIFO 1460 * Direct rx interrupts to hosts 1461 * Rx buffer size 4 or 8k or 12k 1462 * RB timeout 0x10 1463 * 256 RBDs 1464 */ 1465 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 1466 IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | 1467 IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */ 1468 IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | 1469 IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K | 1470 (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) | 1471 IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS); 1472 1473 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF); 1474 1475 /* W/A for interrupt coalescing bug in 7260 and 3160 */ 1476 if (sc->cfg->host_interrupt_operation_mode) 1477 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE); 1478 1479 /* 1480 * Thus sayeth el jefe (iwlwifi) via a comment: 1481 * 1482 * This value should initially be 0 (before preparing any 1483 * RBs), should be 8 after preparing the first 8 RBs (for example) 1484 */ 1485 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8); 1486 1487 iwm_nic_unlock(sc); 1488 1489 return 0; 1490 } 1491 1492 static int 1493 iwm_nic_tx_init(struct iwm_softc *sc) 1494 { 1495 int qid; 1496 1497 if (!iwm_nic_lock(sc)) 1498 return EBUSY; 1499 1500 /* Deactivate TX scheduler. */ 1501 iwm_write_prph(sc, IWM_SCD_TXFACT, 0); 1502 1503 /* Set physical address of "keep warm" page (16-byte aligned). */ 1504 IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4); 1505 1506 /* Initialize TX rings. */ 1507 for (qid = 0; qid < nitems(sc->txq); qid++) { 1508 struct iwm_tx_ring *txq = &sc->txq[qid]; 1509 1510 /* Set physical address of TX ring (256-byte aligned). */ 1511 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid), 1512 txq->desc_dma.paddr >> 8); 1513 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 1514 "%s: loading ring %d descriptors (%p) at %lx\n", 1515 __func__, 1516 qid, txq->desc, 1517 (unsigned long) (txq->desc_dma.paddr >> 8)); 1518 } 1519 1520 iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE); 1521 1522 iwm_nic_unlock(sc); 1523 1524 return 0; 1525 } 1526 1527 static int 1528 iwm_nic_init(struct iwm_softc *sc) 1529 { 1530 int error; 1531 1532 iwm_apm_init(sc); 1533 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) 1534 iwm_set_pwr(sc); 1535 1536 iwm_mvm_nic_config(sc); 1537 1538 if ((error = iwm_nic_rx_init(sc)) != 0) 1539 return error; 1540 1541 /* 1542 * Ditto for TX, from iwn 1543 */ 1544 if ((error = iwm_nic_tx_init(sc)) != 0) 1545 return error; 1546 1547 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 1548 "%s: shadow registers enabled\n", __func__); 1549 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff); 1550 1551 return 0; 1552 } 1553 1554 int 1555 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo) 1556 { 1557 if (!iwm_nic_lock(sc)) { 1558 device_printf(sc->sc_dev, 1559 "%s: cannot enable txq %d\n", 1560 __func__, 1561 qid); 1562 return EBUSY; 1563 } 1564 1565 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0); 1566 1567 if (qid == IWM_MVM_CMD_QUEUE) { 1568 /* unactivate before configuration */ 1569 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid), 1570 (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) 1571 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); 1572 1573 iwm_nic_unlock(sc); 1574 1575 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid)); 1576 1577 if (!iwm_nic_lock(sc)) { 1578 device_printf(sc->sc_dev, 1579 "%s: cannot enable txq %d\n", __func__, qid); 1580 return EBUSY; 1581 } 1582 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0); 1583 iwm_nic_unlock(sc); 1584 1585 iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0); 1586 /* Set scheduler window size and frame limit. */ 1587 iwm_write_mem32(sc, 1588 sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) + 1589 sizeof(uint32_t), 1590 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & 1591 IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | 1592 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & 1593 IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); 1594 1595 if (!iwm_nic_lock(sc)) { 1596 device_printf(sc->sc_dev, 1597 "%s: cannot enable txq %d\n", __func__, qid); 1598 return EBUSY; 1599 } 1600 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid), 1601 (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) | 1602 (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) | 1603 (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) | 1604 IWM_SCD_QUEUE_STTS_REG_MSK); 1605 } else { 1606 struct iwm_scd_txq_cfg_cmd cmd; 1607 int error; 1608 1609 iwm_nic_unlock(sc); 1610 1611 memset(&cmd, 0, sizeof(cmd)); 1612 cmd.scd_queue = qid; 1613 cmd.enable = 1; 1614 cmd.sta_id = sta_id; 1615 cmd.tx_fifo = fifo; 1616 cmd.aggregate = 0; 1617 cmd.window = IWM_FRAME_LIMIT; 1618 1619 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC, 1620 sizeof(cmd), &cmd); 1621 if (error) { 1622 device_printf(sc->sc_dev, 1623 "cannot enable txq %d\n", qid); 1624 return error; 1625 } 1626 1627 if (!iwm_nic_lock(sc)) 1628 return EBUSY; 1629 } 1630 1631 iwm_write_prph(sc, IWM_SCD_EN_CTRL, 1632 iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid); 1633 1634 iwm_nic_unlock(sc); 1635 1636 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n", 1637 __func__, qid, fifo); 1638 1639 return 0; 1640 } 1641 1642 static int 1643 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr) 1644 { 1645 int error, chnl; 1646 1647 int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND - 1648 IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t); 1649 1650 if (!iwm_nic_lock(sc)) 1651 return EBUSY; 1652 1653 iwm_ict_reset(sc); 1654 1655 sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR); 1656 if (scd_base_addr != 0 && 1657 scd_base_addr != sc->scd_base_addr) { 1658 device_printf(sc->sc_dev, 1659 "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n", 1660 __func__, sc->scd_base_addr, scd_base_addr); 1661 } 1662 1663 iwm_nic_unlock(sc); 1664 1665 /* reset context data, TX status and translation data */ 1666 error = iwm_write_mem(sc, 1667 sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND, 1668 NULL, clear_dwords); 1669 if (error) 1670 return EBUSY; 1671 1672 if (!iwm_nic_lock(sc)) 1673 return EBUSY; 1674 1675 /* Set physical address of TX scheduler rings (1KB aligned). */ 1676 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10); 1677 1678 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0); 1679 1680 iwm_nic_unlock(sc); 1681 1682 /* enable command channel */ 1683 error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7); 1684 if (error) 1685 return error; 1686 1687 if (!iwm_nic_lock(sc)) 1688 return EBUSY; 1689 1690 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff); 1691 1692 /* Enable DMA channels. */ 1693 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) { 1694 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 1695 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 1696 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); 1697 } 1698 1699 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG, 1700 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); 1701 1702 iwm_nic_unlock(sc); 1703 1704 /* Enable L1-Active */ 1705 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) { 1706 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG, 1707 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 1708 } 1709 1710 return error; 1711 } 1712 1713 /* 1714 * NVM read access and content parsing. We do not support 1715 * external NVM or writing NVM. 1716 * iwlwifi/mvm/nvm.c 1717 */ 1718 1719 /* Default NVM size to read */ 1720 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024) 1721 1722 #define IWM_NVM_WRITE_OPCODE 1 1723 #define IWM_NVM_READ_OPCODE 0 1724 1725 /* load nvm chunk response */ 1726 enum { 1727 IWM_READ_NVM_CHUNK_SUCCEED = 0, 1728 IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1 1729 }; 1730 1731 static int 1732 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, 1733 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len) 1734 { 1735 struct iwm_nvm_access_cmd nvm_access_cmd = { 1736 .offset = htole16(offset), 1737 .length = htole16(length), 1738 .type = htole16(section), 1739 .op_code = IWM_NVM_READ_OPCODE, 1740 }; 1741 struct iwm_nvm_access_resp *nvm_resp; 1742 struct iwm_rx_packet *pkt; 1743 struct iwm_host_cmd cmd = { 1744 .id = IWM_NVM_ACCESS_CMD, 1745 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL, 1746 .data = { &nvm_access_cmd, }, 1747 }; 1748 int ret, bytes_read, offset_read; 1749 uint8_t *resp_data; 1750 1751 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd); 1752 1753 ret = iwm_send_cmd(sc, &cmd); 1754 if (ret) { 1755 device_printf(sc->sc_dev, 1756 "Could not send NVM_ACCESS command (error=%d)\n", ret); 1757 return ret; 1758 } 1759 1760 pkt = cmd.resp_pkt; 1761 1762 /* Extract NVM response */ 1763 nvm_resp = (void *)pkt->data; 1764 ret = le16toh(nvm_resp->status); 1765 bytes_read = le16toh(nvm_resp->length); 1766 offset_read = le16toh(nvm_resp->offset); 1767 resp_data = nvm_resp->data; 1768 if (ret) { 1769 if ((offset != 0) && 1770 (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) { 1771 /* 1772 * meaning of NOT_VALID_ADDRESS: 1773 * driver try to read chunk from address that is 1774 * multiple of 2K and got an error since addr is empty. 1775 * meaning of (offset != 0): driver already 1776 * read valid data from another chunk so this case 1777 * is not an error. 1778 */ 1779 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET, 1780 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n", 1781 offset); 1782 *len = 0; 1783 ret = 0; 1784 } else { 1785 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET, 1786 "NVM access command failed with status %d\n", ret); 1787 ret = EIO; 1788 } 1789 goto exit; 1790 } 1791 1792 if (offset_read != offset) { 1793 device_printf(sc->sc_dev, 1794 "NVM ACCESS response with invalid offset %d\n", 1795 offset_read); 1796 ret = EINVAL; 1797 goto exit; 1798 } 1799 1800 if (bytes_read > length) { 1801 device_printf(sc->sc_dev, 1802 "NVM ACCESS response with too much data " 1803 "(%d bytes requested, %d bytes received)\n", 1804 length, bytes_read); 1805 ret = EINVAL; 1806 goto exit; 1807 } 1808 1809 /* Write data to NVM */ 1810 memcpy(data + offset, resp_data, bytes_read); 1811 *len = bytes_read; 1812 1813 exit: 1814 iwm_free_resp(sc, &cmd); 1815 return ret; 1816 } 1817 1818 /* 1819 * Reads an NVM section completely. 1820 * NICs prior to 7000 family don't have a real NVM, but just read 1821 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited 1822 * by uCode, we need to manually check in this case that we don't 1823 * overflow and try to read more than the EEPROM size. 1824 * For 7000 family NICs, we supply the maximal size we can read, and 1825 * the uCode fills the response with as much data as we can, 1826 * without overflowing, so no check is needed. 1827 */ 1828 static int 1829 iwm_nvm_read_section(struct iwm_softc *sc, 1830 uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read) 1831 { 1832 uint16_t seglen, length, offset = 0; 1833 int ret; 1834 1835 /* Set nvm section read length */ 1836 length = IWM_NVM_DEFAULT_CHUNK_SIZE; 1837 1838 seglen = length; 1839 1840 /* Read the NVM until exhausted (reading less than requested) */ 1841 while (seglen == length) { 1842 /* Check no memory assumptions fail and cause an overflow */ 1843 if ((size_read + offset + length) > 1844 sc->cfg->eeprom_size) { 1845 device_printf(sc->sc_dev, 1846 "EEPROM size is too small for NVM\n"); 1847 return ENOBUFS; 1848 } 1849 1850 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen); 1851 if (ret) { 1852 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET, 1853 "Cannot read NVM from section %d offset %d, length %d\n", 1854 section, offset, length); 1855 return ret; 1856 } 1857 offset += seglen; 1858 } 1859 1860 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET, 1861 "NVM section %d read completed\n", section); 1862 *len = offset; 1863 return 0; 1864 } 1865 1866 /* NVM offsets (in words) definitions */ 1867 enum iwm_nvm_offsets { 1868 /* NVM HW-Section offset (in words) definitions */ 1869 IWM_HW_ADDR = 0x15, 1870 1871 /* NVM SW-Section offset (in words) definitions */ 1872 IWM_NVM_SW_SECTION = 0x1C0, 1873 IWM_NVM_VERSION = 0, 1874 IWM_RADIO_CFG = 1, 1875 IWM_SKU = 2, 1876 IWM_N_HW_ADDRS = 3, 1877 IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION, 1878 1879 /* NVM calibration section offset (in words) definitions */ 1880 IWM_NVM_CALIB_SECTION = 0x2B8, 1881 IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION 1882 }; 1883 1884 enum iwm_8000_nvm_offsets { 1885 /* NVM HW-Section offset (in words) definitions */ 1886 IWM_HW_ADDR0_WFPM_8000 = 0x12, 1887 IWM_HW_ADDR1_WFPM_8000 = 0x16, 1888 IWM_HW_ADDR0_PCIE_8000 = 0x8A, 1889 IWM_HW_ADDR1_PCIE_8000 = 0x8E, 1890 IWM_MAC_ADDRESS_OVERRIDE_8000 = 1, 1891 1892 /* NVM SW-Section offset (in words) definitions */ 1893 IWM_NVM_SW_SECTION_8000 = 0x1C0, 1894 IWM_NVM_VERSION_8000 = 0, 1895 IWM_RADIO_CFG_8000 = 0, 1896 IWM_SKU_8000 = 2, 1897 IWM_N_HW_ADDRS_8000 = 3, 1898 1899 /* NVM REGULATORY -Section offset (in words) definitions */ 1900 IWM_NVM_CHANNELS_8000 = 0, 1901 IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7, 1902 IWM_NVM_LAR_OFFSET_8000 = 0x507, 1903 IWM_NVM_LAR_ENABLED_8000 = 0x7, 1904 1905 /* NVM calibration section offset (in words) definitions */ 1906 IWM_NVM_CALIB_SECTION_8000 = 0x2B8, 1907 IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000 1908 }; 1909 1910 /* SKU Capabilities (actual values from NVM definition) */ 1911 enum nvm_sku_bits { 1912 IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0), 1913 IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1), 1914 IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2), 1915 IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3), 1916 }; 1917 1918 /* radio config bits (actual values from NVM definition) */ 1919 #define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */ 1920 #define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */ 1921 #define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */ 1922 #define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */ 1923 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ 1924 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ 1925 1926 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x) (x & 0xF) 1927 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x) ((x >> 4) & 0xF) 1928 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x) ((x >> 8) & 0xF) 1929 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x) ((x >> 12) & 0xFFF) 1930 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x) ((x >> 24) & 0xF) 1931 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x) ((x >> 28) & 0xF) 1932 1933 /** 1934 * enum iwm_nvm_channel_flags - channel flags in NVM 1935 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo 1936 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel 1937 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed 1938 * @IWM_NVM_CHANNEL_RADAR: radar detection required 1939 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c 1940 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate 1941 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?) 1942 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?) 1943 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?) 1944 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?) 1945 */ 1946 enum iwm_nvm_channel_flags { 1947 IWM_NVM_CHANNEL_VALID = (1 << 0), 1948 IWM_NVM_CHANNEL_IBSS = (1 << 1), 1949 IWM_NVM_CHANNEL_ACTIVE = (1 << 3), 1950 IWM_NVM_CHANNEL_RADAR = (1 << 4), 1951 IWM_NVM_CHANNEL_DFS = (1 << 7), 1952 IWM_NVM_CHANNEL_WIDE = (1 << 8), 1953 IWM_NVM_CHANNEL_40MHZ = (1 << 9), 1954 IWM_NVM_CHANNEL_80MHZ = (1 << 10), 1955 IWM_NVM_CHANNEL_160MHZ = (1 << 11), 1956 }; 1957 1958 /* 1959 * Translate EEPROM flags to net80211. 1960 */ 1961 static uint32_t 1962 iwm_eeprom_channel_flags(uint16_t ch_flags) 1963 { 1964 uint32_t nflags; 1965 1966 nflags = 0; 1967 if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0) 1968 nflags |= IEEE80211_CHAN_PASSIVE; 1969 if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0) 1970 nflags |= IEEE80211_CHAN_NOADHOC; 1971 if (ch_flags & IWM_NVM_CHANNEL_RADAR) { 1972 nflags |= IEEE80211_CHAN_DFS; 1973 /* Just in case. */ 1974 nflags |= IEEE80211_CHAN_NOADHOC; 1975 } 1976 1977 return (nflags); 1978 } 1979 1980 static void 1981 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[], 1982 int maxchans, int *nchans, int ch_idx, size_t ch_num, 1983 const uint8_t bands[]) 1984 { 1985 const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags; 1986 uint32_t nflags; 1987 uint16_t ch_flags; 1988 uint8_t ieee; 1989 int error; 1990 1991 for (; ch_idx < ch_num; ch_idx++) { 1992 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx); 1993 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) 1994 ieee = iwm_nvm_channels[ch_idx]; 1995 else 1996 ieee = iwm_nvm_channels_8000[ch_idx]; 1997 1998 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) { 1999 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, 2000 "Ch. %d Flags %x [%sGHz] - No traffic\n", 2001 ieee, ch_flags, 2002 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ? 2003 "5.2" : "2.4"); 2004 continue; 2005 } 2006 2007 nflags = iwm_eeprom_channel_flags(ch_flags); 2008 error = ieee80211_add_channel(chans, maxchans, nchans, 2009 ieee, 0, 0, nflags, bands); 2010 if (error != 0) 2011 break; 2012 2013 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, 2014 "Ch. %d Flags %x [%sGHz] - Added\n", 2015 ieee, ch_flags, 2016 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ? 2017 "5.2" : "2.4"); 2018 } 2019 } 2020 2021 static void 2022 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans, 2023 struct ieee80211_channel chans[]) 2024 { 2025 struct iwm_softc *sc = ic->ic_softc; 2026 struct iwm_nvm_data *data = sc->nvm_data; 2027 uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)]; 2028 size_t ch_num; 2029 2030 memset(bands, 0, sizeof(bands)); 2031 /* 1-13: 11b/g channels. */ 2032 setbit(bands, IEEE80211_MODE_11B); 2033 setbit(bands, IEEE80211_MODE_11G); 2034 iwm_add_channel_band(sc, chans, maxchans, nchans, 0, 2035 IWM_NUM_2GHZ_CHANNELS - 1, bands); 2036 2037 /* 14: 11b channel only. */ 2038 clrbit(bands, IEEE80211_MODE_11G); 2039 iwm_add_channel_band(sc, chans, maxchans, nchans, 2040 IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands); 2041 2042 if (data->sku_cap_band_52GHz_enable) { 2043 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) 2044 ch_num = nitems(iwm_nvm_channels); 2045 else 2046 ch_num = nitems(iwm_nvm_channels_8000); 2047 memset(bands, 0, sizeof(bands)); 2048 setbit(bands, IEEE80211_MODE_11A); 2049 iwm_add_channel_band(sc, chans, maxchans, nchans, 2050 IWM_NUM_2GHZ_CHANNELS, ch_num, bands); 2051 } 2052 } 2053 2054 static void 2055 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data, 2056 const uint16_t *mac_override, const uint16_t *nvm_hw) 2057 { 2058 const uint8_t *hw_addr; 2059 2060 if (mac_override) { 2061 static const uint8_t reserved_mac[] = { 2062 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00 2063 }; 2064 2065 hw_addr = (const uint8_t *)(mac_override + 2066 IWM_MAC_ADDRESS_OVERRIDE_8000); 2067 2068 /* 2069 * Store the MAC address from MAO section. 2070 * No byte swapping is required in MAO section 2071 */ 2072 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr); 2073 2074 /* 2075 * Force the use of the OTP MAC address in case of reserved MAC 2076 * address in the NVM, or if address is given but invalid. 2077 */ 2078 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) && 2079 !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) && 2080 iwm_is_valid_ether_addr(data->hw_addr) && 2081 !IEEE80211_IS_MULTICAST(data->hw_addr)) 2082 return; 2083 2084 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2085 "%s: mac address from nvm override section invalid\n", 2086 __func__); 2087 } 2088 2089 if (nvm_hw) { 2090 /* read the mac address from WFMP registers */ 2091 uint32_t mac_addr0 = 2092 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0)); 2093 uint32_t mac_addr1 = 2094 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1)); 2095 2096 hw_addr = (const uint8_t *)&mac_addr0; 2097 data->hw_addr[0] = hw_addr[3]; 2098 data->hw_addr[1] = hw_addr[2]; 2099 data->hw_addr[2] = hw_addr[1]; 2100 data->hw_addr[3] = hw_addr[0]; 2101 2102 hw_addr = (const uint8_t *)&mac_addr1; 2103 data->hw_addr[4] = hw_addr[1]; 2104 data->hw_addr[5] = hw_addr[0]; 2105 2106 return; 2107 } 2108 2109 device_printf(sc->sc_dev, "%s: mac address not found\n", __func__); 2110 memset(data->hw_addr, 0, sizeof(data->hw_addr)); 2111 } 2112 2113 static int 2114 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw, 2115 const uint16_t *phy_sku) 2116 { 2117 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) 2118 return le16_to_cpup(nvm_sw + IWM_SKU); 2119 2120 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000)); 2121 } 2122 2123 static int 2124 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw) 2125 { 2126 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) 2127 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION); 2128 else 2129 return le32_to_cpup((const uint32_t *)(nvm_sw + 2130 IWM_NVM_VERSION_8000)); 2131 } 2132 2133 static int 2134 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw, 2135 const uint16_t *phy_sku) 2136 { 2137 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) 2138 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG); 2139 2140 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000)); 2141 } 2142 2143 static int 2144 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw) 2145 { 2146 int n_hw_addr; 2147 2148 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) 2149 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS); 2150 2151 n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000)); 2152 2153 return n_hw_addr & IWM_N_HW_ADDR_MASK; 2154 } 2155 2156 static void 2157 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data, 2158 uint32_t radio_cfg) 2159 { 2160 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) { 2161 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg); 2162 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg); 2163 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg); 2164 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg); 2165 return; 2166 } 2167 2168 /* set the radio configuration for family 8000 */ 2169 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg); 2170 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg); 2171 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg); 2172 data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg); 2173 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg); 2174 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg); 2175 } 2176 2177 static int 2178 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data, 2179 const uint16_t *nvm_hw, const uint16_t *mac_override) 2180 { 2181 #ifdef notyet /* for FAMILY 9000 */ 2182 if (cfg->mac_addr_from_csr) { 2183 iwm_set_hw_address_from_csr(sc, data); 2184 } else 2185 #endif 2186 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) { 2187 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR); 2188 2189 /* The byte order is little endian 16 bit, meaning 214365 */ 2190 data->hw_addr[0] = hw_addr[1]; 2191 data->hw_addr[1] = hw_addr[0]; 2192 data->hw_addr[2] = hw_addr[3]; 2193 data->hw_addr[3] = hw_addr[2]; 2194 data->hw_addr[4] = hw_addr[5]; 2195 data->hw_addr[5] = hw_addr[4]; 2196 } else { 2197 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw); 2198 } 2199 2200 if (!iwm_is_valid_ether_addr(data->hw_addr)) { 2201 device_printf(sc->sc_dev, "no valid mac address was found\n"); 2202 return EINVAL; 2203 } 2204 2205 return 0; 2206 } 2207 2208 static struct iwm_nvm_data * 2209 iwm_parse_nvm_data(struct iwm_softc *sc, 2210 const uint16_t *nvm_hw, const uint16_t *nvm_sw, 2211 const uint16_t *nvm_calib, const uint16_t *mac_override, 2212 const uint16_t *phy_sku, const uint16_t *regulatory) 2213 { 2214 struct iwm_nvm_data *data; 2215 uint32_t sku, radio_cfg; 2216 uint16_t lar_config; 2217 2218 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) { 2219 data = kmalloc(sizeof(*data) + 2220 IWM_NUM_CHANNELS * sizeof(uint16_t), 2221 M_DEVBUF, M_WAITOK | M_ZERO); 2222 } else { 2223 data = kmalloc(sizeof(*data) + 2224 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t), 2225 M_DEVBUF, M_WAITOK | M_ZERO); 2226 } 2227 if (!data) 2228 return NULL; 2229 2230 data->nvm_version = iwm_get_nvm_version(sc, nvm_sw); 2231 2232 radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku); 2233 iwm_set_radio_cfg(sc, data, radio_cfg); 2234 2235 sku = iwm_get_sku(sc, nvm_sw, phy_sku); 2236 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ; 2237 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ; 2238 data->sku_cap_11n_enable = 0; 2239 2240 data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw); 2241 2242 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) { 2243 uint16_t lar_offset = data->nvm_version < 0xE39 ? 2244 IWM_NVM_LAR_OFFSET_8000_OLD : 2245 IWM_NVM_LAR_OFFSET_8000; 2246 2247 lar_config = le16_to_cpup(regulatory + lar_offset); 2248 data->lar_enabled = !!(lar_config & 2249 IWM_NVM_LAR_ENABLED_8000); 2250 } 2251 2252 /* If no valid mac address was found - bail out */ 2253 if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) { 2254 kfree(data, M_DEVBUF); 2255 return NULL; 2256 } 2257 2258 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) { 2259 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS], 2260 IWM_NUM_CHANNELS * sizeof(uint16_t)); 2261 } else { 2262 memcpy(data->nvm_ch_flags, ®ulatory[IWM_NVM_CHANNELS_8000], 2263 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t)); 2264 } 2265 2266 return data; 2267 } 2268 2269 static void 2270 iwm_free_nvm_data(struct iwm_nvm_data *data) 2271 { 2272 if (data != NULL) 2273 kfree(data, M_DEVBUF); 2274 } 2275 2276 static struct iwm_nvm_data * 2277 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections) 2278 { 2279 const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku; 2280 2281 /* Checking for required sections */ 2282 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) { 2283 if (!sections[IWM_NVM_SECTION_TYPE_SW].data || 2284 !sections[sc->cfg->nvm_hw_section_num].data) { 2285 device_printf(sc->sc_dev, 2286 "Can't parse empty OTP/NVM sections\n"); 2287 return NULL; 2288 } 2289 } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) { 2290 /* SW and REGULATORY sections are mandatory */ 2291 if (!sections[IWM_NVM_SECTION_TYPE_SW].data || 2292 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) { 2293 device_printf(sc->sc_dev, 2294 "Can't parse empty OTP/NVM sections\n"); 2295 return NULL; 2296 } 2297 /* MAC_OVERRIDE or at least HW section must exist */ 2298 if (!sections[sc->cfg->nvm_hw_section_num].data && 2299 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) { 2300 device_printf(sc->sc_dev, 2301 "Can't parse mac_address, empty sections\n"); 2302 return NULL; 2303 } 2304 2305 /* PHY_SKU section is mandatory in B0 */ 2306 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) { 2307 device_printf(sc->sc_dev, 2308 "Can't parse phy_sku in B0, empty sections\n"); 2309 return NULL; 2310 } 2311 } else { 2312 panic("unknown device family %d\n", sc->cfg->device_family); 2313 } 2314 2315 hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data; 2316 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data; 2317 calib = (const uint16_t *) 2318 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data; 2319 regulatory = (const uint16_t *) 2320 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data; 2321 mac_override = (const uint16_t *) 2322 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data; 2323 phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data; 2324 2325 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override, 2326 phy_sku, regulatory); 2327 } 2328 2329 static int 2330 iwm_nvm_init(struct iwm_softc *sc) 2331 { 2332 struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS]; 2333 int i, ret, section; 2334 uint32_t size_read = 0; 2335 uint8_t *nvm_buffer, *temp; 2336 uint16_t len; 2337 2338 memset(nvm_sections, 0, sizeof(nvm_sections)); 2339 2340 if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS) 2341 return EINVAL; 2342 2343 /* load NVM values from nic */ 2344 /* Read From FW NVM */ 2345 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n"); 2346 2347 nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF, 2348 M_INTWAIT | M_ZERO); 2349 if (!nvm_buffer) 2350 return ENOMEM; 2351 for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) { 2352 /* we override the constness for initial read */ 2353 ret = iwm_nvm_read_section(sc, section, nvm_buffer, 2354 &len, size_read); 2355 if (ret) 2356 continue; 2357 size_read += len; 2358 temp = kmalloc(len, M_DEVBUF, M_INTWAIT); 2359 if (!temp) { 2360 ret = ENOMEM; 2361 break; 2362 } 2363 memcpy(temp, nvm_buffer, len); 2364 2365 nvm_sections[section].data = temp; 2366 nvm_sections[section].length = len; 2367 } 2368 if (!size_read) 2369 device_printf(sc->sc_dev, "OTP is blank\n"); 2370 kfree(nvm_buffer, M_DEVBUF); 2371 2372 sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections); 2373 if (!sc->nvm_data) 2374 return EINVAL; 2375 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET, 2376 "nvm version = %x\n", sc->nvm_data->nvm_version); 2377 2378 for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) { 2379 if (nvm_sections[i].data != NULL) 2380 kfree(nvm_sections[i].data, M_DEVBUF); 2381 } 2382 2383 return 0; 2384 } 2385 2386 static int 2387 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num, 2388 const struct iwm_fw_desc *section) 2389 { 2390 struct iwm_dma_info *dma = &sc->fw_dma; 2391 uint8_t *v_addr; 2392 bus_addr_t p_addr; 2393 uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len); 2394 int ret = 0; 2395 2396 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2397 "%s: [%d] uCode section being loaded...\n", 2398 __func__, section_num); 2399 2400 v_addr = dma->vaddr; 2401 p_addr = dma->paddr; 2402 2403 for (offset = 0; offset < section->len; offset += chunk_sz) { 2404 uint32_t copy_size, dst_addr; 2405 int extended_addr = FALSE; 2406 2407 copy_size = MIN(chunk_sz, section->len - offset); 2408 dst_addr = section->offset + offset; 2409 2410 if (dst_addr >= IWM_FW_MEM_EXTENDED_START && 2411 dst_addr <= IWM_FW_MEM_EXTENDED_END) 2412 extended_addr = TRUE; 2413 2414 if (extended_addr) 2415 iwm_set_bits_prph(sc, IWM_LMPM_CHICK, 2416 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE); 2417 2418 memcpy(v_addr, (const uint8_t *)section->data + offset, 2419 copy_size); 2420 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 2421 ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr, 2422 copy_size); 2423 2424 if (extended_addr) 2425 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK, 2426 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE); 2427 2428 if (ret) { 2429 device_printf(sc->sc_dev, 2430 "%s: Could not load the [%d] uCode section\n", 2431 __func__, section_num); 2432 break; 2433 } 2434 } 2435 2436 return ret; 2437 } 2438 2439 /* 2440 * ucode 2441 */ 2442 static int 2443 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr, 2444 bus_addr_t phy_addr, uint32_t byte_cnt) 2445 { 2446 int ret; 2447 2448 sc->sc_fw_chunk_done = 0; 2449 2450 if (!iwm_nic_lock(sc)) 2451 return EBUSY; 2452 2453 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL), 2454 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); 2455 2456 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL), 2457 dst_addr); 2458 2459 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL), 2460 phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); 2461 2462 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL), 2463 (iwm_get_dma_hi_addr(phy_addr) 2464 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); 2465 2466 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL), 2467 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | 2468 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | 2469 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); 2470 2471 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL), 2472 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 2473 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | 2474 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); 2475 2476 iwm_nic_unlock(sc); 2477 2478 /* wait up to 5s for this segment to load */ 2479 ret = 0; 2480 while (!sc->sc_fw_chunk_done) { 2481 #if defined(__DragonFly__) 2482 ret = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", 5 * hz); 2483 #else 2484 ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", 5 * hz); 2485 #endif 2486 if (ret) 2487 break; 2488 } 2489 2490 if (ret != 0) { 2491 device_printf(sc->sc_dev, 2492 "fw chunk addr 0x%x len %d failed to load\n", 2493 dst_addr, byte_cnt); 2494 return ETIMEDOUT; 2495 } 2496 2497 return 0; 2498 } 2499 2500 static int 2501 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc, 2502 const struct iwm_fw_img *image, int cpu, int *first_ucode_section) 2503 { 2504 int shift_param; 2505 int i, ret = 0, sec_num = 0x1; 2506 uint32_t val, last_read_idx = 0; 2507 2508 if (cpu == 1) { 2509 shift_param = 0; 2510 *first_ucode_section = 0; 2511 } else { 2512 shift_param = 16; 2513 (*first_ucode_section)++; 2514 } 2515 2516 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) { 2517 last_read_idx = i; 2518 2519 /* 2520 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between 2521 * CPU1 to CPU2. 2522 * PAGING_SEPARATOR_SECTION delimiter - separate between 2523 * CPU2 non paged to CPU2 paging sec. 2524 */ 2525 if (!image->sec[i].data || 2526 image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION || 2527 image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) { 2528 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2529 "Break since Data not valid or Empty section, sec = %d\n", 2530 i); 2531 break; 2532 } 2533 ret = iwm_pcie_load_section(sc, i, &image->sec[i]); 2534 if (ret) 2535 return ret; 2536 2537 /* Notify the ucode of the loaded section number and status */ 2538 if (iwm_nic_lock(sc)) { 2539 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS); 2540 val = val | (sec_num << shift_param); 2541 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val); 2542 sec_num = (sec_num << 1) | 0x1; 2543 iwm_nic_unlock(sc); 2544 } 2545 } 2546 2547 *first_ucode_section = last_read_idx; 2548 2549 iwm_enable_interrupts(sc); 2550 2551 if (iwm_nic_lock(sc)) { 2552 if (cpu == 1) 2553 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF); 2554 else 2555 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF); 2556 iwm_nic_unlock(sc); 2557 } 2558 2559 return 0; 2560 } 2561 2562 static int 2563 iwm_pcie_load_cpu_sections(struct iwm_softc *sc, 2564 const struct iwm_fw_img *image, int cpu, int *first_ucode_section) 2565 { 2566 int shift_param; 2567 int i, ret = 0; 2568 uint32_t last_read_idx = 0; 2569 2570 if (cpu == 1) { 2571 shift_param = 0; 2572 *first_ucode_section = 0; 2573 } else { 2574 shift_param = 16; 2575 (*first_ucode_section)++; 2576 } 2577 2578 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) { 2579 last_read_idx = i; 2580 2581 /* 2582 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between 2583 * CPU1 to CPU2. 2584 * PAGING_SEPARATOR_SECTION delimiter - separate between 2585 * CPU2 non paged to CPU2 paging sec. 2586 */ 2587 if (!image->sec[i].data || 2588 image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION || 2589 image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) { 2590 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2591 "Break since Data not valid or Empty section, sec = %d\n", 2592 i); 2593 break; 2594 } 2595 2596 ret = iwm_pcie_load_section(sc, i, &image->sec[i]); 2597 if (ret) 2598 return ret; 2599 } 2600 2601 *first_ucode_section = last_read_idx; 2602 2603 return 0; 2604 2605 } 2606 2607 static int 2608 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image) 2609 { 2610 int ret = 0; 2611 int first_ucode_section; 2612 2613 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n", 2614 image->is_dual_cpus ? "Dual" : "Single"); 2615 2616 /* load to FW the binary non secured sections of CPU1 */ 2617 ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section); 2618 if (ret) 2619 return ret; 2620 2621 if (image->is_dual_cpus) { 2622 /* set CPU2 header address */ 2623 if (iwm_nic_lock(sc)) { 2624 iwm_write_prph(sc, 2625 IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR, 2626 IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE); 2627 iwm_nic_unlock(sc); 2628 } 2629 2630 /* load to FW the binary sections of CPU2 */ 2631 ret = iwm_pcie_load_cpu_sections(sc, image, 2, 2632 &first_ucode_section); 2633 if (ret) 2634 return ret; 2635 } 2636 2637 iwm_enable_interrupts(sc); 2638 2639 /* release CPU reset */ 2640 IWM_WRITE(sc, IWM_CSR_RESET, 0); 2641 2642 return 0; 2643 } 2644 2645 int 2646 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc, 2647 const struct iwm_fw_img *image) 2648 { 2649 int ret = 0; 2650 int first_ucode_section; 2651 2652 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n", 2653 image->is_dual_cpus ? "Dual" : "Single"); 2654 2655 /* configure the ucode to be ready to get the secured image */ 2656 /* release CPU reset */ 2657 if (iwm_nic_lock(sc)) { 2658 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, 2659 IWM_RELEASE_CPU_RESET_BIT); 2660 iwm_nic_unlock(sc); 2661 } 2662 2663 /* load to FW the binary Secured sections of CPU1 */ 2664 ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1, 2665 &first_ucode_section); 2666 if (ret) 2667 return ret; 2668 2669 /* load to FW the binary sections of CPU2 */ 2670 return iwm_pcie_load_cpu_sections_8000(sc, image, 2, 2671 &first_ucode_section); 2672 } 2673 2674 /* XXX Get rid of this definition */ 2675 static inline void 2676 iwm_enable_fw_load_int(struct iwm_softc *sc) 2677 { 2678 IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n"); 2679 sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX; 2680 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask); 2681 } 2682 2683 /* XXX Add proper rfkill support code */ 2684 static int 2685 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw) 2686 { 2687 int ret; 2688 2689 /* This may fail if AMT took ownership of the device */ 2690 if (iwm_prepare_card_hw(sc)) { 2691 device_printf(sc->sc_dev, 2692 "%s: Exit HW not ready\n", __func__); 2693 ret = EIO; 2694 goto out; 2695 } 2696 2697 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF); 2698 2699 iwm_disable_interrupts(sc); 2700 2701 /* make sure rfkill handshake bits are cleared */ 2702 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL); 2703 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, 2704 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 2705 2706 /* clear (again), then enable host interrupts */ 2707 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF); 2708 2709 ret = iwm_nic_init(sc); 2710 if (ret) { 2711 device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__); 2712 goto out; 2713 } 2714 2715 /* 2716 * Now, we load the firmware and don't want to be interrupted, even 2717 * by the RF-Kill interrupt (hence mask all the interrupt besides the 2718 * FH_TX interrupt which is needed to load the firmware). If the 2719 * RF-Kill switch is toggled, we will find out after having loaded 2720 * the firmware and return the proper value to the caller. 2721 */ 2722 iwm_enable_fw_load_int(sc); 2723 2724 /* really make sure rfkill handshake bits are cleared */ 2725 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL); 2726 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL); 2727 2728 /* Load the given image to the HW */ 2729 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) 2730 ret = iwm_pcie_load_given_ucode_8000(sc, fw); 2731 else 2732 ret = iwm_pcie_load_given_ucode(sc, fw); 2733 2734 /* XXX re-check RF-Kill state */ 2735 2736 out: 2737 return ret; 2738 } 2739 2740 static int 2741 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant) 2742 { 2743 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = { 2744 .valid = htole32(valid_tx_ant), 2745 }; 2746 2747 return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 2748 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd); 2749 } 2750 2751 static int 2752 iwm_send_phy_cfg_cmd(struct iwm_softc *sc) 2753 { 2754 struct iwm_phy_cfg_cmd phy_cfg_cmd; 2755 enum iwm_ucode_type ucode_type = sc->cur_ucode; 2756 2757 /* Set parameters */ 2758 phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc)); 2759 phy_cfg_cmd.calib_control.event_trigger = 2760 sc->sc_default_calib[ucode_type].event_trigger; 2761 phy_cfg_cmd.calib_control.flow_trigger = 2762 sc->sc_default_calib[ucode_type].flow_trigger; 2763 2764 IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET, 2765 "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg); 2766 return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC, 2767 sizeof(phy_cfg_cmd), &phy_cfg_cmd); 2768 } 2769 2770 static int 2771 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data) 2772 { 2773 struct iwm_mvm_alive_data *alive_data = data; 2774 struct iwm_mvm_alive_resp_ver1 *palive1; 2775 struct iwm_mvm_alive_resp_ver2 *palive2; 2776 struct iwm_mvm_alive_resp *palive; 2777 2778 if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) { 2779 palive1 = (void *)pkt->data; 2780 2781 sc->support_umac_log = FALSE; 2782 sc->error_event_table = 2783 le32toh(palive1->error_event_table_ptr); 2784 sc->log_event_table = 2785 le32toh(palive1->log_event_table_ptr); 2786 alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr); 2787 2788 alive_data->valid = le16toh(palive1->status) == 2789 IWM_ALIVE_STATUS_OK; 2790 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2791 "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", 2792 le16toh(palive1->status), palive1->ver_type, 2793 palive1->ver_subtype, palive1->flags); 2794 } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) { 2795 palive2 = (void *)pkt->data; 2796 sc->error_event_table = 2797 le32toh(palive2->error_event_table_ptr); 2798 sc->log_event_table = 2799 le32toh(palive2->log_event_table_ptr); 2800 alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr); 2801 sc->umac_error_event_table = 2802 le32toh(palive2->error_info_addr); 2803 2804 alive_data->valid = le16toh(palive2->status) == 2805 IWM_ALIVE_STATUS_OK; 2806 if (sc->umac_error_event_table) 2807 sc->support_umac_log = TRUE; 2808 2809 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2810 "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", 2811 le16toh(palive2->status), palive2->ver_type, 2812 palive2->ver_subtype, palive2->flags); 2813 2814 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2815 "UMAC version: Major - 0x%x, Minor - 0x%x\n", 2816 palive2->umac_major, palive2->umac_minor); 2817 } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) { 2818 palive = (void *)pkt->data; 2819 2820 sc->error_event_table = 2821 le32toh(palive->error_event_table_ptr); 2822 sc->log_event_table = 2823 le32toh(palive->log_event_table_ptr); 2824 alive_data->scd_base_addr = le32toh(palive->scd_base_ptr); 2825 sc->umac_error_event_table = 2826 le32toh(palive->error_info_addr); 2827 2828 alive_data->valid = le16toh(palive->status) == 2829 IWM_ALIVE_STATUS_OK; 2830 if (sc->umac_error_event_table) 2831 sc->support_umac_log = TRUE; 2832 2833 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2834 "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", 2835 le16toh(palive->status), palive->ver_type, 2836 palive->ver_subtype, palive->flags); 2837 2838 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2839 "UMAC version: Major - 0x%x, Minor - 0x%x\n", 2840 le32toh(palive->umac_major), 2841 le32toh(palive->umac_minor)); 2842 } 2843 2844 return TRUE; 2845 } 2846 2847 static int 2848 iwm_wait_phy_db_entry(struct iwm_softc *sc, 2849 struct iwm_rx_packet *pkt, void *data) 2850 { 2851 struct iwm_phy_db *phy_db = data; 2852 2853 if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) { 2854 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) { 2855 device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n", 2856 __func__, pkt->hdr.code); 2857 } 2858 return TRUE; 2859 } 2860 2861 if (iwm_phy_db_set_section(phy_db, pkt)) { 2862 device_printf(sc->sc_dev, 2863 "%s: iwm_phy_db_set_section failed\n", __func__); 2864 } 2865 2866 return FALSE; 2867 } 2868 2869 static int 2870 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc, 2871 enum iwm_ucode_type ucode_type) 2872 { 2873 struct iwm_notification_wait alive_wait; 2874 struct iwm_mvm_alive_data alive_data; 2875 const struct iwm_fw_img *fw; 2876 enum iwm_ucode_type old_type = sc->cur_ucode; 2877 int error; 2878 static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE }; 2879 2880 fw = &sc->sc_fw.img[ucode_type]; 2881 sc->cur_ucode = ucode_type; 2882 sc->ucode_loaded = FALSE; 2883 2884 memset(&alive_data, 0, sizeof(alive_data)); 2885 iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait, 2886 alive_cmd, NELEM(alive_cmd), 2887 iwm_alive_fn, &alive_data); 2888 2889 error = iwm_start_fw(sc, fw); 2890 if (error) { 2891 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error); 2892 sc->cur_ucode = old_type; 2893 iwm_remove_notification(sc->sc_notif_wait, &alive_wait); 2894 return error; 2895 } 2896 2897 /* 2898 * Some things may run in the background now, but we 2899 * just wait for the ALIVE notification here. 2900 */ 2901 IWM_UNLOCK(sc); 2902 error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait, 2903 IWM_MVM_UCODE_ALIVE_TIMEOUT); 2904 IWM_LOCK(sc); 2905 if (error) { 2906 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) { 2907 uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a; 2908 if (iwm_nic_lock(sc)) { 2909 a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS); 2910 b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS); 2911 iwm_nic_unlock(sc); 2912 } 2913 device_printf(sc->sc_dev, 2914 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", 2915 a, b); 2916 } 2917 sc->cur_ucode = old_type; 2918 return error; 2919 } 2920 2921 if (!alive_data.valid) { 2922 device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n", 2923 __func__); 2924 sc->cur_ucode = old_type; 2925 return EIO; 2926 } 2927 2928 iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr); 2929 2930 /* 2931 * configure and operate fw paging mechanism. 2932 * driver configures the paging flow only once, CPU2 paging image 2933 * included in the IWM_UCODE_INIT image. 2934 */ 2935 if (fw->paging_mem_size) { 2936 error = iwm_save_fw_paging(sc, fw); 2937 if (error) { 2938 device_printf(sc->sc_dev, 2939 "%s: failed to save the FW paging image\n", 2940 __func__); 2941 return error; 2942 } 2943 2944 error = iwm_send_paging_cmd(sc, fw); 2945 if (error) { 2946 device_printf(sc->sc_dev, 2947 "%s: failed to send the paging cmd\n", __func__); 2948 iwm_free_fw_paging(sc); 2949 return error; 2950 } 2951 } 2952 2953 if (!error) 2954 sc->ucode_loaded = TRUE; 2955 return error; 2956 } 2957 2958 /* 2959 * mvm misc bits 2960 */ 2961 2962 static int 2963 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm) 2964 { 2965 struct iwm_notification_wait calib_wait; 2966 static const uint16_t init_complete[] = { 2967 IWM_INIT_COMPLETE_NOTIF, 2968 IWM_CALIB_RES_NOTIF_PHY_DB 2969 }; 2970 int ret; 2971 2972 /* do not operate with rfkill switch turned on */ 2973 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) { 2974 device_printf(sc->sc_dev, 2975 "radio is disabled by hardware switch\n"); 2976 return EPERM; 2977 } 2978 2979 iwm_init_notification_wait(sc->sc_notif_wait, 2980 &calib_wait, 2981 init_complete, 2982 NELEM(init_complete), 2983 iwm_wait_phy_db_entry, 2984 sc->sc_phy_db); 2985 2986 /* Will also start the device */ 2987 ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT); 2988 if (ret) { 2989 device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n", 2990 ret); 2991 goto error; 2992 } 2993 2994 if (justnvm) { 2995 /* Read nvm */ 2996 ret = iwm_nvm_init(sc); 2997 if (ret) { 2998 device_printf(sc->sc_dev, "failed to read nvm\n"); 2999 goto error; 3000 } 3001 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr); 3002 goto error; 3003 } 3004 3005 ret = iwm_send_bt_init_conf(sc); 3006 if (ret) { 3007 device_printf(sc->sc_dev, 3008 "failed to send bt coex configuration: %d\n", ret); 3009 goto error; 3010 } 3011 3012 /* Send TX valid antennas before triggering calibrations */ 3013 ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc)); 3014 if (ret) { 3015 device_printf(sc->sc_dev, 3016 "failed to send antennas before calibration: %d\n", ret); 3017 goto error; 3018 } 3019 3020 /* 3021 * Send phy configurations command to init uCode 3022 * to start the 16.0 uCode init image internal calibrations. 3023 */ 3024 ret = iwm_send_phy_cfg_cmd(sc); 3025 if (ret) { 3026 device_printf(sc->sc_dev, 3027 "%s: Failed to run INIT calibrations: %d\n", 3028 __func__, ret); 3029 goto error; 3030 } 3031 3032 /* 3033 * Nothing to do but wait for the init complete notification 3034 * from the firmware. 3035 */ 3036 IWM_UNLOCK(sc); 3037 ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait, 3038 IWM_MVM_UCODE_CALIB_TIMEOUT); 3039 IWM_LOCK(sc); 3040 3041 3042 goto out; 3043 3044 error: 3045 iwm_remove_notification(sc->sc_notif_wait, &calib_wait); 3046 out: 3047 return ret; 3048 } 3049 3050 static int 3051 iwm_mvm_config_ltr(struct iwm_softc *sc) 3052 { 3053 struct iwm_ltr_config_cmd cmd = { 3054 .flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE), 3055 }; 3056 3057 if (!sc->sc_ltr_enabled) 3058 return 0; 3059 3060 return iwm_mvm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd); 3061 } 3062 3063 /* 3064 * receive side 3065 */ 3066 3067 /* (re)stock rx ring, called at init-time and at runtime */ 3068 static int 3069 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx) 3070 { 3071 struct iwm_rx_ring *ring = &sc->rxq; 3072 struct iwm_rx_data *data = &ring->data[idx]; 3073 struct mbuf *m; 3074 bus_dmamap_t dmamap; 3075 bus_dma_segment_t seg; 3076 int nsegs, error; 3077 3078 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE); 3079 if (m == NULL) 3080 return ENOBUFS; 3081 3082 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 3083 #if defined(__DragonFly__) 3084 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map, 3085 m, &seg, 1, &nsegs, BUS_DMA_NOWAIT); 3086 #else 3087 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m, 3088 &seg, &nsegs, BUS_DMA_NOWAIT); 3089 #endif 3090 if (error != 0) { 3091 device_printf(sc->sc_dev, 3092 "%s: can't map mbuf, error %d\n", __func__, error); 3093 m_freem(m); 3094 return error; 3095 } 3096 3097 if (data->m != NULL) 3098 bus_dmamap_unload(ring->data_dmat, data->map); 3099 3100 /* Swap ring->spare_map with data->map */ 3101 dmamap = data->map; 3102 data->map = ring->spare_map; 3103 ring->spare_map = dmamap; 3104 3105 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD); 3106 data->m = m; 3107 3108 /* Update RX descriptor. */ 3109 KKASSERT((seg.ds_addr & 255) == 0); 3110 ring->desc[idx] = htole32(seg.ds_addr >> 8); 3111 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3112 BUS_DMASYNC_PREWRITE); 3113 3114 return 0; 3115 } 3116 3117 /* 3118 * iwm_mvm_get_signal_strength - use new rx PHY INFO API 3119 * values are reported by the fw as positive values - need to negate 3120 * to obtain their dBM. Account for missing antennas by replacing 0 3121 * values by -256dBm: practically 0 power and a non-feasible 8 bit value. 3122 */ 3123 static int 3124 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info) 3125 { 3126 int energy_a, energy_b, energy_c, max_energy; 3127 uint32_t val; 3128 3129 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]); 3130 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >> 3131 IWM_RX_INFO_ENERGY_ANT_A_POS; 3132 energy_a = energy_a ? -energy_a : -256; 3133 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >> 3134 IWM_RX_INFO_ENERGY_ANT_B_POS; 3135 energy_b = energy_b ? -energy_b : -256; 3136 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >> 3137 IWM_RX_INFO_ENERGY_ANT_C_POS; 3138 energy_c = energy_c ? -energy_c : -256; 3139 max_energy = MAX(energy_a, energy_b); 3140 max_energy = MAX(max_energy, energy_c); 3141 3142 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3143 "energy In A %d B %d C %d , and max %d\n", 3144 energy_a, energy_b, energy_c, max_energy); 3145 3146 return max_energy; 3147 } 3148 3149 static void 3150 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt) 3151 { 3152 struct iwm_rx_phy_info *phy_info = (void *)pkt->data; 3153 3154 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n"); 3155 3156 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info)); 3157 } 3158 3159 /* 3160 * Retrieve the average noise (in dBm) among receivers. 3161 */ 3162 static int 3163 iwm_get_noise(struct iwm_softc *sc, 3164 const struct iwm_mvm_statistics_rx_non_phy *stats) 3165 { 3166 int i, total, nbant, noise; 3167 3168 total = nbant = noise = 0; 3169 for (i = 0; i < 3; i++) { 3170 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff; 3171 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n", 3172 __func__, i, noise); 3173 3174 if (noise) { 3175 total += noise; 3176 nbant++; 3177 } 3178 } 3179 3180 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n", 3181 __func__, nbant, total); 3182 #if 0 3183 /* There should be at least one antenna but check anyway. */ 3184 return (nbant == 0) ? -127 : (total / nbant) - 107; 3185 #else 3186 /* For now, just hard-code it to -96 to be safe */ 3187 return (-96); 3188 #endif 3189 } 3190 3191 static void 3192 iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt) 3193 { 3194 struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data; 3195 3196 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats)); 3197 sc->sc_noise = iwm_get_noise(sc, &stats->rx.general); 3198 } 3199 3200 /* 3201 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler 3202 * 3203 * Handles the actual data of the Rx packet from the fw 3204 */ 3205 static boolean_t 3206 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset, 3207 boolean_t stolen) 3208 { 3209 struct ieee80211com *ic = &sc->sc_ic; 3210 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3211 struct ieee80211_frame *wh; 3212 struct ieee80211_node *ni; 3213 struct ieee80211_rx_stats rxs; 3214 struct iwm_rx_phy_info *phy_info; 3215 struct iwm_rx_mpdu_res_start *rx_res; 3216 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset); 3217 uint32_t len; 3218 uint32_t rx_pkt_status; 3219 int rssi; 3220 3221 phy_info = &sc->sc_last_phy_info; 3222 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data; 3223 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res)); 3224 len = le16toh(rx_res->byte_count); 3225 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len)); 3226 3227 if (__predict_false(phy_info->cfg_phy_cnt > 20)) { 3228 device_printf(sc->sc_dev, 3229 "dsp size out of range [0,20]: %d\n", 3230 phy_info->cfg_phy_cnt); 3231 return FALSE; 3232 } 3233 3234 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) || 3235 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) { 3236 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3237 "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status); 3238 return FALSE; /* drop */ 3239 } 3240 3241 rssi = iwm_mvm_get_signal_strength(sc, phy_info); 3242 /* Note: RSSI is absolute (ie a -ve value) */ 3243 if (rssi < IWM_MIN_DBM) 3244 rssi = IWM_MIN_DBM; 3245 else if (rssi > IWM_MAX_DBM) 3246 rssi = IWM_MAX_DBM; 3247 3248 /* Map it to relative value */ 3249 rssi = rssi - sc->sc_noise; 3250 3251 /* replenish ring for the buffer we're going to feed to the sharks */ 3252 if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) { 3253 device_printf(sc->sc_dev, "%s: unable to add more buffers\n", 3254 __func__); 3255 return FALSE; 3256 } 3257 3258 m->m_data = pkt->data + sizeof(*rx_res); 3259 m->m_pkthdr.len = m->m_len = len; 3260 3261 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3262 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise); 3263 3264 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 3265 3266 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3267 "%s: phy_info: channel=%d, flags=0x%08x\n", 3268 __func__, 3269 le16toh(phy_info->channel), 3270 le16toh(phy_info->phy_flags)); 3271 3272 /* 3273 * Populate an RX state struct with the provided information. 3274 */ 3275 bzero(&rxs, sizeof(rxs)); 3276 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ; 3277 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI; 3278 rxs.c_ieee = le16toh(phy_info->channel); 3279 if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) { 3280 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ); 3281 } else { 3282 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ); 3283 } 3284 /* rssi is in 1/2db units */ 3285 rxs.rssi = rssi * 2; 3286 rxs.nf = sc->sc_noise; 3287 3288 if (ieee80211_radiotap_active_vap(vap)) { 3289 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap; 3290 3291 tap->wr_flags = 0; 3292 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE)) 3293 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 3294 tap->wr_chan_freq = htole16(rxs.c_freq); 3295 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */ 3296 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags); 3297 tap->wr_dbm_antsignal = (int8_t)rssi; 3298 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise; 3299 tap->wr_tsft = phy_info->system_timestamp; 3300 switch (phy_info->rate) { 3301 /* CCK rates. */ 3302 case 10: tap->wr_rate = 2; break; 3303 case 20: tap->wr_rate = 4; break; 3304 case 55: tap->wr_rate = 11; break; 3305 case 110: tap->wr_rate = 22; break; 3306 /* OFDM rates. */ 3307 case 0xd: tap->wr_rate = 12; break; 3308 case 0xf: tap->wr_rate = 18; break; 3309 case 0x5: tap->wr_rate = 24; break; 3310 case 0x7: tap->wr_rate = 36; break; 3311 case 0x9: tap->wr_rate = 48; break; 3312 case 0xb: tap->wr_rate = 72; break; 3313 case 0x1: tap->wr_rate = 96; break; 3314 case 0x3: tap->wr_rate = 108; break; 3315 /* Unknown rate: should not happen. */ 3316 default: tap->wr_rate = 0; 3317 } 3318 } 3319 3320 IWM_UNLOCK(sc); 3321 if (ni != NULL) { 3322 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m); 3323 ieee80211_input_mimo(ni, m, &rxs); 3324 ieee80211_free_node(ni); 3325 } else { 3326 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m); 3327 ieee80211_input_mimo_all(ic, m, &rxs); 3328 } 3329 IWM_LOCK(sc); 3330 3331 return TRUE; 3332 } 3333 3334 static int 3335 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt, 3336 struct iwm_node *in) 3337 { 3338 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data; 3339 struct ieee80211_node *ni = &in->in_ni; 3340 struct ieee80211vap *vap = ni->ni_vap; 3341 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK; 3342 int failack = tx_resp->failure_frame; 3343 int new_rate, cur_rate = vap->iv_bss->ni_txrate; 3344 boolean_t rate_matched; 3345 uint8_t tx_resp_rate; 3346 int ret; 3347 3348 KASSERT(tx_resp->frame_count == 1, ("too many frames")); 3349 3350 /* Update rate control statistics. */ 3351 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n", 3352 __func__, 3353 (int) le16toh(tx_resp->status.status), 3354 (int) le16toh(tx_resp->status.sequence), 3355 tx_resp->frame_count, 3356 tx_resp->bt_kill_count, 3357 tx_resp->failure_rts, 3358 tx_resp->failure_frame, 3359 le32toh(tx_resp->initial_rate), 3360 (int) le16toh(tx_resp->wireless_media_time)); 3361 3362 tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate)); 3363 3364 /* For rate control, ignore frames sent at different initial rate */ 3365 rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate); 3366 3367 if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) { 3368 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3369 "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u " 3370 "ni_txrate=%d)\n", tx_resp_rate, cur_rate); 3371 } 3372 3373 if (status != IWM_TX_STATUS_SUCCESS && 3374 status != IWM_TX_STATUS_DIRECT_DONE) { 3375 if (rate_matched) { 3376 ieee80211_ratectl_tx_complete(vap, ni, 3377 IEEE80211_RATECTL_TX_FAILURE, &failack, NULL); 3378 } 3379 ret = 1; 3380 } else { 3381 if (rate_matched) { 3382 ieee80211_ratectl_tx_complete(vap, ni, 3383 IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL); 3384 } 3385 ret = 0; 3386 } 3387 3388 if (rate_matched) { 3389 int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0); 3390 new_rate = vap->iv_bss->ni_txrate; 3391 if (new_rate != 0 && new_rate != cur_rate) { 3392 struct iwm_node *in = IWM_NODE(vap->iv_bss); 3393 iwm_setrates(sc, in, rix); 3394 iwm_mvm_send_lq_cmd(sc, &in->in_lq, FALSE); 3395 } 3396 } 3397 3398 return ret; 3399 } 3400 3401 static void 3402 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt) 3403 { 3404 struct iwm_cmd_header *cmd_hdr = &pkt->hdr; 3405 int idx = cmd_hdr->idx; 3406 int qid = cmd_hdr->qid; 3407 struct iwm_tx_ring *ring = &sc->txq[qid]; 3408 struct iwm_tx_data *txd = &ring->data[idx]; 3409 struct iwm_node *in = txd->in; 3410 struct mbuf *m = txd->m; 3411 int status; 3412 3413 KASSERT(txd->done == 0, ("txd not done")); 3414 KASSERT(txd->in != NULL, ("txd without node")); 3415 KASSERT(txd->m != NULL, ("txd without mbuf")); 3416 3417 sc->sc_tx_timer = 0; 3418 3419 status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in); 3420 3421 /* Unmap and free mbuf. */ 3422 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE); 3423 bus_dmamap_unload(ring->data_dmat, txd->map); 3424 3425 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3426 "free txd %p, in %p\n", txd, txd->in); 3427 txd->done = 1; 3428 txd->m = NULL; 3429 txd->in = NULL; 3430 3431 ieee80211_tx_complete(&in->in_ni, m, status); 3432 3433 if (--ring->queued < IWM_TX_RING_LOMARK) { 3434 sc->qfullmsk &= ~(1 << ring->qid); 3435 if (sc->qfullmsk == 0) { 3436 iwm_start(sc); 3437 } 3438 } 3439 } 3440 3441 /* 3442 * transmit side 3443 */ 3444 3445 /* 3446 * Process a "command done" firmware notification. This is where we wakeup 3447 * processes waiting for a synchronous command completion. 3448 * from if_iwn 3449 */ 3450 static void 3451 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt) 3452 { 3453 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE]; 3454 struct iwm_tx_data *data; 3455 3456 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) { 3457 return; /* Not a command ack. */ 3458 } 3459 3460 data = &ring->data[pkt->hdr.idx]; 3461 3462 /* If the command was mapped in an mbuf, free it. */ 3463 if (data->m != NULL) { 3464 bus_dmamap_sync(ring->data_dmat, data->map, 3465 BUS_DMASYNC_POSTWRITE); 3466 bus_dmamap_unload(ring->data_dmat, data->map); 3467 m_freem(data->m); 3468 data->m = NULL; 3469 } 3470 wakeup(&ring->desc[pkt->hdr.idx]); 3471 3472 if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) { 3473 device_printf(sc->sc_dev, 3474 "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n", 3475 __func__, pkt->hdr.idx, ring->queued, ring->cur); 3476 /* XXX call iwm_force_nmi() */ 3477 } 3478 3479 KKASSERT(ring->queued > 0); 3480 ring->queued--; 3481 if (ring->queued == 0) 3482 iwm_pcie_clear_cmd_in_flight(sc); 3483 } 3484 3485 #if 0 3486 /* 3487 * necessary only for block ack mode 3488 */ 3489 void 3490 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id, 3491 uint16_t len) 3492 { 3493 struct iwm_agn_scd_bc_tbl *scd_bc_tbl; 3494 uint16_t w_val; 3495 3496 scd_bc_tbl = sc->sched_dma.vaddr; 3497 3498 len += 8; /* magic numbers came naturally from paris */ 3499 len = roundup(len, 4) / 4; 3500 3501 w_val = htole16(sta_id << 12 | len); 3502 3503 /* Update TX scheduler. */ 3504 scd_bc_tbl[qid].tfd_offset[idx] = w_val; 3505 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3506 BUS_DMASYNC_PREWRITE); 3507 3508 /* I really wonder what this is ?!? */ 3509 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) { 3510 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val; 3511 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3512 BUS_DMASYNC_PREWRITE); 3513 } 3514 } 3515 #endif 3516 3517 /* 3518 * Fill in the rate related information for a transmit command. 3519 */ 3520 static uint8_t 3521 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in, 3522 struct mbuf *m, struct iwm_tx_cmd *tx) 3523 { 3524 struct ieee80211com *ic = &sc->sc_ic; 3525 struct ieee80211_node *ni = &in->in_ni; 3526 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 3527 const struct ieee80211_txparam *tp = ni->ni_txparms; 3528 const struct iwm_rate *rinfo; 3529 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3530 int ridx, rate_flags; 3531 3532 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT; 3533 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY; 3534 3535 if (type == IEEE80211_FC0_TYPE_MGT) { 3536 ridx = iwm_rate2ridx(sc, tp->mgmtrate); 3537 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3538 "%s: MGT (%d)\n", __func__, tp->mgmtrate); 3539 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3540 ridx = iwm_rate2ridx(sc, tp->mcastrate); 3541 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3542 "%s: MCAST (%d)\n", __func__, tp->mcastrate); 3543 } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { 3544 ridx = iwm_rate2ridx(sc, tp->ucastrate); 3545 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3546 "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate); 3547 } else if (m->m_flags & M_EAPOL) { 3548 ridx = iwm_rate2ridx(sc, tp->mgmtrate); 3549 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3550 "%s: EAPOL (%d)\n", __func__, tp->mgmtrate); 3551 } else if (type == IEEE80211_FC0_TYPE_DATA) { 3552 /* This is the index into the programmed table */ 3553 tx->initial_rate_index = 0; 3554 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE); 3555 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA (%d)\n", 3556 __func__, ni->ni_txrate); 3557 return ni->ni_txrate; 3558 } else { 3559 ridx = iwm_rate2ridx(sc, tp->mgmtrate); 3560 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3561 "%s: DEFAULT (%d)\n", __func__, tp->mgmtrate); 3562 } 3563 3564 /* 3565 * Sanity check ridx, and provide fallback. If the rate lookup 3566 * ever fails, iwm_rate2ridx() will already print an error message. 3567 */ 3568 if (ridx < 0 || ridx > IWM_RIDX_MAX) { 3569 if (ic->ic_curmode == IEEE80211_MODE_11A) { 3570 /* 3571 * XXX this assumes the mode is either 11a or not 11a; 3572 * definitely won't work for 11n. 3573 */ 3574 ridx = IWM_RIDX_OFDM; 3575 } else { 3576 ridx = IWM_RIDX_CCK; 3577 } 3578 } 3579 3580 rinfo = &iwm_rates[ridx]; 3581 3582 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3583 "%s: frame type=%d, ridx=%d, rate=%d, CCK=%d\n", 3584 __func__, type, ridx, rinfo->rate, !! (IWM_RIDX_IS_CCK(ridx))); 3585 3586 /* XXX TODO: hard-coded TX antenna? */ 3587 rate_flags = 1 << IWM_RATE_MCS_ANT_POS; 3588 if (IWM_RIDX_IS_CCK(ridx)) 3589 rate_flags |= IWM_RATE_MCS_CCK_MSK; 3590 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp); 3591 3592 return rinfo->rate; 3593 } 3594 3595 #define TB0_SIZE 16 3596 static int 3597 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac) 3598 { 3599 struct ieee80211com *ic = &sc->sc_ic; 3600 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3601 struct iwm_node *in = IWM_NODE(ni); 3602 struct iwm_tx_ring *ring; 3603 struct iwm_tx_data *data; 3604 struct iwm_tfd *desc; 3605 struct iwm_device_cmd *cmd; 3606 struct iwm_tx_cmd *tx; 3607 struct ieee80211_frame *wh; 3608 struct ieee80211_key *k = NULL; 3609 #if !defined(__DragonFly__) 3610 struct mbuf *m1; 3611 #endif 3612 uint32_t flags; 3613 u_int hdrlen; 3614 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER]; 3615 int nsegs; 3616 uint8_t rate, tid, type; 3617 int i, totlen, error, pad; 3618 3619 wh = mtod(m, struct ieee80211_frame *); 3620 hdrlen = ieee80211_anyhdrsize(wh); 3621 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3622 tid = 0; 3623 ring = &sc->txq[ac]; 3624 desc = &ring->desc[ring->cur]; 3625 memset(desc, 0, sizeof(*desc)); 3626 data = &ring->data[ring->cur]; 3627 3628 /* Fill out iwm_tx_cmd to send to the firmware */ 3629 cmd = &ring->cmd[ring->cur]; 3630 cmd->hdr.code = IWM_TX_CMD; 3631 cmd->hdr.flags = 0; 3632 cmd->hdr.qid = ring->qid; 3633 cmd->hdr.idx = ring->cur; 3634 3635 tx = (void *)cmd->data; 3636 memset(tx, 0, sizeof(*tx)); 3637 3638 rate = iwm_tx_fill_cmd(sc, in, m, tx); 3639 3640 /* Encrypt the frame if need be. */ 3641 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 3642 /* Retrieve key for TX && do software encryption. */ 3643 k = ieee80211_crypto_encap(ni, m); 3644 if (k == NULL) { 3645 m_freem(m); 3646 return (ENOBUFS); 3647 } 3648 /* 802.11 header may have moved. */ 3649 wh = mtod(m, struct ieee80211_frame *); 3650 } 3651 3652 if (ieee80211_radiotap_active_vap(vap)) { 3653 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap; 3654 3655 tap->wt_flags = 0; 3656 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq); 3657 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags); 3658 tap->wt_rate = rate; 3659 if (k != NULL) 3660 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 3661 ieee80211_radiotap_tx(vap, m); 3662 } 3663 3664 3665 totlen = m->m_pkthdr.len; 3666 3667 flags = 0; 3668 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3669 flags |= IWM_TX_CMD_FLG_ACK; 3670 } 3671 3672 if (type == IEEE80211_FC0_TYPE_DATA 3673 && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) 3674 && !IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3675 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE; 3676 } 3677 3678 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 3679 type != IEEE80211_FC0_TYPE_DATA) 3680 tx->sta_id = sc->sc_aux_sta.sta_id; 3681 else 3682 tx->sta_id = IWM_STATION_ID; 3683 3684 if (type == IEEE80211_FC0_TYPE_MGT) { 3685 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3686 3687 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3688 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) { 3689 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC); 3690 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) { 3691 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE); 3692 } else { 3693 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT); 3694 } 3695 } else { 3696 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE); 3697 } 3698 3699 if (hdrlen & 3) { 3700 /* First segment length must be a multiple of 4. */ 3701 flags |= IWM_TX_CMD_FLG_MH_PAD; 3702 pad = 4 - (hdrlen & 3); 3703 } else 3704 pad = 0; 3705 3706 tx->driver_txop = 0; 3707 tx->next_frame_len = 0; 3708 3709 tx->len = htole16(totlen); 3710 tx->tid_tspec = tid; 3711 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE); 3712 3713 /* Set physical address of "scratch area". */ 3714 tx->dram_lsb_ptr = htole32(data->scratch_paddr); 3715 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr); 3716 3717 /* Copy 802.11 header in TX command. */ 3718 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen); 3719 3720 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL; 3721 3722 tx->sec_ctl = 0; 3723 tx->tx_flags |= htole32(flags); 3724 3725 /* Trim 802.11 header. */ 3726 m_adj(m, hdrlen); 3727 #if defined(__DragonFly__) 3728 error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m, 3729 segs, IWM_MAX_SCATTER - 2, 3730 &nsegs, BUS_DMA_NOWAIT); 3731 #else 3732 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, 3733 segs, &nsegs, BUS_DMA_NOWAIT); 3734 #endif 3735 if (error != 0) { 3736 #if defined(__DragonFly__) 3737 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", 3738 error); 3739 m_freem(m); 3740 return error; 3741 #else 3742 if (error != EFBIG) { 3743 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", 3744 error); 3745 m_freem(m); 3746 return error; 3747 } 3748 /* Too many DMA segments, linearize mbuf. */ 3749 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2); 3750 if (m1 == NULL) { 3751 device_printf(sc->sc_dev, 3752 "%s: could not defrag mbuf\n", __func__); 3753 m_freem(m); 3754 return (ENOBUFS); 3755 } 3756 m = m1; 3757 3758 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, 3759 segs, &nsegs, BUS_DMA_NOWAIT); 3760 if (error != 0) { 3761 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", 3762 error); 3763 m_freem(m); 3764 return error; 3765 } 3766 #endif 3767 } 3768 data->m = m; 3769 data->in = in; 3770 data->done = 0; 3771 3772 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3773 "sending txd %p, in %p\n", data, data->in); 3774 KASSERT(data->in != NULL, ("node is NULL")); 3775 3776 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3777 "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n", 3778 ring->qid, ring->cur, totlen, nsegs, 3779 le32toh(tx->tx_flags), 3780 le32toh(tx->rate_n_flags), 3781 tx->initial_rate_index 3782 ); 3783 3784 /* Fill TX descriptor. */ 3785 desc->num_tbs = 2 + nsegs; 3786 3787 desc->tbs[0].lo = htole32(data->cmd_paddr); 3788 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) | 3789 (TB0_SIZE << 4); 3790 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE); 3791 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) | 3792 ((sizeof(struct iwm_cmd_header) + sizeof(*tx) 3793 + hdrlen + pad - TB0_SIZE) << 4); 3794 3795 /* Other DMA segments are for data payload. */ 3796 for (i = 0; i < nsegs; i++) { 3797 seg = &segs[i]; 3798 desc->tbs[i+2].lo = htole32(seg->ds_addr); 3799 desc->tbs[i+2].hi_n_len = \ 3800 htole16(iwm_get_dma_hi_addr(seg->ds_addr)) 3801 | ((seg->ds_len) << 4); 3802 } 3803 3804 bus_dmamap_sync(ring->data_dmat, data->map, 3805 BUS_DMASYNC_PREWRITE); 3806 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map, 3807 BUS_DMASYNC_PREWRITE); 3808 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3809 BUS_DMASYNC_PREWRITE); 3810 3811 #if 0 3812 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len)); 3813 #endif 3814 3815 /* Kick TX ring. */ 3816 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT; 3817 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3818 3819 /* Mark TX ring as full if we reach a certain threshold. */ 3820 if (++ring->queued > IWM_TX_RING_HIMARK) { 3821 sc->qfullmsk |= 1 << ring->qid; 3822 } 3823 3824 return 0; 3825 } 3826 3827 static int 3828 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3829 const struct ieee80211_bpf_params *params) 3830 { 3831 struct ieee80211com *ic = ni->ni_ic; 3832 struct iwm_softc *sc = ic->ic_softc; 3833 int error = 0; 3834 3835 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3836 "->%s begin\n", __func__); 3837 3838 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) { 3839 m_freem(m); 3840 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3841 "<-%s not RUNNING\n", __func__); 3842 return (ENETDOWN); 3843 } 3844 3845 IWM_LOCK(sc); 3846 /* XXX fix this */ 3847 if (params == NULL) { 3848 error = iwm_tx(sc, m, ni, 0); 3849 } else { 3850 error = iwm_tx(sc, m, ni, 0); 3851 } 3852 sc->sc_tx_timer = 5; 3853 IWM_UNLOCK(sc); 3854 3855 return (error); 3856 } 3857 3858 /* 3859 * mvm/tx.c 3860 */ 3861 3862 /* 3863 * Note that there are transports that buffer frames before they reach 3864 * the firmware. This means that after flush_tx_path is called, the 3865 * queue might not be empty. The race-free way to handle this is to: 3866 * 1) set the station as draining 3867 * 2) flush the Tx path 3868 * 3) wait for the transport queues to be empty 3869 */ 3870 int 3871 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags) 3872 { 3873 int ret; 3874 struct iwm_tx_path_flush_cmd flush_cmd = { 3875 .queues_ctl = htole32(tfd_msk), 3876 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH), 3877 }; 3878 3879 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags, 3880 sizeof(flush_cmd), &flush_cmd); 3881 if (ret) 3882 device_printf(sc->sc_dev, 3883 "Flushing tx queue failed: %d\n", ret); 3884 return ret; 3885 } 3886 3887 static int 3888 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp) 3889 { 3890 struct iwm_time_quota_cmd cmd; 3891 int i, idx, ret, num_active_macs, quota, quota_rem; 3892 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, }; 3893 int n_ifs[IWM_MAX_BINDINGS] = {0, }; 3894 uint16_t id; 3895 3896 memset(&cmd, 0, sizeof(cmd)); 3897 3898 /* currently, PHY ID == binding ID */ 3899 if (ivp) { 3900 id = ivp->phy_ctxt->id; 3901 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id")); 3902 colors[id] = ivp->phy_ctxt->color; 3903 3904 if (1) 3905 n_ifs[id] = 1; 3906 } 3907 3908 /* 3909 * The FW's scheduling session consists of 3910 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments 3911 * equally between all the bindings that require quota 3912 */ 3913 num_active_macs = 0; 3914 for (i = 0; i < IWM_MAX_BINDINGS; i++) { 3915 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID); 3916 num_active_macs += n_ifs[i]; 3917 } 3918 3919 quota = 0; 3920 quota_rem = 0; 3921 if (num_active_macs) { 3922 quota = IWM_MVM_MAX_QUOTA / num_active_macs; 3923 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs; 3924 } 3925 3926 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) { 3927 if (colors[i] < 0) 3928 continue; 3929 3930 cmd.quotas[idx].id_and_color = 3931 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i])); 3932 3933 if (n_ifs[i] <= 0) { 3934 cmd.quotas[idx].quota = htole32(0); 3935 cmd.quotas[idx].max_duration = htole32(0); 3936 } else { 3937 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]); 3938 cmd.quotas[idx].max_duration = htole32(0); 3939 } 3940 idx++; 3941 } 3942 3943 /* Give the remainder of the session to the first binding */ 3944 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem); 3945 3946 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC, 3947 sizeof(cmd), &cmd); 3948 if (ret) 3949 device_printf(sc->sc_dev, 3950 "%s: Failed to send quota: %d\n", __func__, ret); 3951 return ret; 3952 } 3953 3954 /* 3955 * ieee80211 routines 3956 */ 3957 3958 /* 3959 * Change to AUTH state in 80211 state machine. Roughly matches what 3960 * Linux does in bss_info_changed(). 3961 */ 3962 static int 3963 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc) 3964 { 3965 struct ieee80211_node *ni; 3966 struct iwm_node *in; 3967 struct iwm_vap *iv = IWM_VAP(vap); 3968 uint32_t duration; 3969 int error; 3970 3971 /* 3972 * XXX i have a feeling that the vap node is being 3973 * freed from underneath us. Grr. 3974 */ 3975 ni = ieee80211_ref_node(vap->iv_bss); 3976 in = IWM_NODE(ni); 3977 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE, 3978 "%s: called; vap=%p, bss ni=%p\n", 3979 __func__, 3980 vap, 3981 ni); 3982 IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n", 3983 __func__, ether_sprintf(ni->ni_bssid)); 3984 3985 in->in_assoc = 0; 3986 iv->iv_auth = 1; 3987 3988 /* 3989 * Firmware bug - it'll crash if the beacon interval is less 3990 * than 16. We can't avoid connecting at all, so refuse the 3991 * station state change, this will cause net80211 to abandon 3992 * attempts to connect to this AP, and eventually wpa_s will 3993 * blacklist the AP... 3994 */ 3995 if (ni->ni_intval < 16) { 3996 device_printf(sc->sc_dev, 3997 "AP %s beacon interval is %d, refusing due to firmware bug!\n", 3998 ether_sprintf(ni->ni_bssid), ni->ni_intval); 3999 error = EINVAL; 4000 goto out; 4001 } 4002 4003 error = iwm_allow_mcast(vap, sc); 4004 if (error) { 4005 device_printf(sc->sc_dev, 4006 "%s: failed to set multicast\n", __func__); 4007 goto out; 4008 } 4009 4010 /* 4011 * This is where it deviates from what Linux does. 4012 * 4013 * Linux iwlwifi doesn't reset the nic each time, nor does it 4014 * call ctxt_add() here. Instead, it adds it during vap creation, 4015 * and always does a mac_ctx_changed(). 4016 * 4017 * The openbsd port doesn't attempt to do that - it reset things 4018 * at odd states and does the add here. 4019 * 4020 * So, until the state handling is fixed (ie, we never reset 4021 * the NIC except for a firmware failure, which should drag 4022 * the NIC back to IDLE, re-setup and re-add all the mac/phy 4023 * contexts that are required), let's do a dirty hack here. 4024 */ 4025 if (iv->is_uploaded) { 4026 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) { 4027 device_printf(sc->sc_dev, 4028 "%s: failed to update MAC\n", __func__); 4029 goto out; 4030 } 4031 } else { 4032 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) { 4033 device_printf(sc->sc_dev, 4034 "%s: failed to add MAC\n", __func__); 4035 goto out; 4036 } 4037 } 4038 sc->sc_firmware_state = 1; 4039 4040 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0], 4041 in->in_ni.ni_chan, 1, 1)) != 0) { 4042 device_printf(sc->sc_dev, 4043 "%s: failed update phy ctxt\n", __func__); 4044 goto out; 4045 } 4046 iv->phy_ctxt = &sc->sc_phyctxt[0]; 4047 4048 if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) { 4049 device_printf(sc->sc_dev, 4050 "%s: binding update cmd\n", __func__); 4051 goto out; 4052 } 4053 sc->sc_firmware_state = 2; 4054 /* 4055 * Authentication becomes unreliable when powersaving is left enabled 4056 * here. Powersaving will be activated again when association has 4057 * finished or is aborted. 4058 */ 4059 iv->ps_disabled = TRUE; 4060 error = iwm_mvm_power_update_mac(sc); 4061 iv->ps_disabled = FALSE; 4062 if (error != 0) { 4063 device_printf(sc->sc_dev, 4064 "%s: failed to update power management\n", 4065 __func__); 4066 goto out; 4067 } 4068 if ((error = iwm_mvm_add_sta(sc, in)) != 0) { 4069 device_printf(sc->sc_dev, 4070 "%s: failed to add sta\n", __func__); 4071 goto out; 4072 } 4073 sc->sc_firmware_state = 3; 4074 4075 /* 4076 * Prevent the FW from wandering off channel during association 4077 * by "protecting" the session with a time event. 4078 */ 4079 /* XXX duration is in units of TU, not MS */ 4080 duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; 4081 iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE); 4082 4083 error = 0; 4084 out: 4085 if (error != 0) 4086 iv->iv_auth = 0; 4087 ieee80211_free_node(ni); 4088 return (error); 4089 } 4090 4091 static struct ieee80211_node * 4092 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 4093 { 4094 return kmalloc(sizeof (struct iwm_node), M_80211_NODE, 4095 M_INTWAIT | M_ZERO); 4096 } 4097 4098 static uint8_t 4099 iwm_rate_from_ucode_rate(uint32_t rate_n_flags) 4100 { 4101 uint8_t plcp = rate_n_flags & 0xff; 4102 int i; 4103 4104 for (i = 0; i <= IWM_RIDX_MAX; i++) { 4105 if (iwm_rates[i].plcp == plcp) 4106 return iwm_rates[i].rate; 4107 } 4108 return 0; 4109 } 4110 4111 uint8_t 4112 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx) 4113 { 4114 int i; 4115 uint8_t rval; 4116 4117 for (i = 0; i < rs->rs_nrates; i++) { 4118 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL); 4119 if (rval == iwm_rates[ridx].rate) 4120 return rs->rs_rates[i]; 4121 } 4122 4123 return 0; 4124 } 4125 4126 static int 4127 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate) 4128 { 4129 int i; 4130 4131 for (i = 0; i <= IWM_RIDX_MAX; i++) { 4132 if (iwm_rates[i].rate == rate) 4133 return i; 4134 } 4135 4136 device_printf(sc->sc_dev, 4137 "%s: WARNING: device rate for %u not found!\n", 4138 __func__, rate); 4139 4140 return -1; 4141 } 4142 4143 static void 4144 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix) 4145 { 4146 struct ieee80211_node *ni = &in->in_ni; 4147 struct iwm_lq_cmd *lq = &in->in_lq; 4148 struct ieee80211_rateset *rs = &ni->ni_rates; 4149 int nrates = rs->rs_nrates; 4150 int i, ridx, tab = 0; 4151 int txant = 0; 4152 4153 KKASSERT(rix >= 0 && rix < nrates); 4154 4155 if (nrates > nitems(lq->rs_table)) { 4156 device_printf(sc->sc_dev, 4157 "%s: node supports %d rates, driver handles " 4158 "only %zu\n", __func__, nrates, nitems(lq->rs_table)); 4159 return; 4160 } 4161 if (nrates == 0) { 4162 device_printf(sc->sc_dev, 4163 "%s: node supports 0 rates, odd!\n", __func__); 4164 return; 4165 } 4166 nrates = imin(rix + 1, nrates); 4167 4168 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 4169 "%s: nrates=%d\n", __func__, nrates); 4170 4171 /* then construct a lq_cmd based on those */ 4172 memset(lq, 0, sizeof(*lq)); 4173 lq->sta_id = IWM_STATION_ID; 4174 4175 /* For HT, always enable RTS/CTS to avoid excessive retries. */ 4176 if (ni->ni_flags & IEEE80211_NODE_HT) 4177 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK; 4178 4179 /* 4180 * are these used? (we don't do SISO or MIMO) 4181 * need to set them to non-zero, though, or we get an error. 4182 */ 4183 lq->single_stream_ant_msk = 1; 4184 lq->dual_stream_ant_msk = 1; 4185 4186 /* 4187 * Build the actual rate selection table. 4188 * The lowest bits are the rates. Additionally, 4189 * CCK needs bit 9 to be set. The rest of the bits 4190 * we add to the table select the tx antenna 4191 * Note that we add the rates in the highest rate first 4192 * (opposite of ni_rates). 4193 */ 4194 for (i = 0; i < nrates; i++) { 4195 int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL; 4196 int nextant; 4197 4198 /* Map 802.11 rate to HW rate index. */ 4199 ridx = iwm_rate2ridx(sc, rate); 4200 if (ridx == -1) 4201 continue; 4202 4203 if (txant == 0) 4204 txant = iwm_mvm_get_valid_tx_ant(sc); 4205 nextant = 1<<(ffs(txant)-1); 4206 txant &= ~nextant; 4207 4208 tab = iwm_rates[ridx].plcp; 4209 tab |= nextant << IWM_RATE_MCS_ANT_POS; 4210 if (IWM_RIDX_IS_CCK(ridx)) 4211 tab |= IWM_RATE_MCS_CCK_MSK; 4212 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 4213 "station rate i=%d, rate=%d, hw=%x\n", 4214 i, iwm_rates[ridx].rate, tab); 4215 lq->rs_table[i] = htole32(tab); 4216 } 4217 /* then fill the rest with the lowest possible rate */ 4218 for (i = nrates; i < nitems(lq->rs_table); i++) { 4219 KASSERT(tab != 0, ("invalid tab")); 4220 lq->rs_table[i] = htole32(tab); 4221 } 4222 } 4223 4224 static int 4225 iwm_media_change(struct ifnet *ifp) 4226 { 4227 struct ieee80211vap *vap = ifp->if_softc; 4228 struct ieee80211com *ic = vap->iv_ic; 4229 struct iwm_softc *sc = ic->ic_softc; 4230 int error; 4231 4232 error = ieee80211_media_change(ifp); 4233 if (error != ENETRESET) 4234 return error; 4235 4236 IWM_LOCK(sc); 4237 if (ic->ic_nrunning > 0) { 4238 iwm_stop(sc); 4239 iwm_init(sc); 4240 } 4241 IWM_UNLOCK(sc); 4242 return error; 4243 } 4244 4245 static void 4246 iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap) 4247 { 4248 struct iwm_vap *ivp = IWM_VAP(vap); 4249 int error; 4250 4251 /* Avoid Tx watchdog triggering, when transfers get dropped here. */ 4252 sc->sc_tx_timer = 0; 4253 4254 ivp->iv_auth = 0; 4255 if (sc->sc_firmware_state == 3) { 4256 iwm_xmit_queue_drain(sc); 4257 // iwm_mvm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC); 4258 error = iwm_mvm_rm_sta(sc, vap, TRUE); 4259 if (error) { 4260 device_printf(sc->sc_dev, 4261 "%s: Failed to remove station: %d\n", 4262 __func__, error); 4263 } 4264 } 4265 if (sc->sc_firmware_state == 3) { 4266 error = iwm_mvm_mac_ctxt_changed(sc, vap); 4267 if (error) { 4268 device_printf(sc->sc_dev, 4269 "%s: Failed to change mac context: %d\n", 4270 __func__, error); 4271 } 4272 } 4273 if (sc->sc_firmware_state == 3) { 4274 error = iwm_mvm_sf_update(sc, vap, FALSE); 4275 if (error) { 4276 device_printf(sc->sc_dev, 4277 "%s: Failed to update smart FIFO: %d\n", 4278 __func__, error); 4279 } 4280 } 4281 if (sc->sc_firmware_state == 3) { 4282 error = iwm_mvm_rm_sta_id(sc, vap); 4283 if (error) { 4284 device_printf(sc->sc_dev, 4285 "%s: Failed to remove station id: %d\n", 4286 __func__, error); 4287 } 4288 } 4289 if (sc->sc_firmware_state == 3) { 4290 error = iwm_mvm_update_quotas(sc, NULL); 4291 if (error) { 4292 device_printf(sc->sc_dev, 4293 "%s: Failed to update PHY quota: %d\n", 4294 __func__, error); 4295 } 4296 } 4297 if (sc->sc_firmware_state == 3) { 4298 /* XXX Might need to specify bssid correctly. */ 4299 error = iwm_mvm_mac_ctxt_changed(sc, vap); 4300 if (error) { 4301 device_printf(sc->sc_dev, 4302 "%s: Failed to change mac context: %d\n", 4303 __func__, error); 4304 } 4305 } 4306 if (sc->sc_firmware_state == 3) { 4307 sc->sc_firmware_state = 2; 4308 } 4309 if (sc->sc_firmware_state > 1) { 4310 error = iwm_mvm_binding_remove_vif(sc, ivp); 4311 if (error) { 4312 device_printf(sc->sc_dev, 4313 "%s: Failed to remove channel ctx: %d\n", 4314 __func__, error); 4315 } 4316 } 4317 if (sc->sc_firmware_state > 1) { 4318 sc->sc_firmware_state = 1; 4319 } 4320 ivp->phy_ctxt = NULL; 4321 if (sc->sc_firmware_state > 0) { 4322 error = iwm_mvm_mac_ctxt_changed(sc, vap); 4323 if (error) { 4324 device_printf(sc->sc_dev, 4325 "%s: Failed to change mac context: %d\n", 4326 __func__, error); 4327 } 4328 } 4329 if (sc->sc_firmware_state > 0) { 4330 error = iwm_mvm_power_update_mac(sc); 4331 if (error != 0) { 4332 device_printf(sc->sc_dev, 4333 "%s: failed to update power management\n", 4334 __func__); 4335 } 4336 } 4337 sc->sc_firmware_state = 0; 4338 } 4339 4340 static int 4341 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 4342 { 4343 struct iwm_vap *ivp = IWM_VAP(vap); 4344 struct ieee80211com *ic = vap->iv_ic; 4345 struct iwm_softc *sc = ic->ic_softc; 4346 struct iwm_node *in; 4347 int error; 4348 4349 IWM_DPRINTF(sc, IWM_DEBUG_STATE, 4350 "switching state %s -> %s arg=0x%x\n", 4351 ieee80211_state_name[vap->iv_state], 4352 ieee80211_state_name[nstate], 4353 arg); 4354 4355 IEEE80211_UNLOCK(ic); 4356 IWM_LOCK(sc); 4357 4358 if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) && 4359 (nstate == IEEE80211_S_AUTH || 4360 nstate == IEEE80211_S_ASSOC || 4361 nstate == IEEE80211_S_RUN)) { 4362 /* Stop blinking for a scan, when authenticating. */ 4363 iwm_led_blink_stop(sc); 4364 } 4365 4366 if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) { 4367 iwm_mvm_led_disable(sc); 4368 /* disable beacon filtering if we're hopping out of RUN */ 4369 iwm_mvm_disable_beacon_filter(sc); 4370 if (((in = IWM_NODE(vap->iv_bss)) != NULL)) 4371 in->in_assoc = 0; 4372 } 4373 4374 if ((vap->iv_state == IEEE80211_S_AUTH || 4375 vap->iv_state == IEEE80211_S_ASSOC || 4376 vap->iv_state == IEEE80211_S_RUN) && 4377 (nstate == IEEE80211_S_INIT || 4378 nstate == IEEE80211_S_SCAN || 4379 nstate == IEEE80211_S_AUTH)) { 4380 iwm_mvm_stop_session_protection(sc, ivp); 4381 } 4382 4383 if ((vap->iv_state == IEEE80211_S_RUN || 4384 vap->iv_state == IEEE80211_S_ASSOC) && 4385 nstate == IEEE80211_S_INIT) { 4386 /* 4387 * In this case, iv_newstate() wants to send an 80211 frame on 4388 * the network that we are leaving. So we need to call it, 4389 * before tearing down all the firmware state. 4390 */ 4391 IWM_UNLOCK(sc); 4392 IEEE80211_LOCK(ic); 4393 ivp->iv_newstate(vap, nstate, arg); 4394 IEEE80211_UNLOCK(ic); 4395 IWM_LOCK(sc); 4396 iwm_bring_down_firmware(sc, vap); 4397 IWM_UNLOCK(sc); 4398 IEEE80211_LOCK(ic); 4399 return 0; 4400 } 4401 4402 switch (nstate) { 4403 case IEEE80211_S_INIT: 4404 case IEEE80211_S_SCAN: 4405 break; 4406 4407 case IEEE80211_S_AUTH: 4408 iwm_bring_down_firmware(sc, vap); 4409 if ((error = iwm_auth(vap, sc)) != 0) { 4410 device_printf(sc->sc_dev, 4411 "%s: could not move to auth state: %d\n", 4412 __func__, error); 4413 iwm_bring_down_firmware(sc, vap); 4414 IWM_UNLOCK(sc); 4415 IEEE80211_LOCK(ic); 4416 return 1; 4417 } 4418 break; 4419 4420 case IEEE80211_S_ASSOC: 4421 /* 4422 * EBS may be disabled due to previous failures reported by FW. 4423 * Reset EBS status here assuming environment has been changed. 4424 */ 4425 sc->last_ebs_successful = TRUE; 4426 break; 4427 4428 case IEEE80211_S_RUN: 4429 in = IWM_NODE(vap->iv_bss); 4430 /* Update the association state, now we have it all */ 4431 /* (eg associd comes in at this point */ 4432 error = iwm_mvm_update_sta(sc, in); 4433 if (error != 0) { 4434 device_printf(sc->sc_dev, 4435 "%s: failed to update STA\n", __func__); 4436 IWM_UNLOCK(sc); 4437 IEEE80211_LOCK(ic); 4438 return error; 4439 } 4440 in->in_assoc = 1; 4441 error = iwm_mvm_mac_ctxt_changed(sc, vap); 4442 if (error != 0) { 4443 device_printf(sc->sc_dev, 4444 "%s: failed to update MAC: %d\n", __func__, error); 4445 } 4446 4447 iwm_mvm_sf_update(sc, vap, FALSE); 4448 iwm_mvm_enable_beacon_filter(sc, ivp); 4449 iwm_mvm_power_update_mac(sc); 4450 iwm_mvm_update_quotas(sc, ivp); 4451 int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0); 4452 iwm_setrates(sc, in, rix); 4453 4454 if ((error = iwm_mvm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) { 4455 device_printf(sc->sc_dev, 4456 "%s: IWM_LQ_CMD failed: %d\n", __func__, error); 4457 } 4458 4459 iwm_mvm_led_enable(sc); 4460 break; 4461 4462 default: 4463 break; 4464 } 4465 IWM_UNLOCK(sc); 4466 IEEE80211_LOCK(ic); 4467 4468 return (ivp->iv_newstate(vap, nstate, arg)); 4469 } 4470 4471 void 4472 iwm_endscan_cb(void *arg, int pending) 4473 { 4474 struct iwm_softc *sc = arg; 4475 struct ieee80211com *ic = &sc->sc_ic; 4476 4477 IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE, 4478 "%s: scan ended\n", 4479 __func__); 4480 4481 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps)); 4482 } 4483 4484 static int 4485 iwm_send_bt_init_conf(struct iwm_softc *sc) 4486 { 4487 struct iwm_bt_coex_cmd bt_cmd; 4488 4489 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI); 4490 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET); 4491 4492 return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), 4493 &bt_cmd); 4494 } 4495 4496 static boolean_t 4497 iwm_mvm_is_lar_supported(struct iwm_softc *sc) 4498 { 4499 boolean_t nvm_lar = sc->nvm_data->lar_enabled; 4500 boolean_t tlv_lar = fw_has_capa(&sc->sc_fw.ucode_capa, 4501 IWM_UCODE_TLV_CAPA_LAR_SUPPORT); 4502 4503 if (iwm_lar_disable) 4504 return FALSE; 4505 4506 /* 4507 * Enable LAR only if it is supported by the FW (TLV) && 4508 * enabled in the NVM 4509 */ 4510 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) 4511 return nvm_lar && tlv_lar; 4512 else 4513 return tlv_lar; 4514 } 4515 4516 static boolean_t 4517 iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc) 4518 { 4519 return fw_has_api(&sc->sc_fw.ucode_capa, 4520 IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) || 4521 fw_has_capa(&sc->sc_fw.ucode_capa, 4522 IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC); 4523 } 4524 4525 static int 4526 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2) 4527 { 4528 struct iwm_mcc_update_cmd mcc_cmd; 4529 struct iwm_host_cmd hcmd = { 4530 .id = IWM_MCC_UPDATE_CMD, 4531 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB), 4532 .data = { &mcc_cmd }, 4533 }; 4534 int ret; 4535 #ifdef IWM_DEBUG 4536 struct iwm_rx_packet *pkt; 4537 struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL; 4538 struct iwm_mcc_update_resp *mcc_resp; 4539 int n_channels; 4540 uint16_t mcc; 4541 #endif 4542 int resp_v2 = fw_has_capa(&sc->sc_fw.ucode_capa, 4543 IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2); 4544 4545 if (!iwm_mvm_is_lar_supported(sc)) { 4546 IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n", 4547 __func__); 4548 return 0; 4549 } 4550 4551 memset(&mcc_cmd, 0, sizeof(mcc_cmd)); 4552 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]); 4553 if (iwm_mvm_is_wifi_mcc_supported(sc)) 4554 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT; 4555 else 4556 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW; 4557 4558 if (resp_v2) 4559 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd); 4560 else 4561 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1); 4562 4563 IWM_DPRINTF(sc, IWM_DEBUG_LAR, 4564 "send MCC update to FW with '%c%c' src = %d\n", 4565 alpha2[0], alpha2[1], mcc_cmd.source_id); 4566 4567 ret = iwm_send_cmd(sc, &hcmd); 4568 if (ret) 4569 return ret; 4570 4571 #ifdef IWM_DEBUG 4572 pkt = hcmd.resp_pkt; 4573 4574 /* Extract MCC response */ 4575 if (resp_v2) { 4576 mcc_resp = (void *)pkt->data; 4577 mcc = mcc_resp->mcc; 4578 n_channels = le32toh(mcc_resp->n_channels); 4579 } else { 4580 mcc_resp_v1 = (void *)pkt->data; 4581 mcc = mcc_resp_v1->mcc; 4582 n_channels = le32toh(mcc_resp_v1->n_channels); 4583 } 4584 4585 /* W/A for a FW/NVM issue - returns 0x00 for the world domain */ 4586 if (mcc == 0) 4587 mcc = 0x3030; /* "00" - world */ 4588 4589 IWM_DPRINTF(sc, IWM_DEBUG_LAR, 4590 "regulatory domain '%c%c' (%d channels available)\n", 4591 mcc >> 8, mcc & 0xff, n_channels); 4592 #endif 4593 iwm_free_resp(sc, &hcmd); 4594 4595 return 0; 4596 } 4597 4598 static void 4599 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff) 4600 { 4601 struct iwm_host_cmd cmd = { 4602 .id = IWM_REPLY_THERMAL_MNG_BACKOFF, 4603 .len = { sizeof(uint32_t), }, 4604 .data = { &backoff, }, 4605 }; 4606 4607 if (iwm_send_cmd(sc, &cmd) != 0) { 4608 device_printf(sc->sc_dev, 4609 "failed to change thermal tx backoff\n"); 4610 } 4611 } 4612 4613 static int 4614 iwm_init_hw(struct iwm_softc *sc) 4615 { 4616 struct ieee80211com *ic = &sc->sc_ic; 4617 int error, i, ac; 4618 4619 sc->sf_state = IWM_SF_UNINIT; 4620 4621 if ((error = iwm_start_hw(sc)) != 0) { 4622 kprintf("iwm_start_hw: failed %d\n", error); 4623 return error; 4624 } 4625 4626 if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) { 4627 kprintf("iwm_run_init_mvm_ucode: failed %d\n", error); 4628 return error; 4629 } 4630 4631 /* 4632 * should stop and start HW since that INIT 4633 * image just loaded 4634 */ 4635 iwm_stop_device(sc); 4636 sc->sc_ps_disabled = FALSE; 4637 if ((error = iwm_start_hw(sc)) != 0) { 4638 device_printf(sc->sc_dev, "could not initialize hardware\n"); 4639 return error; 4640 } 4641 4642 /* omstart, this time with the regular firmware */ 4643 error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR); 4644 if (error) { 4645 device_printf(sc->sc_dev, "could not load firmware\n"); 4646 goto error; 4647 } 4648 4649 error = iwm_mvm_sf_update(sc, NULL, FALSE); 4650 if (error) 4651 device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n"); 4652 4653 if ((error = iwm_send_bt_init_conf(sc)) != 0) { 4654 device_printf(sc->sc_dev, "bt init conf failed\n"); 4655 goto error; 4656 } 4657 4658 error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc)); 4659 if (error != 0) { 4660 device_printf(sc->sc_dev, "antenna config failed\n"); 4661 goto error; 4662 } 4663 4664 /* Send phy db control command and then phy db calibration */ 4665 if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0) 4666 goto error; 4667 4668 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) { 4669 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n"); 4670 goto error; 4671 } 4672 4673 /* Add auxiliary station for scanning */ 4674 if ((error = iwm_mvm_add_aux_sta(sc)) != 0) { 4675 device_printf(sc->sc_dev, "add_aux_sta failed\n"); 4676 goto error; 4677 } 4678 4679 for (i = 0; i < IWM_NUM_PHY_CTX; i++) { 4680 /* 4681 * The channel used here isn't relevant as it's 4682 * going to be overwritten in the other flows. 4683 * For now use the first channel we have. 4684 */ 4685 if ((error = iwm_mvm_phy_ctxt_add(sc, 4686 &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0) 4687 goto error; 4688 } 4689 4690 /* Initialize tx backoffs to the minimum. */ 4691 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) 4692 iwm_mvm_tt_tx_backoff(sc, 0); 4693 4694 if (iwm_mvm_config_ltr(sc) != 0) 4695 device_printf(sc->sc_dev, "PCIe LTR configuration failed\n"); 4696 4697 error = iwm_mvm_power_update_device(sc); 4698 if (error) 4699 goto error; 4700 4701 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0) 4702 goto error; 4703 4704 if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) { 4705 if ((error = iwm_mvm_config_umac_scan(sc)) != 0) 4706 goto error; 4707 } 4708 4709 /* Enable Tx queues. */ 4710 for (ac = 0; ac < WME_NUM_AC; ac++) { 4711 error = iwm_enable_txq(sc, IWM_STATION_ID, ac, 4712 iwm_mvm_ac_to_tx_fifo[ac]); 4713 if (error) 4714 goto error; 4715 } 4716 4717 if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) { 4718 device_printf(sc->sc_dev, "failed to disable beacon filter\n"); 4719 goto error; 4720 } 4721 4722 return 0; 4723 4724 error: 4725 iwm_stop_device(sc); 4726 return error; 4727 } 4728 4729 /* Allow multicast from our BSSID. */ 4730 static int 4731 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc) 4732 { 4733 struct ieee80211_node *ni = vap->iv_bss; 4734 struct iwm_mcast_filter_cmd *cmd; 4735 size_t size; 4736 int error; 4737 4738 size = roundup(sizeof(*cmd), 4); 4739 cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO); 4740 if (cmd == NULL) 4741 return ENOMEM; 4742 cmd->filter_own = 1; 4743 cmd->port_id = 0; 4744 cmd->count = 0; 4745 cmd->pass_all = 1; 4746 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid); 4747 4748 error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 4749 IWM_CMD_SYNC, size, cmd); 4750 kfree(cmd, M_DEVBUF); 4751 4752 return (error); 4753 } 4754 4755 /* 4756 * ifnet interfaces 4757 */ 4758 4759 static void 4760 iwm_init(struct iwm_softc *sc) 4761 { 4762 int error; 4763 4764 if (sc->sc_flags & IWM_FLAG_HW_INITED) { 4765 return; 4766 } 4767 sc->sc_generation++; 4768 sc->sc_flags &= ~IWM_FLAG_STOPPED; 4769 4770 if ((error = iwm_init_hw(sc)) != 0) { 4771 kprintf("iwm_init_hw failed %d\n", error); 4772 iwm_stop(sc); 4773 return; 4774 } 4775 4776 /* 4777 * Ok, firmware loaded and we are jogging 4778 */ 4779 sc->sc_flags |= IWM_FLAG_HW_INITED; 4780 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc); 4781 } 4782 4783 static int 4784 iwm_transmit(struct ieee80211com *ic, struct mbuf *m) 4785 { 4786 struct iwm_softc *sc; 4787 int error; 4788 4789 sc = ic->ic_softc; 4790 4791 IWM_LOCK(sc); 4792 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) { 4793 IWM_UNLOCK(sc); 4794 return (ENXIO); 4795 } 4796 error = mbufq_enqueue(&sc->sc_snd, m); 4797 if (error) { 4798 IWM_UNLOCK(sc); 4799 return (error); 4800 } 4801 iwm_start(sc); 4802 IWM_UNLOCK(sc); 4803 return (0); 4804 } 4805 4806 /* 4807 * Dequeue packets from sendq and call send. 4808 */ 4809 static void 4810 iwm_start(struct iwm_softc *sc) 4811 { 4812 struct ieee80211_node *ni; 4813 struct mbuf *m; 4814 int ac = 0; 4815 4816 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__); 4817 while (sc->qfullmsk == 0 && 4818 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) { 4819 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 4820 if (iwm_tx(sc, m, ni, ac) != 0) { 4821 if_inc_counter(ni->ni_vap->iv_ifp, 4822 IFCOUNTER_OERRORS, 1); 4823 ieee80211_free_node(ni); 4824 continue; 4825 } 4826 sc->sc_tx_timer = 15; 4827 } 4828 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__); 4829 } 4830 4831 static void 4832 iwm_stop(struct iwm_softc *sc) 4833 { 4834 4835 sc->sc_flags &= ~IWM_FLAG_HW_INITED; 4836 sc->sc_flags |= IWM_FLAG_STOPPED; 4837 sc->sc_generation++; 4838 iwm_led_blink_stop(sc); 4839 sc->sc_tx_timer = 0; 4840 iwm_stop_device(sc); 4841 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING; 4842 } 4843 4844 static void 4845 iwm_watchdog(void *arg) 4846 { 4847 struct iwm_softc *sc = arg; 4848 4849 if (sc->sc_tx_timer > 0) { 4850 if (--sc->sc_tx_timer == 0) { 4851 device_printf(sc->sc_dev, "device timeout\n"); 4852 #ifdef IWM_DEBUG 4853 iwm_nic_error(sc); 4854 #endif 4855 iwm_stop(sc); 4856 #if defined(__DragonFly__) 4857 ++sc->sc_ic.ic_oerrors; 4858 #else 4859 counter_u64_add(sc->sc_ic.ic_oerrors, 1); 4860 #endif 4861 return; 4862 } 4863 } 4864 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc); 4865 } 4866 4867 static void 4868 iwm_parent(struct ieee80211com *ic) 4869 { 4870 struct iwm_softc *sc = ic->ic_softc; 4871 int startall = 0; 4872 4873 IWM_LOCK(sc); 4874 if (ic->ic_nrunning > 0) { 4875 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) { 4876 iwm_init(sc); 4877 startall = 1; 4878 } 4879 } else if (sc->sc_flags & IWM_FLAG_HW_INITED) 4880 iwm_stop(sc); 4881 IWM_UNLOCK(sc); 4882 if (startall) 4883 ieee80211_start_all(ic); 4884 } 4885 4886 /* 4887 * The interrupt side of things 4888 */ 4889 4890 /* 4891 * error dumping routines are from iwlwifi/mvm/utils.c 4892 */ 4893 4894 /* 4895 * Note: This structure is read from the device with IO accesses, 4896 * and the reading already does the endian conversion. As it is 4897 * read with uint32_t-sized accesses, any members with a different size 4898 * need to be ordered correctly though! 4899 */ 4900 struct iwm_error_event_table { 4901 uint32_t valid; /* (nonzero) valid, (0) log is empty */ 4902 uint32_t error_id; /* type of error */ 4903 uint32_t trm_hw_status0; /* TRM HW status */ 4904 uint32_t trm_hw_status1; /* TRM HW status */ 4905 uint32_t blink2; /* branch link */ 4906 uint32_t ilink1; /* interrupt link */ 4907 uint32_t ilink2; /* interrupt link */ 4908 uint32_t data1; /* error-specific data */ 4909 uint32_t data2; /* error-specific data */ 4910 uint32_t data3; /* error-specific data */ 4911 uint32_t bcon_time; /* beacon timer */ 4912 uint32_t tsf_low; /* network timestamp function timer */ 4913 uint32_t tsf_hi; /* network timestamp function timer */ 4914 uint32_t gp1; /* GP1 timer register */ 4915 uint32_t gp2; /* GP2 timer register */ 4916 uint32_t fw_rev_type; /* firmware revision type */ 4917 uint32_t major; /* uCode version major */ 4918 uint32_t minor; /* uCode version minor */ 4919 uint32_t hw_ver; /* HW Silicon version */ 4920 uint32_t brd_ver; /* HW board version */ 4921 uint32_t log_pc; /* log program counter */ 4922 uint32_t frame_ptr; /* frame pointer */ 4923 uint32_t stack_ptr; /* stack pointer */ 4924 uint32_t hcmd; /* last host command header */ 4925 uint32_t isr0; /* isr status register LMPM_NIC_ISR0: 4926 * rxtx_flag */ 4927 uint32_t isr1; /* isr status register LMPM_NIC_ISR1: 4928 * host_flag */ 4929 uint32_t isr2; /* isr status register LMPM_NIC_ISR2: 4930 * enc_flag */ 4931 uint32_t isr3; /* isr status register LMPM_NIC_ISR3: 4932 * time_flag */ 4933 uint32_t isr4; /* isr status register LMPM_NIC_ISR4: 4934 * wico interrupt */ 4935 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */ 4936 uint32_t wait_event; /* wait event() caller address */ 4937 uint32_t l2p_control; /* L2pControlField */ 4938 uint32_t l2p_duration; /* L2pDurationField */ 4939 uint32_t l2p_mhvalid; /* L2pMhValidBits */ 4940 uint32_t l2p_addr_match; /* L2pAddrMatchStat */ 4941 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on 4942 * (LMPM_PMG_SEL) */ 4943 uint32_t u_timestamp; /* indicate when the date and time of the 4944 * compilation */ 4945 uint32_t flow_handler; /* FH read/write pointers, RX credit */ 4946 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */; 4947 4948 /* 4949 * UMAC error struct - relevant starting from family 8000 chip. 4950 * Note: This structure is read from the device with IO accesses, 4951 * and the reading already does the endian conversion. As it is 4952 * read with u32-sized accesses, any members with a different size 4953 * need to be ordered correctly though! 4954 */ 4955 struct iwm_umac_error_event_table { 4956 uint32_t valid; /* (nonzero) valid, (0) log is empty */ 4957 uint32_t error_id; /* type of error */ 4958 uint32_t blink1; /* branch link */ 4959 uint32_t blink2; /* branch link */ 4960 uint32_t ilink1; /* interrupt link */ 4961 uint32_t ilink2; /* interrupt link */ 4962 uint32_t data1; /* error-specific data */ 4963 uint32_t data2; /* error-specific data */ 4964 uint32_t data3; /* error-specific data */ 4965 uint32_t umac_major; 4966 uint32_t umac_minor; 4967 uint32_t frame_pointer; /* core register 27*/ 4968 uint32_t stack_pointer; /* core register 28 */ 4969 uint32_t cmd_header; /* latest host cmd sent to UMAC */ 4970 uint32_t nic_isr_pref; /* ISR status register */ 4971 } __packed; 4972 4973 #define ERROR_START_OFFSET (1 * sizeof(uint32_t)) 4974 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t)) 4975 4976 #ifdef IWM_DEBUG 4977 struct { 4978 const char *name; 4979 uint8_t num; 4980 } advanced_lookup[] = { 4981 { "NMI_INTERRUPT_WDG", 0x34 }, 4982 { "SYSASSERT", 0x35 }, 4983 { "UCODE_VERSION_MISMATCH", 0x37 }, 4984 { "BAD_COMMAND", 0x38 }, 4985 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, 4986 { "FATAL_ERROR", 0x3D }, 4987 { "NMI_TRM_HW_ERR", 0x46 }, 4988 { "NMI_INTERRUPT_TRM", 0x4C }, 4989 { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, 4990 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, 4991 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, 4992 { "NMI_INTERRUPT_HOST", 0x66 }, 4993 { "NMI_INTERRUPT_ACTION_PT", 0x7C }, 4994 { "NMI_INTERRUPT_UNKNOWN", 0x84 }, 4995 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, 4996 { "ADVANCED_SYSASSERT", 0 }, 4997 }; 4998 4999 static const char * 5000 iwm_desc_lookup(uint32_t num) 5001 { 5002 int i; 5003 5004 for (i = 0; i < nitems(advanced_lookup) - 1; i++) 5005 if (advanced_lookup[i].num == num) 5006 return advanced_lookup[i].name; 5007 5008 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ 5009 return advanced_lookup[i].name; 5010 } 5011 5012 static void 5013 iwm_nic_umac_error(struct iwm_softc *sc) 5014 { 5015 struct iwm_umac_error_event_table table; 5016 uint32_t base; 5017 5018 base = sc->umac_error_event_table; 5019 5020 if (base < 0x800000) { 5021 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n", 5022 base); 5023 return; 5024 } 5025 5026 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) { 5027 device_printf(sc->sc_dev, "reading errlog failed\n"); 5028 return; 5029 } 5030 5031 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 5032 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n"); 5033 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n", 5034 sc->sc_flags, table.valid); 5035 } 5036 5037 device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id, 5038 iwm_desc_lookup(table.error_id)); 5039 device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1); 5040 device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2); 5041 device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n", 5042 table.ilink1); 5043 device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n", 5044 table.ilink2); 5045 device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1); 5046 device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2); 5047 device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3); 5048 device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major); 5049 device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor); 5050 device_printf(sc->sc_dev, "0x%08X | frame pointer\n", 5051 table.frame_pointer); 5052 device_printf(sc->sc_dev, "0x%08X | stack pointer\n", 5053 table.stack_pointer); 5054 device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header); 5055 device_printf(sc->sc_dev, "0x%08X | isr status reg\n", 5056 table.nic_isr_pref); 5057 } 5058 5059 /* 5060 * Support for dumping the error log seemed like a good idea ... 5061 * but it's mostly hex junk and the only sensible thing is the 5062 * hw/ucode revision (which we know anyway). Since it's here, 5063 * I'll just leave it in, just in case e.g. the Intel guys want to 5064 * help us decipher some "ADVANCED_SYSASSERT" later. 5065 */ 5066 static void 5067 iwm_nic_error(struct iwm_softc *sc) 5068 { 5069 struct iwm_error_event_table table; 5070 uint32_t base; 5071 5072 device_printf(sc->sc_dev, "dumping device error log\n"); 5073 base = sc->error_event_table; 5074 if (base < 0x800000) { 5075 device_printf(sc->sc_dev, 5076 "Invalid error log pointer 0x%08x\n", base); 5077 return; 5078 } 5079 5080 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) { 5081 device_printf(sc->sc_dev, "reading errlog failed\n"); 5082 return; 5083 } 5084 5085 if (!table.valid) { 5086 device_printf(sc->sc_dev, "errlog not found, skipping\n"); 5087 return; 5088 } 5089 5090 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 5091 device_printf(sc->sc_dev, "Start Error Log Dump:\n"); 5092 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n", 5093 sc->sc_flags, table.valid); 5094 } 5095 5096 device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id, 5097 iwm_desc_lookup(table.error_id)); 5098 device_printf(sc->sc_dev, "%08X | trm_hw_status0\n", 5099 table.trm_hw_status0); 5100 device_printf(sc->sc_dev, "%08X | trm_hw_status1\n", 5101 table.trm_hw_status1); 5102 device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2); 5103 device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1); 5104 device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2); 5105 device_printf(sc->sc_dev, "%08X | data1\n", table.data1); 5106 device_printf(sc->sc_dev, "%08X | data2\n", table.data2); 5107 device_printf(sc->sc_dev, "%08X | data3\n", table.data3); 5108 device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time); 5109 device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low); 5110 device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi); 5111 device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1); 5112 device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2); 5113 device_printf(sc->sc_dev, "%08X | uCode revision type\n", 5114 table.fw_rev_type); 5115 device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major); 5116 device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor); 5117 device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver); 5118 device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver); 5119 device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd); 5120 device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0); 5121 device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1); 5122 device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2); 5123 device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3); 5124 device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4); 5125 device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id); 5126 device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event); 5127 device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control); 5128 device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration); 5129 device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid); 5130 device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match); 5131 device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); 5132 device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp); 5133 device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler); 5134 5135 if (sc->umac_error_event_table) 5136 iwm_nic_umac_error(sc); 5137 } 5138 #endif 5139 5140 static void 5141 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m) 5142 { 5143 struct ieee80211com *ic = &sc->sc_ic; 5144 struct iwm_cmd_response *cresp; 5145 struct mbuf *m1; 5146 uint32_t offset = 0; 5147 uint32_t maxoff = IWM_RBUF_SIZE; 5148 uint32_t nextoff; 5149 boolean_t stolen = FALSE; 5150 5151 #define HAVEROOM(a) \ 5152 ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff) 5153 5154 while (HAVEROOM(offset)) { 5155 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, 5156 offset); 5157 int qid, idx, code, len; 5158 5159 qid = pkt->hdr.qid; 5160 idx = pkt->hdr.idx; 5161 5162 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code); 5163 5164 /* 5165 * randomly get these from the firmware, no idea why. 5166 * they at least seem harmless, so just ignore them for now 5167 */ 5168 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) || 5169 pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) { 5170 break; 5171 } 5172 5173 IWM_DPRINTF(sc, IWM_DEBUG_INTR, 5174 "rx packet qid=%d idx=%d type=%x\n", 5175 qid & ~0x80, pkt->hdr.idx, code); 5176 5177 len = iwm_rx_packet_len(pkt); 5178 len += sizeof(uint32_t); /* account for status word */ 5179 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN); 5180 5181 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt); 5182 5183 switch (code) { 5184 case IWM_REPLY_RX_PHY_CMD: 5185 iwm_mvm_rx_rx_phy_cmd(sc, pkt); 5186 break; 5187 5188 case IWM_REPLY_RX_MPDU_CMD: { 5189 /* 5190 * If this is the last frame in the RX buffer, we 5191 * can directly feed the mbuf to the sharks here. 5192 */ 5193 struct iwm_rx_packet *nextpkt = mtodoff(m, 5194 struct iwm_rx_packet *, nextoff); 5195 if (!HAVEROOM(nextoff) || 5196 (nextpkt->hdr.code == 0 && 5197 (nextpkt->hdr.qid & ~0x80) == 0 && 5198 nextpkt->hdr.idx == 0) || 5199 (nextpkt->len_n_flags == 5200 htole32(IWM_FH_RSCSR_FRAME_INVALID))) { 5201 if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) { 5202 stolen = FALSE; 5203 /* Make sure we abort the loop */ 5204 nextoff = maxoff; 5205 } 5206 break; 5207 } 5208 5209 /* 5210 * Use m_copym instead of m_split, because that 5211 * makes it easier to keep a valid rx buffer in 5212 * the ring, when iwm_mvm_rx_rx_mpdu() fails. 5213 * 5214 * We need to start m_copym() at offset 0, to get the 5215 * M_PKTHDR flag preserved. 5216 */ 5217 m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT); 5218 if (m1) { 5219 if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen)) 5220 stolen = TRUE; 5221 else 5222 m_freem(m1); 5223 } 5224 break; 5225 } 5226 5227 case IWM_TX_CMD: 5228 iwm_mvm_rx_tx_cmd(sc, pkt); 5229 break; 5230 5231 case IWM_MISSED_BEACONS_NOTIFICATION: { 5232 struct iwm_missed_beacons_notif *resp; 5233 int missed; 5234 5235 /* XXX look at mac_id to determine interface ID */ 5236 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5237 5238 resp = (void *)pkt->data; 5239 missed = le32toh(resp->consec_missed_beacons); 5240 5241 IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE, 5242 "%s: MISSED_BEACON: mac_id=%d, " 5243 "consec_since_last_rx=%d, consec=%d, num_expect=%d " 5244 "num_rx=%d\n", 5245 __func__, 5246 le32toh(resp->mac_id), 5247 le32toh(resp->consec_missed_beacons_since_last_rx), 5248 le32toh(resp->consec_missed_beacons), 5249 le32toh(resp->num_expected_beacons), 5250 le32toh(resp->num_recvd_beacons)); 5251 5252 /* Be paranoid */ 5253 if (vap == NULL) 5254 break; 5255 5256 /* XXX no net80211 locking? */ 5257 if (vap->iv_state == IEEE80211_S_RUN && 5258 (ic->ic_flags & IEEE80211_F_SCAN) == 0) { 5259 if (missed > vap->iv_bmissthreshold) { 5260 /* XXX bad locking; turn into task */ 5261 IWM_UNLOCK(sc); 5262 ieee80211_beacon_miss(ic); 5263 IWM_LOCK(sc); 5264 } 5265 } 5266 5267 break; } 5268 5269 case IWM_MFUART_LOAD_NOTIFICATION: 5270 break; 5271 5272 case IWM_MVM_ALIVE: 5273 break; 5274 5275 case IWM_CALIB_RES_NOTIF_PHY_DB: 5276 break; 5277 5278 case IWM_STATISTICS_NOTIFICATION: 5279 iwm_mvm_handle_rx_statistics(sc, pkt); 5280 break; 5281 5282 case IWM_NVM_ACCESS_CMD: 5283 case IWM_MCC_UPDATE_CMD: 5284 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) { 5285 memcpy(sc->sc_cmd_resp, 5286 pkt, sizeof(sc->sc_cmd_resp)); 5287 } 5288 break; 5289 5290 case IWM_MCC_CHUB_UPDATE_CMD: { 5291 struct iwm_mcc_chub_notif *notif; 5292 notif = (void *)pkt->data; 5293 5294 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8; 5295 sc->sc_fw_mcc[1] = notif->mcc & 0xff; 5296 sc->sc_fw_mcc[2] = '\0'; 5297 IWM_DPRINTF(sc, IWM_DEBUG_LAR, 5298 "fw source %d sent CC '%s'\n", 5299 notif->source_id, sc->sc_fw_mcc); 5300 break; 5301 } 5302 5303 case IWM_DTS_MEASUREMENT_NOTIFICATION: 5304 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP, 5305 IWM_DTS_MEASUREMENT_NOTIF_WIDE): { 5306 struct iwm_dts_measurement_notif_v1 *notif; 5307 5308 if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) { 5309 device_printf(sc->sc_dev, 5310 "Invalid DTS_MEASUREMENT_NOTIFICATION\n"); 5311 break; 5312 } 5313 notif = (void *)pkt->data; 5314 IWM_DPRINTF(sc, IWM_DEBUG_TEMP, 5315 "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n", 5316 notif->temp); 5317 break; 5318 } 5319 5320 case IWM_PHY_CONFIGURATION_CMD: 5321 case IWM_TX_ANT_CONFIGURATION_CMD: 5322 case IWM_ADD_STA: 5323 case IWM_MAC_CONTEXT_CMD: 5324 case IWM_REPLY_SF_CFG_CMD: 5325 case IWM_POWER_TABLE_CMD: 5326 case IWM_LTR_CONFIG: 5327 case IWM_PHY_CONTEXT_CMD: 5328 case IWM_BINDING_CONTEXT_CMD: 5329 case IWM_TIME_EVENT_CMD: 5330 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD): 5331 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC): 5332 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC): 5333 case IWM_SCAN_OFFLOAD_REQUEST_CMD: 5334 case IWM_SCAN_OFFLOAD_ABORT_CMD: 5335 case IWM_REPLY_BEACON_FILTERING_CMD: 5336 case IWM_MAC_PM_POWER_TABLE: 5337 case IWM_TIME_QUOTA_CMD: 5338 case IWM_REMOVE_STA: 5339 case IWM_TXPATH_FLUSH: 5340 case IWM_LQ_CMD: 5341 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, 5342 IWM_FW_PAGING_BLOCK_CMD): 5343 case IWM_BT_CONFIG: 5344 case IWM_REPLY_THERMAL_MNG_BACKOFF: 5345 cresp = (void *)pkt->data; 5346 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) { 5347 memcpy(sc->sc_cmd_resp, 5348 pkt, sizeof(*pkt)+sizeof(*cresp)); 5349 } 5350 break; 5351 5352 /* ignore */ 5353 case IWM_PHY_DB_CMD: 5354 break; 5355 5356 case IWM_INIT_COMPLETE_NOTIF: 5357 break; 5358 5359 case IWM_SCAN_OFFLOAD_COMPLETE: 5360 iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt); 5361 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) { 5362 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING; 5363 ieee80211_runtask(ic, &sc->sc_es_task); 5364 } 5365 break; 5366 5367 case IWM_SCAN_ITERATION_COMPLETE: { 5368 struct iwm_lmac_scan_complete_notif *notif; 5369 notif = (void *)pkt->data; 5370 break; 5371 } 5372 5373 case IWM_SCAN_COMPLETE_UMAC: 5374 iwm_mvm_rx_umac_scan_complete_notif(sc, pkt); 5375 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) { 5376 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING; 5377 ieee80211_runtask(ic, &sc->sc_es_task); 5378 } 5379 break; 5380 5381 case IWM_SCAN_ITERATION_COMPLETE_UMAC: { 5382 struct iwm_umac_scan_iter_complete_notif *notif; 5383 notif = (void *)pkt->data; 5384 5385 IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration " 5386 "complete, status=0x%x, %d channels scanned\n", 5387 notif->status, notif->scanned_channels); 5388 break; 5389 } 5390 5391 case IWM_REPLY_ERROR: { 5392 struct iwm_error_resp *resp; 5393 resp = (void *)pkt->data; 5394 5395 device_printf(sc->sc_dev, 5396 "firmware error 0x%x, cmd 0x%x\n", 5397 le32toh(resp->error_type), 5398 resp->cmd_id); 5399 break; 5400 } 5401 5402 case IWM_TIME_EVENT_NOTIFICATION: 5403 iwm_mvm_rx_time_event_notif(sc, pkt); 5404 break; 5405 5406 /* 5407 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG 5408 * messages. Just ignore them for now. 5409 */ 5410 case IWM_DEBUG_LOG_MSG: 5411 break; 5412 5413 case IWM_MCAST_FILTER_CMD: 5414 break; 5415 5416 case IWM_SCD_QUEUE_CFG: { 5417 struct iwm_scd_txq_cfg_rsp *rsp; 5418 rsp = (void *)pkt->data; 5419 5420 IWM_DPRINTF(sc, IWM_DEBUG_CMD, 5421 "queue cfg token=0x%x sta_id=%d " 5422 "tid=%d scd_queue=%d\n", 5423 rsp->token, rsp->sta_id, rsp->tid, 5424 rsp->scd_queue); 5425 break; 5426 } 5427 5428 default: 5429 device_printf(sc->sc_dev, 5430 "frame %d/%d %x UNHANDLED (this should " 5431 "not happen)\n", qid & ~0x80, idx, 5432 pkt->len_n_flags); 5433 break; 5434 } 5435 5436 /* 5437 * Why test bit 0x80? The Linux driver: 5438 * 5439 * There is one exception: uCode sets bit 15 when it 5440 * originates the response/notification, i.e. when the 5441 * response/notification is not a direct response to a 5442 * command sent by the driver. For example, uCode issues 5443 * IWM_REPLY_RX when it sends a received frame to the driver; 5444 * it is not a direct response to any driver command. 5445 * 5446 * Ok, so since when is 7 == 15? Well, the Linux driver 5447 * uses a slightly different format for pkt->hdr, and "qid" 5448 * is actually the upper byte of a two-byte field. 5449 */ 5450 if (!(qid & (1 << 7))) 5451 iwm_cmd_done(sc, pkt); 5452 5453 offset = nextoff; 5454 } 5455 if (stolen) 5456 m_freem(m); 5457 #undef HAVEROOM 5458 } 5459 5460 /* 5461 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt. 5462 * Basic structure from if_iwn 5463 */ 5464 static void 5465 iwm_notif_intr(struct iwm_softc *sc) 5466 { 5467 uint16_t hw; 5468 5469 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map, 5470 BUS_DMASYNC_POSTREAD); 5471 5472 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff; 5473 5474 /* 5475 * Process responses 5476 */ 5477 while (sc->rxq.cur != hw) { 5478 struct iwm_rx_ring *ring = &sc->rxq; 5479 struct iwm_rx_data *data = &ring->data[ring->cur]; 5480 5481 bus_dmamap_sync(ring->data_dmat, data->map, 5482 BUS_DMASYNC_POSTREAD); 5483 5484 IWM_DPRINTF(sc, IWM_DEBUG_INTR, 5485 "%s: hw = %d cur = %d\n", __func__, hw, ring->cur); 5486 iwm_handle_rxb(sc, data->m); 5487 5488 ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT; 5489 } 5490 5491 /* 5492 * Tell the firmware that it can reuse the ring entries that 5493 * we have just processed. 5494 * Seems like the hardware gets upset unless we align 5495 * the write by 8?? 5496 */ 5497 hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1; 5498 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8)); 5499 } 5500 5501 static void 5502 iwm_intr(void *arg) 5503 { 5504 struct iwm_softc *sc = arg; 5505 int handled = 0; 5506 int r1, r2, rv = 0; 5507 int isperiodic = 0; 5508 5509 #if defined(__DragonFly__) 5510 if (sc->sc_mem == NULL) { 5511 kprintf("iwm_intr: detached\n"); 5512 return; 5513 } 5514 #endif 5515 IWM_LOCK(sc); 5516 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0); 5517 5518 if (sc->sc_flags & IWM_FLAG_USE_ICT) { 5519 uint32_t *ict = sc->ict_dma.vaddr; 5520 int tmp; 5521 5522 tmp = htole32(ict[sc->ict_cur]); 5523 if (!tmp) 5524 goto out_ena; 5525 5526 /* 5527 * ok, there was something. keep plowing until we have all. 5528 */ 5529 r1 = r2 = 0; 5530 while (tmp) { 5531 r1 |= tmp; 5532 ict[sc->ict_cur] = 0; 5533 sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT; 5534 tmp = htole32(ict[sc->ict_cur]); 5535 } 5536 5537 /* this is where the fun begins. don't ask */ 5538 if (r1 == 0xffffffff) 5539 r1 = 0; 5540 5541 /* i am not expected to understand this */ 5542 if (r1 & 0xc0000) 5543 r1 |= 0x8000; 5544 r1 = (0xff & r1) | ((0xff00 & r1) << 16); 5545 } else { 5546 r1 = IWM_READ(sc, IWM_CSR_INT); 5547 /* "hardware gone" (where, fishing?) */ 5548 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 5549 goto out; 5550 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS); 5551 } 5552 if (r1 == 0 && r2 == 0) { 5553 goto out_ena; 5554 } 5555 5556 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask); 5557 5558 /* Safely ignore these bits for debug checks below */ 5559 r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD); 5560 5561 if (r1 & IWM_CSR_INT_BIT_SW_ERR) { 5562 int i; 5563 struct ieee80211com *ic = &sc->sc_ic; 5564 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5565 5566 #ifdef IWM_DEBUG 5567 iwm_nic_error(sc); 5568 #endif 5569 /* Dump driver status (TX and RX rings) while we're here. */ 5570 device_printf(sc->sc_dev, "driver status:\n"); 5571 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) { 5572 struct iwm_tx_ring *ring = &sc->txq[i]; 5573 device_printf(sc->sc_dev, 5574 " tx ring %2d: qid=%-2d cur=%-3d " 5575 "queued=%-3d\n", 5576 i, ring->qid, ring->cur, ring->queued); 5577 } 5578 device_printf(sc->sc_dev, 5579 " rx ring: cur=%d\n", sc->rxq.cur); 5580 device_printf(sc->sc_dev, 5581 " 802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state); 5582 5583 /* Reset our firmware state tracking. */ 5584 sc->sc_firmware_state = 0; 5585 /* Don't stop the device; just do a VAP restart */ 5586 IWM_UNLOCK(sc); 5587 5588 if (vap == NULL) { 5589 kprintf("%s: null vap\n", __func__); 5590 return; 5591 } 5592 5593 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; " 5594 "restarting\n", __func__, vap->iv_state); 5595 5596 ieee80211_restart_all(ic); 5597 return; 5598 } 5599 5600 if (r1 & IWM_CSR_INT_BIT_HW_ERR) { 5601 handled |= IWM_CSR_INT_BIT_HW_ERR; 5602 device_printf(sc->sc_dev, "hardware error, stopping device\n"); 5603 iwm_stop(sc); 5604 rv = 1; 5605 goto out; 5606 } 5607 5608 /* firmware chunk loaded */ 5609 if (r1 & IWM_CSR_INT_BIT_FH_TX) { 5610 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK); 5611 handled |= IWM_CSR_INT_BIT_FH_TX; 5612 sc->sc_fw_chunk_done = 1; 5613 wakeup(&sc->sc_fw); 5614 } 5615 5616 if (r1 & IWM_CSR_INT_BIT_RF_KILL) { 5617 handled |= IWM_CSR_INT_BIT_RF_KILL; 5618 if (iwm_check_rfkill(sc)) { 5619 device_printf(sc->sc_dev, 5620 "%s: rfkill switch, disabling interface\n", 5621 __func__); 5622 iwm_stop(sc); 5623 } 5624 } 5625 5626 /* 5627 * The Linux driver uses periodic interrupts to avoid races. 5628 * We cargo-cult like it's going out of fashion. 5629 */ 5630 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) { 5631 handled |= IWM_CSR_INT_BIT_RX_PERIODIC; 5632 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC); 5633 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0) 5634 IWM_WRITE_1(sc, 5635 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS); 5636 isperiodic = 1; 5637 } 5638 5639 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) { 5640 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX); 5641 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK); 5642 5643 iwm_notif_intr(sc); 5644 5645 /* enable periodic interrupt, see above */ 5646 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic) 5647 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, 5648 IWM_CSR_INT_PERIODIC_ENA); 5649 } 5650 5651 if (__predict_false(r1 & ~handled)) 5652 IWM_DPRINTF(sc, IWM_DEBUG_INTR, 5653 "%s: unhandled interrupts: %x\n", __func__, r1); 5654 rv = 1; 5655 5656 out_ena: 5657 iwm_restore_interrupts(sc); 5658 out: 5659 IWM_UNLOCK(sc); 5660 return; 5661 } 5662 5663 /* 5664 * Autoconf glue-sniffing 5665 */ 5666 #define PCI_VENDOR_INTEL 0x8086 5667 #define PCI_PRODUCT_INTEL_WL_3160_1 0x08b3 5668 #define PCI_PRODUCT_INTEL_WL_3160_2 0x08b4 5669 #define PCI_PRODUCT_INTEL_WL_3165_1 0x3165 5670 #define PCI_PRODUCT_INTEL_WL_3165_2 0x3166 5671 #define PCI_PRODUCT_INTEL_WL_3168 0x24fb 5672 #define PCI_PRODUCT_INTEL_WL_7260_1 0x08b1 5673 #define PCI_PRODUCT_INTEL_WL_7260_2 0x08b2 5674 #define PCI_PRODUCT_INTEL_WL_7265_1 0x095a 5675 #define PCI_PRODUCT_INTEL_WL_7265_2 0x095b 5676 #define PCI_PRODUCT_INTEL_WL_8260_1 0x24f3 5677 #define PCI_PRODUCT_INTEL_WL_8260_2 0x24f4 5678 #define PCI_PRODUCT_INTEL_WL_8265 0x24fd 5679 5680 static const struct iwm_devices { 5681 uint16_t device; 5682 const struct iwm_cfg *cfg; 5683 } iwm_devices[] = { 5684 { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg }, 5685 { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg }, 5686 { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg }, 5687 { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg }, 5688 { PCI_PRODUCT_INTEL_WL_3168, &iwm3168_cfg }, 5689 { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg }, 5690 { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg }, 5691 { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg }, 5692 { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg }, 5693 { PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg }, 5694 { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg }, 5695 { PCI_PRODUCT_INTEL_WL_8265, &iwm8265_cfg }, 5696 }; 5697 5698 static int 5699 iwm_probe(device_t dev) 5700 { 5701 int i; 5702 5703 for (i = 0; i < nitems(iwm_devices); i++) { 5704 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL && 5705 pci_get_device(dev) == iwm_devices[i].device) { 5706 device_set_desc(dev, iwm_devices[i].cfg->name); 5707 return (BUS_PROBE_DEFAULT); 5708 } 5709 } 5710 5711 return (ENXIO); 5712 } 5713 5714 static int 5715 iwm_dev_check(device_t dev) 5716 { 5717 struct iwm_softc *sc; 5718 uint16_t devid; 5719 int i; 5720 5721 sc = device_get_softc(dev); 5722 5723 devid = pci_get_device(dev); 5724 for (i = 0; i < NELEM(iwm_devices); i++) { 5725 if (iwm_devices[i].device == devid) { 5726 sc->cfg = iwm_devices[i].cfg; 5727 return (0); 5728 } 5729 } 5730 device_printf(dev, "unknown adapter type\n"); 5731 return ENXIO; 5732 } 5733 5734 /* PCI registers */ 5735 #define PCI_CFG_RETRY_TIMEOUT 0x041 5736 5737 static int 5738 iwm_pci_attach(device_t dev) 5739 { 5740 struct iwm_softc *sc; 5741 int count, error, rid; 5742 uint16_t reg; 5743 #if defined(__DragonFly__) 5744 int irq_flags; 5745 #endif 5746 5747 sc = device_get_softc(dev); 5748 5749 /* We disable the RETRY_TIMEOUT register (0x41) to keep 5750 * PCI Tx retries from interfering with C3 CPU state */ 5751 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1); 5752 5753 /* Enable bus-mastering and hardware bug workaround. */ 5754 pci_enable_busmaster(dev); 5755 reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg)); 5756 /* if !MSI */ 5757 if (reg & PCIM_STATUS_INTxSTATE) { 5758 reg &= ~PCIM_STATUS_INTxSTATE; 5759 } 5760 pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg)); 5761 5762 rid = PCIR_BAR(0); 5763 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 5764 RF_ACTIVE); 5765 if (sc->sc_mem == NULL) { 5766 device_printf(sc->sc_dev, "can't map mem space\n"); 5767 return (ENXIO); 5768 } 5769 sc->sc_st = rman_get_bustag(sc->sc_mem); 5770 sc->sc_sh = rman_get_bushandle(sc->sc_mem); 5771 5772 /* Install interrupt handler. */ 5773 count = 1; 5774 rid = 0; 5775 #if defined(__DragonFly__) 5776 pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags); 5777 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags); 5778 #else 5779 if (pci_alloc_msi(dev, &count) == 0) 5780 rid = 1; 5781 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 5782 (rid != 0 ? 0 : RF_SHAREABLE)); 5783 #endif 5784 if (sc->sc_irq == NULL) { 5785 device_printf(dev, "can't map interrupt\n"); 5786 return (ENXIO); 5787 } 5788 #if defined(__DragonFly__) 5789 error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE, 5790 iwm_intr, sc, &sc->sc_ih, 5791 &wlan_global_serializer); 5792 #else 5793 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, 5794 NULL, iwm_intr, sc, &sc->sc_ih); 5795 #endif 5796 if (sc->sc_ih == NULL) { 5797 device_printf(dev, "can't establish interrupt"); 5798 #if defined(__DragonFly__) 5799 pci_release_msi(dev); 5800 #endif 5801 return (ENXIO); 5802 } 5803 sc->sc_dmat = bus_get_dma_tag(sc->sc_dev); 5804 5805 return (0); 5806 } 5807 5808 static void 5809 iwm_pci_detach(device_t dev) 5810 { 5811 struct iwm_softc *sc = device_get_softc(dev); 5812 5813 if (sc->sc_irq != NULL) { 5814 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); 5815 bus_release_resource(dev, SYS_RES_IRQ, 5816 rman_get_rid(sc->sc_irq), sc->sc_irq); 5817 pci_release_msi(dev); 5818 #if defined(__DragonFly__) 5819 sc->sc_irq = NULL; 5820 #endif 5821 } 5822 if (sc->sc_mem != NULL) { 5823 bus_release_resource(dev, SYS_RES_MEMORY, 5824 rman_get_rid(sc->sc_mem), sc->sc_mem); 5825 #if defined(__DragonFly__) 5826 sc->sc_mem = NULL; 5827 #endif 5828 } 5829 } 5830 5831 5832 5833 static int 5834 iwm_attach(device_t dev) 5835 { 5836 struct iwm_softc *sc = device_get_softc(dev); 5837 struct ieee80211com *ic = &sc->sc_ic; 5838 int error; 5839 int txq_i, i; 5840 5841 sc->sc_dev = dev; 5842 sc->sc_attached = 1; 5843 IWM_LOCK_INIT(sc); 5844 mbufq_init(&sc->sc_snd, ifqmaxlen); 5845 #if defined(__DragonFly__) 5846 callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk); 5847 #else 5848 callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0); 5849 #endif 5850 callout_init(&sc->sc_led_blink_to); 5851 TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc); 5852 5853 sc->sc_notif_wait = iwm_notification_wait_init(sc); 5854 if (sc->sc_notif_wait == NULL) { 5855 device_printf(dev, "failed to init notification wait struct\n"); 5856 goto fail; 5857 } 5858 5859 sc->sf_state = IWM_SF_UNINIT; 5860 5861 /* Init phy db */ 5862 sc->sc_phy_db = iwm_phy_db_init(sc); 5863 if (!sc->sc_phy_db) { 5864 device_printf(dev, "Cannot init phy_db\n"); 5865 goto fail; 5866 } 5867 5868 /* Set EBS as successful as long as not stated otherwise by the FW. */ 5869 sc->last_ebs_successful = TRUE; 5870 5871 /* PCI attach */ 5872 error = iwm_pci_attach(dev); 5873 if (error != 0) 5874 goto fail; 5875 5876 sc->sc_wantresp = -1; 5877 5878 /* Match device id */ 5879 error = iwm_dev_check(dev); 5880 if (error != 0) 5881 goto fail; 5882 5883 sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV); 5884 /* 5885 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have 5886 * changed, and now the revision step also includes bit 0-1 (no more 5887 * "dash" value). To keep hw_rev backwards compatible - we'll store it 5888 * in the old format. 5889 */ 5890 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) { 5891 int ret; 5892 uint32_t hw_step; 5893 5894 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) | 5895 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2); 5896 5897 if (iwm_prepare_card_hw(sc) != 0) { 5898 device_printf(dev, "could not initialize hardware\n"); 5899 goto fail; 5900 } 5901 5902 /* 5903 * In order to recognize C step the driver should read the 5904 * chip version id located at the AUX bus MISC address. 5905 */ 5906 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, 5907 IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 5908 DELAY(2); 5909 5910 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL, 5911 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 5912 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 5913 25000); 5914 if (!ret) { 5915 device_printf(sc->sc_dev, 5916 "Failed to wake up the nic\n"); 5917 goto fail; 5918 } 5919 5920 if (iwm_nic_lock(sc)) { 5921 hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG); 5922 hw_step |= IWM_ENABLE_WFPM; 5923 iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step); 5924 hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG); 5925 hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF; 5926 if (hw_step == 0x3) 5927 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) | 5928 (IWM_SILICON_C_STEP << 2); 5929 iwm_nic_unlock(sc); 5930 } else { 5931 device_printf(sc->sc_dev, "Failed to lock the nic\n"); 5932 goto fail; 5933 } 5934 } 5935 5936 /* special-case 7265D, it has the same PCI IDs. */ 5937 if (sc->cfg == &iwm7265_cfg && 5938 (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) { 5939 sc->cfg = &iwm7265d_cfg; 5940 } 5941 5942 /* Allocate DMA memory for firmware transfers. */ 5943 if ((error = iwm_alloc_fwmem(sc)) != 0) { 5944 device_printf(dev, "could not allocate memory for firmware\n"); 5945 goto fail; 5946 } 5947 5948 /* Allocate "Keep Warm" page. */ 5949 if ((error = iwm_alloc_kw(sc)) != 0) { 5950 device_printf(dev, "could not allocate keep warm page\n"); 5951 goto fail; 5952 } 5953 5954 /* We use ICT interrupts */ 5955 if ((error = iwm_alloc_ict(sc)) != 0) { 5956 device_printf(dev, "could not allocate ICT table\n"); 5957 goto fail; 5958 } 5959 5960 /* Allocate TX scheduler "rings". */ 5961 if ((error = iwm_alloc_sched(sc)) != 0) { 5962 device_printf(dev, "could not allocate TX scheduler rings\n"); 5963 goto fail; 5964 } 5965 5966 /* Allocate TX rings */ 5967 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) { 5968 if ((error = iwm_alloc_tx_ring(sc, 5969 &sc->txq[txq_i], txq_i)) != 0) { 5970 device_printf(dev, 5971 "could not allocate TX ring %d\n", 5972 txq_i); 5973 goto fail; 5974 } 5975 } 5976 5977 /* Allocate RX ring. */ 5978 if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) { 5979 device_printf(dev, "could not allocate RX ring\n"); 5980 goto fail; 5981 } 5982 5983 /* Clear pending interrupts. */ 5984 IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff); 5985 5986 ic->ic_softc = sc; 5987 ic->ic_name = device_get_nameunit(sc->sc_dev); 5988 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 5989 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 5990 5991 /* Set device capabilities. */ 5992 ic->ic_caps = 5993 IEEE80211_C_STA | 5994 IEEE80211_C_WPA | /* WPA/RSN */ 5995 IEEE80211_C_WME | 5996 IEEE80211_C_PMGT | 5997 IEEE80211_C_SHSLOT | /* short slot time supported */ 5998 IEEE80211_C_SHPREAMBLE /* short preamble supported */ 5999 // IEEE80211_C_BGSCAN /* capable of bg scanning */ 6000 ; 6001 /* Advertise full-offload scanning */ 6002 ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD; 6003 for (i = 0; i < nitems(sc->sc_phyctxt); i++) { 6004 sc->sc_phyctxt[i].id = i; 6005 sc->sc_phyctxt[i].color = 0; 6006 sc->sc_phyctxt[i].ref = 0; 6007 sc->sc_phyctxt[i].channel = NULL; 6008 } 6009 6010 /* Default noise floor */ 6011 sc->sc_noise = -96; 6012 6013 /* Max RSSI */ 6014 sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM; 6015 6016 #ifdef IWM_DEBUG 6017 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 6018 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", 6019 CTLFLAG_RW, &sc->sc_debug, 0, "control debugging"); 6020 #endif 6021 6022 error = iwm_read_firmware(sc); 6023 if (error) { 6024 goto fail; 6025 } else if (sc->sc_fw.fw_fp == NULL) { 6026 /* 6027 * XXX Add a solution for properly deferring firmware load 6028 * during bootup. 6029 */ 6030 goto fail; 6031 } else { 6032 sc->sc_preinit_hook.ich_func = iwm_preinit; 6033 sc->sc_preinit_hook.ich_arg = sc; 6034 sc->sc_preinit_hook.ich_desc = "iwm"; 6035 if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) { 6036 device_printf(dev, 6037 "config_intrhook_establish failed\n"); 6038 goto fail; 6039 } 6040 } 6041 6042 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE, 6043 "<-%s\n", __func__); 6044 6045 return 0; 6046 6047 /* Free allocated memory if something failed during attachment. */ 6048 fail: 6049 iwm_detach_local(sc, 0); 6050 6051 return ENXIO; 6052 } 6053 6054 static int 6055 iwm_is_valid_ether_addr(uint8_t *addr) 6056 { 6057 char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 6058 6059 if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr)) 6060 return (FALSE); 6061 6062 return (TRUE); 6063 } 6064 6065 static int 6066 iwm_wme_update(struct ieee80211com *ic) 6067 { 6068 #define IWM_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 6069 struct iwm_softc *sc = ic->ic_softc; 6070 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6071 struct iwm_vap *ivp = IWM_VAP(vap); 6072 struct iwm_node *in; 6073 struct wmeParams tmp[WME_NUM_AC]; 6074 int aci, error; 6075 6076 if (vap == NULL) 6077 return (0); 6078 6079 IEEE80211_LOCK(ic); 6080 for (aci = 0; aci < WME_NUM_AC; aci++) 6081 tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 6082 IEEE80211_UNLOCK(ic); 6083 6084 IWM_LOCK(sc); 6085 for (aci = 0; aci < WME_NUM_AC; aci++) { 6086 const struct wmeParams *ac = &tmp[aci]; 6087 ivp->queue_params[aci].aifsn = ac->wmep_aifsn; 6088 ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin); 6089 ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax); 6090 ivp->queue_params[aci].edca_txop = 6091 IEEE80211_TXOP_TO_US(ac->wmep_txopLimit); 6092 } 6093 ivp->have_wme = TRUE; 6094 if (ivp->is_uploaded && vap->iv_bss != NULL) { 6095 in = IWM_NODE(vap->iv_bss); 6096 if (in->in_assoc) { 6097 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) { 6098 device_printf(sc->sc_dev, 6099 "%s: failed to update MAC\n", __func__); 6100 } 6101 } 6102 } 6103 IWM_UNLOCK(sc); 6104 6105 return (0); 6106 #undef IWM_EXP2 6107 } 6108 6109 static void 6110 iwm_preinit(void *arg) 6111 { 6112 struct iwm_softc *sc = arg; 6113 device_t dev = sc->sc_dev; 6114 struct ieee80211com *ic = &sc->sc_ic; 6115 int error; 6116 6117 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE, 6118 "->%s\n", __func__); 6119 6120 IWM_LOCK(sc); 6121 if ((error = iwm_start_hw(sc)) != 0) { 6122 device_printf(dev, "could not initialize hardware\n"); 6123 IWM_UNLOCK(sc); 6124 goto fail; 6125 } 6126 6127 error = iwm_run_init_mvm_ucode(sc, 1); 6128 iwm_stop_device(sc); 6129 if (error) { 6130 IWM_UNLOCK(sc); 6131 goto fail; 6132 } 6133 device_printf(dev, 6134 "hw rev 0x%x, fw ver %s, address %s\n", 6135 sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, 6136 sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr)); 6137 6138 /* not all hardware can do 5GHz band */ 6139 if (!sc->nvm_data->sku_cap_band_52GHz_enable) 6140 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0, 6141 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A])); 6142 IWM_UNLOCK(sc); 6143 6144 iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, 6145 ic->ic_channels); 6146 6147 /* 6148 * At this point we've committed - if we fail to do setup, 6149 * we now also have to tear down the net80211 state. 6150 */ 6151 ieee80211_ifattach(ic); 6152 ic->ic_vap_create = iwm_vap_create; 6153 ic->ic_vap_delete = iwm_vap_delete; 6154 ic->ic_raw_xmit = iwm_raw_xmit; 6155 ic->ic_node_alloc = iwm_node_alloc; 6156 ic->ic_scan_start = iwm_scan_start; 6157 ic->ic_scan_end = iwm_scan_end; 6158 ic->ic_update_mcast = iwm_update_mcast; 6159 ic->ic_getradiocaps = iwm_init_channel_map; 6160 ic->ic_set_channel = iwm_set_channel; 6161 ic->ic_scan_curchan = iwm_scan_curchan; 6162 ic->ic_scan_mindwell = iwm_scan_mindwell; 6163 ic->ic_wme.wme_update = iwm_wme_update; 6164 ic->ic_parent = iwm_parent; 6165 ic->ic_transmit = iwm_transmit; 6166 iwm_radiotap_attach(sc); 6167 if (bootverbose) 6168 ieee80211_announce(ic); 6169 6170 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE, 6171 "<-%s\n", __func__); 6172 config_intrhook_disestablish(&sc->sc_preinit_hook); 6173 6174 return; 6175 fail: 6176 config_intrhook_disestablish(&sc->sc_preinit_hook); 6177 iwm_detach_local(sc, 0); 6178 } 6179 6180 /* 6181 * Attach the interface to 802.11 radiotap. 6182 */ 6183 static void 6184 iwm_radiotap_attach(struct iwm_softc *sc) 6185 { 6186 struct ieee80211com *ic = &sc->sc_ic; 6187 6188 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE, 6189 "->%s begin\n", __func__); 6190 ieee80211_radiotap_attach(ic, 6191 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), 6192 IWM_TX_RADIOTAP_PRESENT, 6193 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), 6194 IWM_RX_RADIOTAP_PRESENT); 6195 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE, 6196 "->%s end\n", __func__); 6197 } 6198 6199 static struct ieee80211vap * 6200 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 6201 enum ieee80211_opmode opmode, int flags, 6202 const uint8_t bssid[IEEE80211_ADDR_LEN], 6203 const uint8_t mac[IEEE80211_ADDR_LEN]) 6204 { 6205 struct iwm_vap *ivp; 6206 struct ieee80211vap *vap; 6207 6208 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 6209 return NULL; 6210 ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO); 6211 vap = &ivp->iv_vap; 6212 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); 6213 vap->iv_bmissthreshold = 10; /* override default */ 6214 /* Override with driver methods. */ 6215 ivp->iv_newstate = vap->iv_newstate; 6216 vap->iv_newstate = iwm_newstate; 6217 6218 ivp->id = IWM_DEFAULT_MACID; 6219 ivp->color = IWM_DEFAULT_COLOR; 6220 6221 ivp->have_wme = FALSE; 6222 ivp->ps_disabled = FALSE; 6223 6224 ieee80211_ratectl_init(vap); 6225 /* Complete setup. */ 6226 ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status, 6227 mac); 6228 ic->ic_opmode = opmode; 6229 6230 return vap; 6231 } 6232 6233 static void 6234 iwm_vap_delete(struct ieee80211vap *vap) 6235 { 6236 struct iwm_vap *ivp = IWM_VAP(vap); 6237 6238 ieee80211_ratectl_deinit(vap); 6239 ieee80211_vap_detach(vap); 6240 kfree(ivp, M_80211_VAP); 6241 } 6242 6243 static void 6244 iwm_xmit_queue_drain(struct iwm_softc *sc) 6245 { 6246 struct mbuf *m; 6247 struct ieee80211_node *ni; 6248 6249 while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { 6250 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 6251 ieee80211_free_node(ni); 6252 m_freem(m); 6253 } 6254 } 6255 6256 static void 6257 iwm_scan_start(struct ieee80211com *ic) 6258 { 6259 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6260 struct iwm_softc *sc = ic->ic_softc; 6261 int error; 6262 6263 IWM_LOCK(sc); 6264 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) { 6265 /* This should not be possible */ 6266 device_printf(sc->sc_dev, 6267 "%s: Previous scan not completed yet\n", __func__); 6268 } 6269 if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) 6270 error = iwm_mvm_umac_scan(sc); 6271 else 6272 error = iwm_mvm_lmac_scan(sc); 6273 if (error != 0) { 6274 device_printf(sc->sc_dev, "could not initiate scan\n"); 6275 IWM_UNLOCK(sc); 6276 ieee80211_cancel_scan(vap); 6277 } else { 6278 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING; 6279 iwm_led_blink_start(sc); 6280 IWM_UNLOCK(sc); 6281 } 6282 } 6283 6284 static void 6285 iwm_scan_end(struct ieee80211com *ic) 6286 { 6287 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6288 struct iwm_softc *sc = ic->ic_softc; 6289 6290 IWM_LOCK(sc); 6291 iwm_led_blink_stop(sc); 6292 if (vap->iv_state == IEEE80211_S_RUN) 6293 iwm_mvm_led_enable(sc); 6294 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) { 6295 /* 6296 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because 6297 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq 6298 * taskqueue. 6299 */ 6300 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING; 6301 iwm_mvm_scan_stop_wait(sc); 6302 } 6303 IWM_UNLOCK(sc); 6304 6305 /* 6306 * Make sure we don't race, if sc_es_task is still enqueued here. 6307 * This is to make sure that it won't call ieee80211_scan_done 6308 * when we have already started the next scan. 6309 */ 6310 taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL); 6311 } 6312 6313 static void 6314 iwm_update_mcast(struct ieee80211com *ic) 6315 { 6316 } 6317 6318 static void 6319 iwm_set_channel(struct ieee80211com *ic) 6320 { 6321 } 6322 6323 static void 6324 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 6325 { 6326 } 6327 6328 static void 6329 iwm_scan_mindwell(struct ieee80211_scan_state *ss) 6330 { 6331 return; 6332 } 6333 6334 void 6335 iwm_init_task(void *arg1) 6336 { 6337 struct iwm_softc *sc = arg1; 6338 6339 IWM_LOCK(sc); 6340 while (sc->sc_flags & IWM_FLAG_BUSY) { 6341 #if defined(__DragonFly__) 6342 lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0); 6343 #else 6344 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0); 6345 #endif 6346 } 6347 sc->sc_flags |= IWM_FLAG_BUSY; 6348 iwm_stop(sc); 6349 if (sc->sc_ic.ic_nrunning > 0) 6350 iwm_init(sc); 6351 sc->sc_flags &= ~IWM_FLAG_BUSY; 6352 wakeup(&sc->sc_flags); 6353 IWM_UNLOCK(sc); 6354 } 6355 6356 static int 6357 iwm_resume(device_t dev) 6358 { 6359 struct iwm_softc *sc = device_get_softc(dev); 6360 int do_reinit = 0; 6361 6362 /* 6363 * We disable the RETRY_TIMEOUT register (0x41) to keep 6364 * PCI Tx retries from interfering with C3 CPU state. 6365 */ 6366 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1); 6367 6368 if (!sc->sc_attached) 6369 return 0; 6370 6371 iwm_init_task(device_get_softc(dev)); 6372 6373 IWM_LOCK(sc); 6374 if (sc->sc_flags & IWM_FLAG_SCANNING) { 6375 sc->sc_flags &= ~IWM_FLAG_SCANNING; 6376 do_reinit = 1; 6377 } 6378 IWM_UNLOCK(sc); 6379 6380 if (do_reinit) 6381 ieee80211_resume_all(&sc->sc_ic); 6382 6383 return 0; 6384 } 6385 6386 static int 6387 iwm_suspend(device_t dev) 6388 { 6389 int do_stop = 0; 6390 struct iwm_softc *sc = device_get_softc(dev); 6391 6392 do_stop = !! (sc->sc_ic.ic_nrunning > 0); 6393 6394 if (!sc->sc_attached) 6395 return (0); 6396 6397 ieee80211_suspend_all(&sc->sc_ic); 6398 6399 if (do_stop) { 6400 IWM_LOCK(sc); 6401 iwm_stop(sc); 6402 sc->sc_flags |= IWM_FLAG_SCANNING; 6403 IWM_UNLOCK(sc); 6404 } 6405 6406 return (0); 6407 } 6408 6409 static int 6410 iwm_detach_local(struct iwm_softc *sc, int do_net80211) 6411 { 6412 struct iwm_fw_info *fw = &sc->sc_fw; 6413 device_t dev = sc->sc_dev; 6414 int i; 6415 6416 if (!sc->sc_attached) 6417 return 0; 6418 sc->sc_attached = 0; 6419 if (do_net80211) { 6420 ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task); 6421 } 6422 callout_drain(&sc->sc_led_blink_to); 6423 callout_drain(&sc->sc_watchdog_to); 6424 iwm_stop_device(sc); 6425 if (do_net80211) { 6426 IWM_LOCK(sc); 6427 iwm_xmit_queue_drain(sc); 6428 IWM_UNLOCK(sc); 6429 ieee80211_ifdetach(&sc->sc_ic); 6430 } 6431 6432 iwm_phy_db_free(sc->sc_phy_db); 6433 sc->sc_phy_db = NULL; 6434 6435 iwm_free_nvm_data(sc->nvm_data); 6436 6437 /* Free descriptor rings */ 6438 iwm_free_rx_ring(sc, &sc->rxq); 6439 for (i = 0; i < nitems(sc->txq); i++) 6440 iwm_free_tx_ring(sc, &sc->txq[i]); 6441 6442 /* Free firmware */ 6443 if (fw->fw_fp != NULL) 6444 iwm_fw_info_free(fw); 6445 6446 /* Free scheduler */ 6447 iwm_dma_contig_free(&sc->sched_dma); 6448 iwm_dma_contig_free(&sc->ict_dma); 6449 iwm_dma_contig_free(&sc->kw_dma); 6450 iwm_dma_contig_free(&sc->fw_dma); 6451 6452 iwm_free_fw_paging(sc); 6453 6454 /* Finished with the hardware - detach things */ 6455 iwm_pci_detach(dev); 6456 6457 if (sc->sc_notif_wait != NULL) { 6458 iwm_notification_wait_free(sc->sc_notif_wait); 6459 sc->sc_notif_wait = NULL; 6460 } 6461 6462 IWM_LOCK_DESTROY(sc); 6463 6464 return (0); 6465 } 6466 6467 static int 6468 iwm_detach(device_t dev) 6469 { 6470 struct iwm_softc *sc = device_get_softc(dev); 6471 6472 return (iwm_detach_local(sc, 1)); 6473 } 6474 6475 static device_method_t iwm_pci_methods[] = { 6476 /* Device interface */ 6477 DEVMETHOD(device_probe, iwm_probe), 6478 DEVMETHOD(device_attach, iwm_attach), 6479 DEVMETHOD(device_detach, iwm_detach), 6480 DEVMETHOD(device_suspend, iwm_suspend), 6481 DEVMETHOD(device_resume, iwm_resume), 6482 6483 DEVMETHOD_END 6484 }; 6485 6486 static driver_t iwm_pci_driver = { 6487 "iwm", 6488 iwm_pci_methods, 6489 sizeof (struct iwm_softc) 6490 }; 6491 6492 static devclass_t iwm_devclass; 6493 6494 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL); 6495 MODULE_DEPEND(iwm, firmware, 1, 1, 1); 6496 MODULE_DEPEND(iwm, pci, 1, 1, 1); 6497 MODULE_DEPEND(iwm, wlan, 1, 1, 1); 6498