1 /* $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $ */ 2 3 /* 4 * Copyright (c) 2014 genua mbh <info@genua.de> 5 * Copyright (c) 2014 Fixup Software Ltd. 6 * 7 * Permission to use, copy, modify, and distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 /*- 21 * Based on BSD-licensed source modules in the Linux iwlwifi driver, 22 * which were used as the reference documentation for this implementation. 23 * 24 * Driver version we are currently based off of is 25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd) 26 * 27 *********************************************************************** 28 * 29 * This file is provided under a dual BSD/GPLv2 license. When using or 30 * redistributing this file, you may do so under either license. 31 * 32 * GPL LICENSE SUMMARY 33 * 34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. 35 * 36 * This program is free software; you can redistribute it and/or modify 37 * it under the terms of version 2 of the GNU General Public License as 38 * published by the Free Software Foundation. 39 * 40 * This program is distributed in the hope that it will be useful, but 41 * WITHOUT ANY WARRANTY; without even the implied warranty of 42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 43 * General Public License for more details. 
44 * 45 * You should have received a copy of the GNU General Public License 46 * along with this program; if not, write to the Free Software 47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, 48 * USA 49 * 50 * The full GNU General Public License is included in this distribution 51 * in the file called COPYING. 52 * 53 * Contact Information: 54 * Intel Linux Wireless <ilw@linux.intel.com> 55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 56 * 57 * 58 * BSD LICENSE 59 * 60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. 61 * All rights reserved. 62 * 63 * Redistribution and use in source and binary forms, with or without 64 * modification, are permitted provided that the following conditions 65 * are met: 66 * 67 * * Redistributions of source code must retain the above copyright 68 * notice, this list of conditions and the following disclaimer. 69 * * Redistributions in binary form must reproduce the above copyright 70 * notice, this list of conditions and the following disclaimer in 71 * the documentation and/or other materials provided with the 72 * distribution. 73 * * Neither the name Intel Corporation nor the names of its 74 * contributors may be used to endorse or promote products derived 75 * from this software without specific prior written permission. 76 * 77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 80 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 88 */ 89 90 /*- 91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr> 92 * 93 * Permission to use, copy, modify, and distribute this software for any 94 * purpose with or without fee is hereby granted, provided that the above 95 * copyright notice and this permission notice appear in all copies. 96 * 97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 104 */ 105 /* 106 * DragonFly work 107 * 108 * NOTE: Relative to roughly August 8th sources, does not include FreeBSD 109 * changes to remove per-device network interface (DragonFly has not 110 * caught up to that yet on the WLAN side). 111 * 112 * Comprehensive list of adjustments for DragonFly not #ifdef'd: 113 * malloc -> kmalloc (in particular, changing improper M_NOWAIT 114 * specifications to M_INTWAIT. We still don't 115 * understand why FreeBSD uses M_NOWAIT for 116 * critical must-not-fail kmalloc()s). 
117 * free -> kfree 118 * printf -> kprintf 119 * (bug fix) memset in iwm_reset_rx_ring. 120 * (debug) added several kprintf()s on error 121 * 122 * header file paths (DFly allows localized path specifications). 123 * minor header file differences. 124 * 125 * Comprehensive list of adjustments for DragonFly #ifdef'd: 126 * (safety) added register read-back serialization in iwm_reset_rx_ring(). 127 * packet counters 128 * msleep -> iwmsleep (handle deadlocks due to dfly interrupt serializer) 129 * mtx -> lk (mtx functions -> lockmgr functions) 130 * callout differences 131 * taskqueue differences 132 * MSI differences 133 * bus_setup_intr() differences 134 * minor PCI config register naming differences 135 */ 136 #include <sys/cdefs.h> 137 __FBSDID("$FreeBSD$"); 138 139 #include <sys/param.h> 140 #include <sys/bus.h> 141 #include <sys/endian.h> 142 #include <sys/firmware.h> 143 #include <sys/kernel.h> 144 #include <sys/malloc.h> 145 #include <sys/mbuf.h> 146 #include <sys/mutex.h> 147 #include <sys/module.h> 148 #include <sys/proc.h> 149 #include <sys/rman.h> 150 #include <sys/socket.h> 151 #include <sys/sockio.h> 152 #include <sys/sysctl.h> 153 #include <sys/linker.h> 154 155 #include <machine/endian.h> 156 157 #include <bus/pci/pcivar.h> 158 #include <bus/pci/pcireg.h> 159 160 #include <net/bpf.h> 161 162 #include <net/if.h> 163 #include <net/if_var.h> 164 #include <net/if_arp.h> 165 #include <net/if_dl.h> 166 #include <net/if_media.h> 167 #include <net/if_types.h> 168 169 #include <netinet/in.h> 170 #include <netinet/in_systm.h> 171 #include <netinet/if_ether.h> 172 #include <netinet/ip.h> 173 174 #include <netproto/802_11/ieee80211_var.h> 175 #include <netproto/802_11/ieee80211_regdomain.h> 176 #include <netproto/802_11/ieee80211_ratectl.h> 177 #include <netproto/802_11/ieee80211_radiotap.h> 178 179 #include "if_iwmreg.h" 180 #include "if_iwmvar.h" 181 #include "if_iwm_debug.h" 182 #include "if_iwm_util.h" 183 #include "if_iwm_binding.h" 184 #include 
"if_iwm_phy_db.h" 185 #include "if_iwm_mac_ctxt.h" 186 #include "if_iwm_phy_ctxt.h" 187 #include "if_iwm_time_event.h" 188 #include "if_iwm_power.h" 189 #include "if_iwm_scan.h" 190 #include "if_iwm_pcie_trans.h" 191 #include "if_iwm_led.h" 192 193 const uint8_t iwm_nvm_channels[] = { 194 /* 2.4 GHz */ 195 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 196 /* 5 GHz */ 197 36, 40, 44, 48, 52, 56, 60, 64, 198 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 199 149, 153, 157, 161, 165 200 }; 201 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS, 202 "IWM_NUM_CHANNELS is too small"); 203 204 const uint8_t iwm_nvm_channels_8000[] = { 205 /* 2.4 GHz */ 206 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 207 /* 5 GHz */ 208 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 209 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 210 149, 153, 157, 161, 165, 169, 173, 177, 181 211 }; 212 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000, 213 "IWM_NUM_CHANNELS_8000 is too small"); 214 215 #define IWM_NUM_2GHZ_CHANNELS 14 216 #define IWM_N_HW_ADDR_MASK 0xF 217 218 /* 219 * XXX For now, there's simply a fixed set of rate table entries 220 * that are populated. 
221 */ 222 const struct iwm_rate { 223 uint8_t rate; 224 uint8_t plcp; 225 } iwm_rates[] = { 226 { 2, IWM_RATE_1M_PLCP }, 227 { 4, IWM_RATE_2M_PLCP }, 228 { 11, IWM_RATE_5M_PLCP }, 229 { 22, IWM_RATE_11M_PLCP }, 230 { 12, IWM_RATE_6M_PLCP }, 231 { 18, IWM_RATE_9M_PLCP }, 232 { 24, IWM_RATE_12M_PLCP }, 233 { 36, IWM_RATE_18M_PLCP }, 234 { 48, IWM_RATE_24M_PLCP }, 235 { 72, IWM_RATE_36M_PLCP }, 236 { 96, IWM_RATE_48M_PLCP }, 237 { 108, IWM_RATE_54M_PLCP }, 238 }; 239 #define IWM_RIDX_CCK 0 240 #define IWM_RIDX_OFDM 4 241 #define IWM_RIDX_MAX (nitems(iwm_rates)-1) 242 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM) 243 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM) 244 245 struct iwm_nvm_section { 246 uint16_t length; 247 uint8_t *data; 248 }; 249 250 static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t); 251 static int iwm_firmware_store_section(struct iwm_softc *, 252 enum iwm_ucode_type, 253 const uint8_t *, size_t); 254 static int iwm_set_default_calib(struct iwm_softc *, const void *); 255 static void iwm_fw_info_free(struct iwm_fw_info *); 256 static int iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type); 257 static void iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int); 258 static int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, 259 bus_size_t, bus_size_t); 260 static void iwm_dma_contig_free(struct iwm_dma_info *); 261 static int iwm_alloc_fwmem(struct iwm_softc *); 262 static void iwm_free_fwmem(struct iwm_softc *); 263 static int iwm_alloc_sched(struct iwm_softc *); 264 static void iwm_free_sched(struct iwm_softc *); 265 static int iwm_alloc_kw(struct iwm_softc *); 266 static void iwm_free_kw(struct iwm_softc *); 267 static int iwm_alloc_ict(struct iwm_softc *); 268 static void iwm_free_ict(struct iwm_softc *); 269 static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *); 270 static void iwm_disable_rx_dma(struct iwm_softc *); 271 static void iwm_reset_rx_ring(struct iwm_softc *, 
struct iwm_rx_ring *); 272 static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *); 273 static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, 274 int); 275 static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *); 276 static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *); 277 static void iwm_enable_interrupts(struct iwm_softc *); 278 static void iwm_restore_interrupts(struct iwm_softc *); 279 static void iwm_disable_interrupts(struct iwm_softc *); 280 static void iwm_ict_reset(struct iwm_softc *); 281 static int iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *); 282 static void iwm_stop_device(struct iwm_softc *); 283 static void iwm_mvm_nic_config(struct iwm_softc *); 284 static int iwm_nic_rx_init(struct iwm_softc *); 285 static int iwm_nic_tx_init(struct iwm_softc *); 286 static int iwm_nic_init(struct iwm_softc *); 287 static int iwm_enable_txq(struct iwm_softc *, int, int, int); 288 static int iwm_post_alive(struct iwm_softc *); 289 static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, 290 uint16_t, uint8_t *, uint16_t *); 291 static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *, 292 uint16_t *, size_t); 293 static uint32_t iwm_eeprom_channel_flags(uint16_t); 294 static void iwm_add_channel_band(struct iwm_softc *, 295 struct ieee80211_channel[], int, int *, int, size_t, 296 const uint8_t[]); 297 static void iwm_init_channel_map(struct ieee80211com *, int, int *, 298 struct ieee80211_channel[]); 299 static int iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *, 300 const uint16_t *, const uint16_t *, 301 const uint16_t *, const uint16_t *, 302 const uint16_t *); 303 static void iwm_set_hw_address_8000(struct iwm_softc *, 304 struct iwm_nvm_data *, 305 const uint16_t *, const uint16_t *); 306 static int iwm_get_sku(const struct iwm_softc *, const uint16_t *, 307 const uint16_t *); 308 static int iwm_get_nvm_version(const struct iwm_softc *, const 
uint16_t *); 309 static int iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *, 310 const uint16_t *); 311 static int iwm_get_n_hw_addrs(const struct iwm_softc *, 312 const const uint16_t *); 313 static void iwm_set_radio_cfg(const struct iwm_softc *, 314 struct iwm_nvm_data *, uint32_t); 315 static int iwm_parse_nvm_sections(struct iwm_softc *, 316 struct iwm_nvm_section *); 317 static int iwm_nvm_init(struct iwm_softc *); 318 static int iwm_firmware_load_sect(struct iwm_softc *, uint32_t, 319 const uint8_t *, uint32_t); 320 static int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, 321 const uint8_t *, uint32_t); 322 static int iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type); 323 static int iwm_load_cpu_sections_8000(struct iwm_softc *, 324 struct iwm_fw_sects *, int , int *); 325 static int iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type); 326 static int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type); 327 static int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type); 328 static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t); 329 static int iwm_send_phy_cfg_cmd(struct iwm_softc *); 330 static int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *, 331 enum iwm_ucode_type); 332 static int iwm_run_init_mvm_ucode(struct iwm_softc *, int); 333 static int iwm_rx_addbuf(struct iwm_softc *, int, int); 334 static int iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *); 335 static int iwm_mvm_get_signal_strength(struct iwm_softc *, 336 struct iwm_rx_phy_info *); 337 static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *, 338 struct iwm_rx_packet *, 339 struct iwm_rx_data *); 340 static int iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *); 341 static void iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *, 342 struct iwm_rx_data *); 343 static int iwm_mvm_rx_tx_cmd_single(struct iwm_softc *, 344 struct iwm_rx_packet *, 345 struct iwm_node *); 346 static void 
iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *, 347 struct iwm_rx_data *); 348 static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *); 349 #if 0 350 static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t, 351 uint16_t); 352 #endif 353 static const struct iwm_rate * 354 iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *, 355 struct ieee80211_frame *, struct iwm_tx_cmd *); 356 static int iwm_tx(struct iwm_softc *, struct mbuf *, 357 struct ieee80211_node *, int); 358 static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *, 359 const struct ieee80211_bpf_params *); 360 static int iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *, 361 struct iwm_mvm_add_sta_cmd_v7 *, 362 int *); 363 static int iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *, 364 int); 365 static int iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *); 366 static int iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *); 367 static int iwm_mvm_add_int_sta_common(struct iwm_softc *, 368 struct iwm_int_sta *, 369 const uint8_t *, uint16_t, uint16_t); 370 static int iwm_mvm_add_aux_sta(struct iwm_softc *); 371 static int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *); 372 static int iwm_auth(struct ieee80211vap *, struct iwm_softc *); 373 static int iwm_assoc(struct ieee80211vap *, struct iwm_softc *); 374 static int iwm_release(struct iwm_softc *, struct iwm_node *); 375 static struct ieee80211_node * 376 iwm_node_alloc(struct ieee80211vap *, 377 const uint8_t[IEEE80211_ADDR_LEN]); 378 static void iwm_setrates(struct iwm_softc *, struct iwm_node *); 379 static int iwm_media_change(struct ifnet *); 380 static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int); 381 static void iwm_endscan_cb(void *, int); 382 static void iwm_mvm_fill_sf_command(struct iwm_softc *, 383 struct iwm_sf_cfg_cmd *, 384 struct ieee80211_node *); 385 static int iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state); 386 static int 
iwm_send_bt_init_conf(struct iwm_softc *); 387 static int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *); 388 static void iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t); 389 static int iwm_init_hw(struct iwm_softc *); 390 static void iwm_init(struct iwm_softc *); 391 static void iwm_start(struct iwm_softc *); 392 static void iwm_stop(struct iwm_softc *); 393 static void iwm_watchdog(void *); 394 static void iwm_parent(struct ieee80211com *); 395 #ifdef IWM_DEBUG 396 static const char * 397 iwm_desc_lookup(uint32_t); 398 static void iwm_nic_error(struct iwm_softc *); 399 static void iwm_nic_umac_error(struct iwm_softc *); 400 #endif 401 static void iwm_notif_intr(struct iwm_softc *); 402 static void iwm_intr(void *); 403 static int iwm_attach(device_t); 404 static int iwm_is_valid_ether_addr(uint8_t *); 405 static void iwm_preinit(void *); 406 static int iwm_detach_local(struct iwm_softc *sc, int); 407 static void iwm_init_task(void *); 408 static void iwm_radiotap_attach(struct iwm_softc *); 409 static struct ieee80211vap * 410 iwm_vap_create(struct ieee80211com *, 411 const char [IFNAMSIZ], int, 412 enum ieee80211_opmode, int, 413 const uint8_t [IEEE80211_ADDR_LEN], 414 const uint8_t [IEEE80211_ADDR_LEN]); 415 static void iwm_vap_delete(struct ieee80211vap *); 416 static void iwm_scan_start(struct ieee80211com *); 417 static void iwm_scan_end(struct ieee80211com *); 418 static void iwm_update_mcast(struct ieee80211com *); 419 static void iwm_set_channel(struct ieee80211com *); 420 static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long); 421 static void iwm_scan_mindwell(struct ieee80211_scan_state *); 422 static int iwm_detach(device_t); 423 424 #if defined(__DragonFly__) 425 static int iwm_msi_enable = 1; 426 427 TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable); 428 429 /* 430 * This is a hack due to the wlan_serializer deadlocking sleepers. 
431 */ 432 int iwmsleep(void *chan, struct lock *lk, int flags, const char *wmesg, int to); 433 434 int 435 iwmsleep(void *chan, struct lock *lk, int flags, const char *wmesg, int to) 436 { 437 int error; 438 439 if (wlan_is_serialized()) { 440 wlan_serialize_exit(); 441 kprintf("%s: have to release serializer for sleeping\n", 442 __func__); 443 error = lksleep(chan, lk, flags, wmesg, to); 444 lockmgr(lk, LK_RELEASE); 445 wlan_serialize_enter(); 446 lockmgr(lk, LK_EXCLUSIVE); 447 } else { 448 error = lksleep(chan, lk, flags, wmesg, to); 449 } 450 return error; 451 } 452 453 #endif 454 455 /* 456 * Firmware parser. 457 */ 458 459 static int 460 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen) 461 { 462 const struct iwm_fw_cscheme_list *l = (const void *)data; 463 464 if (dlen < sizeof(*l) || 465 dlen < sizeof(l->size) + l->size * sizeof(*l->cs)) 466 return EINVAL; 467 468 /* we don't actually store anything for now, always use s/w crypto */ 469 470 return 0; 471 } 472 473 static int 474 iwm_firmware_store_section(struct iwm_softc *sc, 475 enum iwm_ucode_type type, const uint8_t *data, size_t dlen) 476 { 477 struct iwm_fw_sects *fws; 478 struct iwm_fw_onesect *fwone; 479 480 if (type >= IWM_UCODE_TYPE_MAX) 481 return EINVAL; 482 if (dlen < sizeof(uint32_t)) 483 return EINVAL; 484 485 fws = &sc->sc_fw.fw_sects[type]; 486 if (fws->fw_count >= IWM_UCODE_SECT_MAX) 487 return EINVAL; 488 489 fwone = &fws->fw_sect[fws->fw_count]; 490 491 /* first 32bit are device load offset */ 492 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t)); 493 494 /* rest is data */ 495 fwone->fws_data = data + sizeof(uint32_t); 496 fwone->fws_len = dlen - sizeof(uint32_t); 497 498 fws->fw_count++; 499 fws->fw_totlen += fwone->fws_len; 500 501 return 0; 502 } 503 504 struct iwm_tlv_calib_data { 505 uint32_t ucode_type; 506 struct iwm_tlv_calib_ctrl calib; 507 } __packed; 508 509 static int 510 iwm_set_default_calib(struct iwm_softc *sc, const void *data) 511 { 512 const 
struct iwm_tlv_calib_data *def_calib = data; 513 uint32_t ucode_type = le32toh(def_calib->ucode_type); 514 515 if (ucode_type >= IWM_UCODE_TYPE_MAX) { 516 device_printf(sc->sc_dev, 517 "Wrong ucode_type %u for default " 518 "calibration.\n", ucode_type); 519 return EINVAL; 520 } 521 522 sc->sc_default_calib[ucode_type].flow_trigger = 523 def_calib->calib.flow_trigger; 524 sc->sc_default_calib[ucode_type].event_trigger = 525 def_calib->calib.event_trigger; 526 527 return 0; 528 } 529 530 static void 531 iwm_fw_info_free(struct iwm_fw_info *fw) 532 { 533 firmware_put(fw->fw_fp, FIRMWARE_UNLOAD); 534 fw->fw_fp = NULL; 535 /* don't touch fw->fw_status */ 536 memset(fw->fw_sects, 0, sizeof(fw->fw_sects)); 537 } 538 539 static int 540 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type) 541 { 542 struct iwm_fw_info *fw = &sc->sc_fw; 543 const struct iwm_tlv_ucode_header *uhdr; 544 struct iwm_ucode_tlv tlv; 545 enum iwm_ucode_tlv_type tlv_type; 546 const struct firmware *fwp; 547 const uint8_t *data; 548 int error = 0; 549 size_t len; 550 551 if (fw->fw_status == IWM_FW_STATUS_DONE && 552 ucode_type != IWM_UCODE_TYPE_INIT) 553 return 0; 554 555 while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) { 556 #if defined(__DragonFly__) 557 iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0); 558 #else 559 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0); 560 #endif 561 } 562 fw->fw_status = IWM_FW_STATUS_INPROGRESS; 563 564 if (fw->fw_fp != NULL) 565 iwm_fw_info_free(fw); 566 567 /* 568 * Load firmware into driver memory. 569 * fw_fp will be set. 570 */ 571 IWM_UNLOCK(sc); 572 fwp = firmware_get(sc->sc_fwname); 573 IWM_LOCK(sc); 574 if (fwp == NULL) { 575 device_printf(sc->sc_dev, 576 "could not read firmware %s (error %d)\n", 577 sc->sc_fwname, error); 578 goto out; 579 } 580 fw->fw_fp = fwp; 581 582 /* (Re-)Initialize default values. 
*/ 583 sc->sc_capaflags = 0; 584 sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS; 585 memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa)); 586 memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc)); 587 588 /* 589 * Parse firmware contents 590 */ 591 592 uhdr = (const void *)fw->fw_fp->data; 593 if (*(const uint32_t *)fw->fw_fp->data != 0 594 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) { 595 device_printf(sc->sc_dev, "invalid firmware %s\n", 596 sc->sc_fwname); 597 error = EINVAL; 598 goto out; 599 } 600 601 ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)", 602 IWM_UCODE_MAJOR(le32toh(uhdr->ver)), 603 IWM_UCODE_MINOR(le32toh(uhdr->ver)), 604 IWM_UCODE_API(le32toh(uhdr->ver))); 605 data = uhdr->data; 606 len = fw->fw_fp->datasize - sizeof(*uhdr); 607 608 while (len >= sizeof(tlv)) { 609 size_t tlv_len; 610 const void *tlv_data; 611 612 memcpy(&tlv, data, sizeof(tlv)); 613 tlv_len = le32toh(tlv.length); 614 tlv_type = le32toh(tlv.type); 615 616 len -= sizeof(tlv); 617 data += sizeof(tlv); 618 tlv_data = data; 619 620 if (len < tlv_len) { 621 device_printf(sc->sc_dev, 622 "firmware too short: %zu bytes\n", 623 len); 624 error = EINVAL; 625 goto parse_out; 626 } 627 628 switch ((int)tlv_type) { 629 case IWM_UCODE_TLV_PROBE_MAX_LEN: 630 if (tlv_len < sizeof(uint32_t)) { 631 device_printf(sc->sc_dev, 632 "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n", 633 __func__, 634 (int) tlv_len); 635 error = EINVAL; 636 goto parse_out; 637 } 638 sc->sc_capa_max_probe_len 639 = le32toh(*(const uint32_t *)tlv_data); 640 /* limit it to something sensible */ 641 if (sc->sc_capa_max_probe_len > 642 IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) { 643 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV, 644 "%s: IWM_UCODE_TLV_PROBE_MAX_LEN " 645 "ridiculous\n", __func__); 646 error = EINVAL; 647 goto parse_out; 648 } 649 break; 650 case IWM_UCODE_TLV_PAN: 651 if (tlv_len) { 652 device_printf(sc->sc_dev, 653 "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n", 654 __func__, 655 (int) tlv_len); 
656 error = EINVAL; 657 goto parse_out; 658 } 659 sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN; 660 break; 661 case IWM_UCODE_TLV_FLAGS: 662 if (tlv_len < sizeof(uint32_t)) { 663 device_printf(sc->sc_dev, 664 "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n", 665 __func__, 666 (int) tlv_len); 667 error = EINVAL; 668 goto parse_out; 669 } 670 /* 671 * Apparently there can be many flags, but Linux driver 672 * parses only the first one, and so do we. 673 * 674 * XXX: why does this override IWM_UCODE_TLV_PAN? 675 * Intentional or a bug? Observations from 676 * current firmware file: 677 * 1) TLV_PAN is parsed first 678 * 2) TLV_FLAGS contains TLV_FLAGS_PAN 679 * ==> this resets TLV_PAN to itself... hnnnk 680 */ 681 sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data); 682 break; 683 case IWM_UCODE_TLV_CSCHEME: 684 if ((error = iwm_store_cscheme(sc, 685 tlv_data, tlv_len)) != 0) { 686 device_printf(sc->sc_dev, 687 "%s: iwm_store_cscheme(): returned %d\n", 688 __func__, 689 error); 690 goto parse_out; 691 } 692 break; 693 case IWM_UCODE_TLV_NUM_OF_CPU: { 694 uint32_t num_cpu; 695 if (tlv_len != sizeof(uint32_t)) { 696 device_printf(sc->sc_dev, 697 "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n", 698 __func__, 699 (int) tlv_len); 700 error = EINVAL; 701 goto parse_out; 702 } 703 num_cpu = le32toh(*(const uint32_t *)tlv_data); 704 if (num_cpu < 1 || num_cpu > 2) { 705 device_printf(sc->sc_dev, 706 "%s: Driver supports only 1 or 2 CPUs\n", 707 __func__); 708 error = EINVAL; 709 goto parse_out; 710 } 711 break; 712 } 713 case IWM_UCODE_TLV_SEC_RT: 714 if ((error = iwm_firmware_store_section(sc, 715 IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) { 716 device_printf(sc->sc_dev, 717 "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n", 718 __func__, 719 error); 720 goto parse_out; 721 } 722 break; 723 case IWM_UCODE_TLV_SEC_INIT: 724 if ((error = iwm_firmware_store_section(sc, 725 IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 
0) { 726 device_printf(sc->sc_dev, 727 "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n", 728 __func__, 729 error); 730 goto parse_out; 731 } 732 break; 733 case IWM_UCODE_TLV_SEC_WOWLAN: 734 if ((error = iwm_firmware_store_section(sc, 735 IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) { 736 device_printf(sc->sc_dev, 737 "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n", 738 __func__, 739 error); 740 goto parse_out; 741 } 742 break; 743 case IWM_UCODE_TLV_DEF_CALIB: 744 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) { 745 device_printf(sc->sc_dev, 746 "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n", 747 __func__, 748 (int) tlv_len, 749 (int) sizeof(struct iwm_tlv_calib_data)); 750 error = EINVAL; 751 goto parse_out; 752 } 753 if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) { 754 device_printf(sc->sc_dev, 755 "%s: iwm_set_default_calib() failed: %d\n", 756 __func__, 757 error); 758 goto parse_out; 759 } 760 break; 761 case IWM_UCODE_TLV_PHY_SKU: 762 if (tlv_len != sizeof(uint32_t)) { 763 error = EINVAL; 764 device_printf(sc->sc_dev, 765 "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n", 766 __func__, 767 (int) tlv_len); 768 goto parse_out; 769 } 770 sc->sc_fw_phy_config = 771 le32toh(*(const uint32_t *)tlv_data); 772 break; 773 774 case IWM_UCODE_TLV_API_CHANGES_SET: { 775 const struct iwm_ucode_api *api; 776 if (tlv_len != sizeof(*api)) { 777 error = EINVAL; 778 goto parse_out; 779 } 780 api = (const struct iwm_ucode_api *)tlv_data; 781 /* Flags may exceed 32 bits in future firmware. 
*/ 782 if (le32toh(api->api_index) > 0) { 783 device_printf(sc->sc_dev, 784 "unsupported API index %d\n", 785 le32toh(api->api_index)); 786 goto parse_out; 787 } 788 sc->sc_ucode_api = le32toh(api->api_flags); 789 break; 790 } 791 792 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: { 793 const struct iwm_ucode_capa *capa; 794 int idx, i; 795 if (tlv_len != sizeof(*capa)) { 796 error = EINVAL; 797 goto parse_out; 798 } 799 capa = (const struct iwm_ucode_capa *)tlv_data; 800 idx = le32toh(capa->api_index); 801 if (idx > howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) { 802 device_printf(sc->sc_dev, 803 "unsupported API index %d\n", idx); 804 goto parse_out; 805 } 806 for (i = 0; i < 32; i++) { 807 if ((le32toh(capa->api_capa) & (1U << i)) == 0) 808 continue; 809 setbit(sc->sc_enabled_capa, i + (32 * idx)); 810 } 811 break; 812 } 813 814 case 48: /* undocumented TLV */ 815 case IWM_UCODE_TLV_SDIO_ADMA_ADDR: 816 case IWM_UCODE_TLV_FW_GSCAN_CAPA: 817 /* ignore, not used by current driver */ 818 break; 819 820 case IWM_UCODE_TLV_SEC_RT_USNIFFER: 821 if ((error = iwm_firmware_store_section(sc, 822 IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data, 823 tlv_len)) != 0) 824 goto parse_out; 825 break; 826 827 case IWM_UCODE_TLV_N_SCAN_CHANNELS: 828 if (tlv_len != sizeof(uint32_t)) { 829 error = EINVAL; 830 goto parse_out; 831 } 832 sc->sc_capa_n_scan_channels = 833 le32toh(*(const uint32_t *)tlv_data); 834 break; 835 836 case IWM_UCODE_TLV_FW_VERSION: 837 if (tlv_len != sizeof(uint32_t) * 3) { 838 error = EINVAL; 839 goto parse_out; 840 } 841 ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), 842 "%d.%d.%d", 843 le32toh(((const uint32_t *)tlv_data)[0]), 844 le32toh(((const uint32_t *)tlv_data)[1]), 845 le32toh(((const uint32_t *)tlv_data)[2])); 846 break; 847 848 default: 849 device_printf(sc->sc_dev, 850 "%s: unknown firmware section %d, abort\n", 851 __func__, tlv_type); 852 error = EINVAL; 853 goto parse_out; 854 } 855 856 len -= roundup(tlv_len, 4); 857 data += roundup(tlv_len, 4); 858 } 859 860 
KASSERT(error == 0, ("unhandled error")); 861 862 parse_out: 863 if (error) { 864 device_printf(sc->sc_dev, "firmware parse error %d, " 865 "section type %d\n", error, tlv_type); 866 } 867 868 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) { 869 device_printf(sc->sc_dev, 870 "device uses unsupported power ops\n"); 871 error = ENOTSUP; 872 } 873 874 out: 875 if (error) { 876 fw->fw_status = IWM_FW_STATUS_NONE; 877 if (fw->fw_fp != NULL) 878 iwm_fw_info_free(fw); 879 } else 880 fw->fw_status = IWM_FW_STATUS_DONE; 881 wakeup(&sc->sc_fw); 882 883 return error; 884 } 885 886 /* 887 * DMA resource routines 888 */ 889 890 static void 891 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 892 { 893 if (error != 0) 894 return; 895 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 896 *(bus_addr_t *)arg = segs[0].ds_addr; 897 } 898 899 static int 900 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma, 901 bus_size_t size, bus_size_t alignment) 902 { 903 int error; 904 905 dma->tag = NULL; 906 dma->size = size; 907 908 #if defined(__DragonFly__) 909 error = bus_dma_tag_create(tag, alignment, 910 0, 911 BUS_SPACE_MAXADDR_32BIT, 912 BUS_SPACE_MAXADDR, 913 NULL, NULL, 914 size, 1, size, 915 BUS_DMA_NOWAIT, &dma->tag); 916 #else 917 error = bus_dma_tag_create(tag, alignment, 918 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 919 1, size, 0, NULL, NULL, &dma->tag); 920 #endif 921 if (error != 0) 922 goto fail; 923 924 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 925 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 926 if (error != 0) 927 goto fail; 928 929 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 930 iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 931 if (error != 0) 932 goto fail; 933 934 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 935 936 return 0; 937 938 fail: 939 iwm_dma_contig_free(dma); 940 941 return error; 942 } 943 944 
static void 945 iwm_dma_contig_free(struct iwm_dma_info *dma) 946 { 947 if (dma->map != NULL) { 948 if (dma->vaddr != NULL) { 949 bus_dmamap_sync(dma->tag, dma->map, 950 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 951 bus_dmamap_unload(dma->tag, dma->map); 952 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 953 dma->vaddr = NULL; 954 } 955 bus_dmamap_destroy(dma->tag, dma->map); 956 dma->map = NULL; 957 } 958 if (dma->tag != NULL) { 959 bus_dma_tag_destroy(dma->tag); 960 dma->tag = NULL; 961 } 962 963 } 964 965 /* fwmem is used to load firmware onto the card */ 966 static int 967 iwm_alloc_fwmem(struct iwm_softc *sc) 968 { 969 /* Must be aligned on a 16-byte boundary. */ 970 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, 971 sc->sc_fwdmasegsz, 16); 972 } 973 974 static void 975 iwm_free_fwmem(struct iwm_softc *sc) 976 { 977 iwm_dma_contig_free(&sc->fw_dma); 978 } 979 980 /* tx scheduler rings. not used? */ 981 static int 982 iwm_alloc_sched(struct iwm_softc *sc) 983 { 984 int rv; 985 986 /* TX scheduler rings must be aligned on a 1KB boundary. */ 987 rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma, 988 nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024); 989 return rv; 990 } 991 992 static void 993 iwm_free_sched(struct iwm_softc *sc) 994 { 995 iwm_dma_contig_free(&sc->sched_dma); 996 } 997 998 /* keep-warm page is used internally by the card. 
   see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	/* One 4KB "keep warm" page, 4KB aligned. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

static void
iwm_free_kw(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->kw_dma);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	/* Alignment must match the shift used to program the ICT base. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

static void
iwm_free_ict(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->ict_dma);
}

/*
 * Set up the RX ring: descriptor array, status area, a DMA tag for the
 * receive buffers, one map per slot plus a spare, and the initial set
 * of IWM_RX_RING_COUNT buffers.  On any failure all partial state is
 * released via iwm_free_rx_ring().
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag.
	 */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		/* Attach an mbuf to slot i and load it into the map. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

	/* Unwind any partially constructed ring state. */
fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	/* XXX conditional nic locks are stupid */
	/* XXX print out if we can't lock the NIC? */
	if (iwm_nic_lock(sc)) {
		/* XXX handle if RX stop doesn't finish?
		 */
		(void) iwm_pcie_rx_stop(sc);
		iwm_nic_unlock(sc);
	}
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

/* Release all RX DMA memory, per-slot mbufs/maps and the buffer tag. */
static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * Set up TX ring 'qid': descriptor array for every ring, plus a
 * command buffer area and buffer DMA tag/maps for the rings that
 * actually carry traffic (0..IWM_MVM_CMD_QUEUE).
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned.
	 */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, IWM_MAX_SCATTER - 2, MCLBYTES,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    IWM_MAX_SCATTER - 2, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/*
	 * Precompute per-slot bus addresses into the command area: each
	 * slot owns one struct iwm_device_cmd; scratch_paddr points at
	 * the scratch field inside the embedded TX command.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* Sanity: the walk above must have covered the whole cmd area. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

	/* Unwind any partially constructed ring state. */
fail:	iwm_free_tx_ring(sc, ring);
	return error;
}

/*
 * Drop any mbufs still attached to TX slots and clear the descriptors;
 * the ring's DMA memory stays allocated for reuse.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	/* This queue is empty again; clear its bit in the full mask. */
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	/* Remember the mask so iwm_restore_interrupts() can replay it. */
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

/*
 * Reinitialize the interrupt cause table and switch the driver into
 * ICT interrupt mode.  Interrupts are briefly disabled around the
 * table reset and re-enabled at the end.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, ntries;
	int qid;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.
	   tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Poll (up to 200 * 20us) for the channel to idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}

/*
 * Program IWM_CSR_HW_IF_CONFIG_REG from the radio type/step/dash
 * encoded in the firmware PHY config and the MAC step/dash taken from
 * the hardware revision register.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

/*
 * Program the FH RX engine: reset pointers, install the descriptor
 * and status-area bus addresses, and enable the RX channel.
 * Returns EBUSY if the NIC cannot be locked.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	iwm_disable_rx_dma(sc);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable RX.
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}

/*
 * Program the FH TX side: keep-warm page address and the descriptor
 * base address for every TX ring.  Returns EBUSY if the NIC cannot be
 * locked.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned).
		 */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}

/*
 * Full NIC bring-up: APM init, power config (7000 family), interface
 * config, then RX and TX engine setup.  Returns 0 or an errno from
 * the RX/TX init steps.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

/* Map WME access categories (AC_VO..AC_BK) to device TX FIFOs. */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};

/*
 * Activate TX queue 'qid' on FIFO 'fifo'.  The command queue is
 * configured directly through scheduler registers; all other queues
 * are configured with an IWM_SCD_QUEUE_CFG firmware command.
 */
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* unactivate before configuration */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

		iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit.
		 */
		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		/* Drop the NIC lock across the (sleeping) firmware command. */
		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	/*
	 * NOTE(review): ORing the raw queue number into IWM_SCD_EN_CTRL
	 * looks suspect -- a per-queue enable mask would normally take
	 * (1 << qid), and qid 0 ORs in nothing at all here.  Compare
	 * against Linux iwlwifi before changing.
	 */
	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}

/*
 * Post-firmware-boot bring-up: verify the scheduler SRAM base, reset
 * the ICT table, clear scheduler state, enable the command queue and
 * all FH DMA channels.  Returns 0 or an errno.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int error, chnl;
	uint32_t base;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (sc->sched_base != base) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->sched_base, base);
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM.
	 */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	/* NULL source means "fill with zeroes" for iwm_write_mem(). */
	error = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (error)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate all TX scheduler queues. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

 out:
	iwm_nic_unlock(sc);
	return error;
}

/*
 * NVM read access and content parsing. We do not support
 * external NVM or writing NVM.
1754 * iwlwifi/mvm/nvm.c 1755 */ 1756 1757 /* list of NVM sections we are allowed/need to read */ 1758 const int nvm_to_read[] = { 1759 IWM_NVM_SECTION_TYPE_HW, 1760 IWM_NVM_SECTION_TYPE_SW, 1761 IWM_NVM_SECTION_TYPE_REGULATORY, 1762 IWM_NVM_SECTION_TYPE_CALIBRATION, 1763 IWM_NVM_SECTION_TYPE_PRODUCTION, 1764 IWM_NVM_SECTION_TYPE_HW_8000, 1765 IWM_NVM_SECTION_TYPE_MAC_OVERRIDE, 1766 IWM_NVM_SECTION_TYPE_PHY_SKU, 1767 }; 1768 1769 /* Default NVM size to read */ 1770 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024) 1771 #define IWM_MAX_NVM_SECTION_SIZE 8192 1772 1773 #define IWM_NVM_WRITE_OPCODE 1 1774 #define IWM_NVM_READ_OPCODE 0 1775 1776 /* load nvm chunk response */ 1777 #define IWM_READ_NVM_CHUNK_SUCCEED 0 1778 #define IWM_READ_NVM_CHUNK_INVALID_ADDRESS 1 1779 1780 static int 1781 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, 1782 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len) 1783 { 1784 offset = 0; 1785 struct iwm_nvm_access_cmd nvm_access_cmd = { 1786 .offset = htole16(offset), 1787 .length = htole16(length), 1788 .type = htole16(section), 1789 .op_code = IWM_NVM_READ_OPCODE, 1790 }; 1791 struct iwm_nvm_access_resp *nvm_resp; 1792 struct iwm_rx_packet *pkt; 1793 struct iwm_host_cmd cmd = { 1794 .id = IWM_NVM_ACCESS_CMD, 1795 .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB | 1796 IWM_CMD_SEND_IN_RFKILL, 1797 .data = { &nvm_access_cmd, }, 1798 }; 1799 int ret, offset_read; 1800 size_t bytes_read; 1801 uint8_t *resp_data; 1802 1803 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd); 1804 1805 ret = iwm_send_cmd(sc, &cmd); 1806 if (ret) { 1807 device_printf(sc->sc_dev, 1808 "Could not send NVM_ACCESS command (error=%d)\n", ret); 1809 return ret; 1810 } 1811 1812 pkt = cmd.resp_pkt; 1813 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) { 1814 device_printf(sc->sc_dev, 1815 "Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n", 1816 pkt->hdr.flags); 1817 ret = EIO; 1818 goto exit; 1819 } 1820 1821 /* Extract NVM response */ 1822 nvm_resp = (void *)pkt->data; 
1823 1824 ret = le16toh(nvm_resp->status); 1825 bytes_read = le16toh(nvm_resp->length); 1826 offset_read = le16toh(nvm_resp->offset); 1827 resp_data = nvm_resp->data; 1828 if (ret) { 1829 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 1830 "NVM access command failed with status %d\n", ret); 1831 ret = EINVAL; 1832 goto exit; 1833 } 1834 1835 if (offset_read != offset) { 1836 device_printf(sc->sc_dev, 1837 "NVM ACCESS response with invalid offset %d\n", 1838 offset_read); 1839 ret = EINVAL; 1840 goto exit; 1841 } 1842 1843 if (bytes_read > length) { 1844 device_printf(sc->sc_dev, 1845 "NVM ACCESS response with too much data " 1846 "(%d bytes requested, %zd bytes received)\n", 1847 length, bytes_read); 1848 ret = EINVAL; 1849 goto exit; 1850 } 1851 1852 memcpy(data + offset, resp_data, bytes_read); 1853 *len = bytes_read; 1854 1855 exit: 1856 iwm_free_resp(sc, &cmd); 1857 return ret; 1858 } 1859 1860 /* 1861 * Reads an NVM section completely. 1862 * NICs prior to 7000 family don't have a real NVM, but just read 1863 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited 1864 * by uCode, we need to manually check in this case that we don't 1865 * overflow and try to read more than the EEPROM size. 1866 * For 7000 family NICs, we supply the maximal size we can read, and 1867 * the uCode fills the response with as much data as we can, 1868 * without overflowing, so no check is needed. 
 */
static int
iwm_nvm_read_section(struct iwm_softc *sc,
	uint16_t section, uint8_t *data, uint16_t *len, size_t max_len)
{
	uint16_t chunklen, seglen;
	int error = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "reading NVM section %d\n", section);

	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
	*len = 0;

	/* Read NVM chunks until exhausted (reading less than requested) */
	/*
	 * NOTE(review): 'data' is assumed to hold at least max_len bytes
	 * and max_len a multiple of the chunk size (callers use
	 * IWM_MAX_NVM_SECTION_SIZE); a final short chunk near max_len
	 * otherwise relies on the firmware honoring 'length'.
	 */
	while (seglen == chunklen && *len < max_len) {
		error = iwm_nvm_read_chunk(sc,
		    section, *len, chunklen, data, &seglen);
		if (error) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Cannot read from NVM section "
			    "%d at offset %d\n", section, *len);
			return error;
		}
		*len += seglen;
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "NVM section %d read completed (%d bytes, error=%d)\n",
	    section, *len, error);
	return error;
}

/* NVM offsets (in words) definitions */
enum iwm_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};

enum iwm_8000_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR0_WFPM_8000 = 0x12,
	IWM_HW_ADDR1_WFPM_8000 = 0x16,
	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION_8000 = 0x1C0,
	IWM_NVM_VERSION_8000 = 0,
	IWM_RADIO_CFG_8000 = 0,
	IWM_SKU_8000 = 2,
	IWM_N_HW_ADDRS_8000 = 3,

	/* NVM REGULATORY -Section offset (in words) definitions */
	IWM_NVM_CHANNELS_8000 = 0,
	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
	IWM_NVM_LAR_OFFSET_8000 = 0x507,
	IWM_NVM_LAR_ENABLED_8000 = 0x7,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};

/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
};

/* radio config bits (actual values from NVM definition) */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1 */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3 */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5 */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7 */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11 */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* 8000 family uses a different radio-config word layout. */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)

#define DEFAULT_MAX_TX_POWER	16

/**
 * enum iwm_nvm_channel_flags - channel flags in NVM
 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
 * @IWM_NVM_CHANNEL_RADAR: radar detection required
 * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
 * @IWM_NVM_CHANNEL_DFS: dynamic freq
   selection candidate
 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
 */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};

/*
 * Translate EEPROM flags to net80211.
 */
static uint32_t
iwm_eeprom_channel_flags(uint16_t ch_flags)
{
	uint32_t nflags;

	nflags = 0;
	/* No ACTIVE bit -> passive scanning only on this channel. */
	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
		nflags |= IEEE80211_CHAN_PASSIVE;
	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
		nflags |= IEEE80211_CHAN_NOADHOC;
	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
		nflags |= IEEE80211_CHAN_DFS;
		/* Just in case. */
		nflags |= IEEE80211_CHAN_NOADHOC;
	}

	return (nflags);
}

/*
 * Add every channel in NVM indices [ch_idx, ch_num) that is marked
 * VALID to the net80211 channel array, for the modes in 'bands'.
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, size_t ch_num,
    const uint8_t bands[])
{
	const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
	uint32_t nflags;
	uint16_t ch_flags;
	uint8_t ieee;
	int error;

	for (; ch_idx < ch_num; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		/* 7000 and 8000 families use different channel tables. */
		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
			ieee = iwm_nvm_channels[ch_idx];
		else
			ieee = iwm_nvm_channels_8000[ch_idx];

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
			    ieee, ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4");
			continue;
		}

		nflags = iwm_eeprom_channel_flags(ch_flags);
		error = ieee80211_add_channel(chans, maxchans, nchans,
		    ieee, 0, 0, nflags, bands);
		/* Channel list full: nothing more can be added. */
		if (error != 0)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
		    "Ch. %d Flags %x [%sGHz] - Added\n",
		    ieee, ch_flags,
		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
		    "5.2" : "2.4");
	}
}

/*
 * net80211 ic_getradiocaps callback: build the driver's channel list
 * from the NVM channel table.  2 GHz channels 1-13 are added for
 * 11b/11g, channel 14 for 11b only, and — when the SKU enables the
 * 5 GHz band — the remainder of the table is added for 11a.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
	size_t ch_num;

	memset(bands, 0, sizeof(bands));
	/* 1-13: 11b/g channels. */
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
	    IWM_NUM_2GHZ_CHANNELS - 1, bands);

	/* 14: 11b channel only.
	 */
	clrbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans,
	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

	if (data->sku_cap_band_52GHz_enable) {
		/* Table length differs between the 7000 and 8000 families. */
		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
			ch_num = nitems(iwm_nvm_channels);
		else
			ch_num = nitems(iwm_nvm_channels_8000);
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		iwm_add_channel_band(sc, chans, maxchans, nchans,
		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
	}
}

/*
 * Determine the MAC address for an 8000-family device and store it in
 * data->hw_addr.  Preference order: the MAC-override (MAO) NVM section
 * if it holds a usable address, otherwise the OTP address read from the
 * WFMP PRPH registers, otherwise all-zeroes (with a console warning).
 */
static void
iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
    const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		/* Sentinel value meaning "no override programmed". */
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
		    IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* Registers hold the address in reversed byte order. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	/* No source available: leave a zeroed (invalid) address. */
	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}

/*
 * Return the SKU capability word.  Pre-8000 devices keep it as a 16-bit
 * field in the SW section; 8000-family devices keep a 32-bit field in
 * the PHY_SKU section.
 */
static int
iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
    const uint16_t *phy_sku)
{
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
		return le16_to_cpup(nvm_sw + IWM_SKU);

	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
}

/*
 * Return the NVM version field (16-bit pre-8000, 32-bit on 8000).
 */
static int
iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
{
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
	else
		return le32_to_cpup((const uint32_t *)(nvm_sw +
		    IWM_NVM_VERSION_8000));
}

/*
 * Return the radio configuration word (16-bit from the SW section
 * pre-8000, 32-bit from the PHY_SKU section on 8000).
 */
static int
iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
    const uint16_t *phy_sku)
{
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);

	return le32_to_cpup((const uint32_t *)(phy_sku +
IWM_RADIO_CFG_8000)); 2179 } 2180 2181 static int 2182 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const const uint16_t *nvm_sw) 2183 { 2184 int n_hw_addr; 2185 2186 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) 2187 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS); 2188 2189 n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000)); 2190 2191 return n_hw_addr & IWM_N_HW_ADDR_MASK; 2192 } 2193 2194 static void 2195 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data, 2196 uint32_t radio_cfg) 2197 { 2198 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) { 2199 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg); 2200 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg); 2201 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg); 2202 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg); 2203 return; 2204 } 2205 2206 /* set the radio configuration for family 8000 */ 2207 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg); 2208 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg); 2209 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg); 2210 data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg); 2211 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg); 2212 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg); 2213 } 2214 2215 static int 2216 iwm_parse_nvm_data(struct iwm_softc *sc, 2217 const uint16_t *nvm_hw, const uint16_t *nvm_sw, 2218 const uint16_t *nvm_calib, const uint16_t *mac_override, 2219 const uint16_t *phy_sku, const uint16_t *regulatory) 2220 { 2221 struct iwm_nvm_data *data = &sc->sc_nvm; 2222 uint8_t hw_addr[IEEE80211_ADDR_LEN]; 2223 uint32_t sku, radio_cfg; 2224 2225 data->nvm_version = iwm_get_nvm_version(sc, nvm_sw); 2226 2227 radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku); 2228 iwm_set_radio_cfg(sc, data, radio_cfg); 2229 2230 sku = iwm_get_sku(sc, nvm_sw, phy_sku); 2231 data->sku_cap_band_24GHz_enable = sku & 
IWM_NVM_SKU_CAP_BAND_24GHZ; 2232 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ; 2233 data->sku_cap_11n_enable = 0; 2234 2235 data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw); 2236 2237 /* The byte order is little endian 16 bit, meaning 214365 */ 2238 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) { 2239 IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR); 2240 data->hw_addr[0] = hw_addr[1]; 2241 data->hw_addr[1] = hw_addr[0]; 2242 data->hw_addr[2] = hw_addr[3]; 2243 data->hw_addr[3] = hw_addr[2]; 2244 data->hw_addr[4] = hw_addr[5]; 2245 data->hw_addr[5] = hw_addr[4]; 2246 } else { 2247 iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw); 2248 } 2249 2250 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) { 2251 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS], 2252 IWM_NUM_CHANNELS * sizeof(uint16_t)); 2253 } else { 2254 memcpy(data->nvm_ch_flags, ®ulatory[IWM_NVM_CHANNELS_8000], 2255 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t)); 2256 } 2257 data->calib_version = 255; /* TODO: 2258 this value will prevent some checks from 2259 failing, we need to check if this 2260 field is still needed, and if it does, 2261 where is it in the NVM */ 2262 2263 return 0; 2264 } 2265 2266 static int 2267 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections) 2268 { 2269 const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku; 2270 2271 /* Checking for required sections */ 2272 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) { 2273 if (!sections[IWM_NVM_SECTION_TYPE_SW].data || 2274 !sections[IWM_NVM_SECTION_TYPE_HW].data) { 2275 device_printf(sc->sc_dev, 2276 "Can't parse empty OTP/NVM sections\n"); 2277 return ENOENT; 2278 } 2279 2280 hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data; 2281 } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) { 2282 /* SW and REGULATORY sections are mandatory */ 2283 if (!sections[IWM_NVM_SECTION_TYPE_SW].data || 2284 
!sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) { 2285 device_printf(sc->sc_dev, 2286 "Can't parse empty OTP/NVM sections\n"); 2287 return ENOENT; 2288 } 2289 /* MAC_OVERRIDE or at least HW section must exist */ 2290 if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data && 2291 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) { 2292 device_printf(sc->sc_dev, 2293 "Can't parse mac_address, empty sections\n"); 2294 return ENOENT; 2295 } 2296 2297 /* PHY_SKU section is mandatory in B0 */ 2298 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) { 2299 device_printf(sc->sc_dev, 2300 "Can't parse phy_sku in B0, empty sections\n"); 2301 return ENOENT; 2302 } 2303 2304 hw = (const uint16_t *) 2305 sections[IWM_NVM_SECTION_TYPE_HW_8000].data; 2306 } else { 2307 panic("unknown device family %d\n", sc->sc_device_family); 2308 } 2309 2310 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data; 2311 calib = (const uint16_t *) 2312 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data; 2313 regulatory = (const uint16_t *) 2314 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data; 2315 mac_override = (const uint16_t *) 2316 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data; 2317 phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data; 2318 2319 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override, 2320 phy_sku, regulatory); 2321 } 2322 2323 static int 2324 iwm_nvm_init(struct iwm_softc *sc) 2325 { 2326 struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS]; 2327 int i, section, error; 2328 uint16_t len; 2329 uint8_t *buf; 2330 const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE; 2331 2332 memset(nvm_sections, 0 , sizeof(nvm_sections)); 2333 2334 buf = kmalloc(bufsz, M_DEVBUF, M_INTWAIT); 2335 if (buf == NULL) 2336 return ENOMEM; 2337 2338 for (i = 0; i < nitems(nvm_to_read); i++) { 2339 section = nvm_to_read[i]; 2340 KKASSERT(section <= nitems(nvm_sections)); 2341 2342 error = iwm_nvm_read_section(sc, section, buf, &len, bufsz); 2343 if (error) { 2344 error = 0; 2345 
continue; 2346 } 2347 nvm_sections[section].data = kmalloc(len, M_DEVBUF, M_INTWAIT); 2348 if (nvm_sections[section].data == NULL) { 2349 error = ENOMEM; 2350 break; 2351 } 2352 memcpy(nvm_sections[section].data, buf, len); 2353 nvm_sections[section].length = len; 2354 } 2355 kfree(buf, M_DEVBUF); 2356 if (error) 2357 return error; 2358 2359 return iwm_parse_nvm_sections(sc, nvm_sections); 2360 } 2361 2362 /* 2363 * Firmware loading gunk. This is kind of a weird hybrid between the 2364 * iwn driver and the Linux iwlwifi driver. 2365 */ 2366 2367 static int 2368 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr, 2369 const uint8_t *section, uint32_t byte_cnt) 2370 { 2371 int error = EINVAL; 2372 uint32_t chunk_sz, offset; 2373 2374 chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt); 2375 2376 for (offset = 0; offset < byte_cnt; offset += chunk_sz) { 2377 uint32_t addr, len; 2378 const uint8_t *data; 2379 2380 addr = dst_addr + offset; 2381 len = MIN(chunk_sz, byte_cnt - offset); 2382 data = section + offset; 2383 2384 error = iwm_firmware_load_chunk(sc, addr, data, len); 2385 if (error) 2386 break; 2387 } 2388 2389 return error; 2390 } 2391 2392 static int 2393 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr, 2394 const uint8_t *chunk, uint32_t byte_cnt) 2395 { 2396 struct iwm_dma_info *dma = &sc->fw_dma; 2397 int error; 2398 2399 /* Copy firmware chunk into pre-allocated DMA-safe memory. 
	 */
	memcpy(dma->vaddr, chunk, byte_cnt);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	/* Extended SRAM destinations need the extended address space bit. */
	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
		iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
	}

	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Program the service channel: pause DMA, set the SRAM destination,
	 * point the TFD at our chunk buffer, then re-enable the channel.
	 * The register write order matters.
	 */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait 1s for this segment to load */
	error = 0;
	while (!sc->sc_fw_chunk_done) {
#if defined(__DragonFly__)
		error = iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz);
#else
		error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
#endif
		if (error)
			break;
	}

	if (!sc->sc_fw_chunk_done) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
	}

	/* Undo the extended address-space bit set above. */
	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
	    dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
		iwm_clear_bits_prph(sc,
		    IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
		iwm_nic_unlock(sc);
	}

	return error;
}

/*
 * Load the firmware sections belonging to one CPU (1 or 2) of an
 * 8000-family device.  Sections are consumed starting at
 * *first_ucode_section until a separator section (CPU1/CPU2 or paging)
 * or an empty slot is hit; the index of the last section examined is
 * written back through first_ucode_section so the CPU2 pass can resume
 * after it.  After each section the ucode is notified via the
 * FH_UCODE_LOAD_STATUS register (CPU2 bits shifted up by 16).
 */
int
iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
    int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, error = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;
	const void *data;
	uint32_t dlen;
	uint32_t offset;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
		last_read_idx = i;
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    offset == IWM_PAGING_SEPARATOR_SECTION)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
		    i, offset, dlen, cpu);

		if (dlen > sc->sc_fwdmasegsz) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "chunk %d too large (%d bytes)\n", i, dlen);
			error = EFBIG;
		} else {
			error = iwm_firmware_load_sect(sc, offset, data, dlen);
		}
		if (error) {
			device_printf(sc->sc_dev,
			    "could not load firmware chunk %d (error %d)\n",
			    i, error);
			return error;
		}

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);

			/*
			 * The firmware won't load correctly
			 * without this delay.
			 */
			DELAY(8000);
		}
	}

	*first_ucode_section = last_read_idx;

	/* Final "all sections loaded" handshake for this CPU. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}

/*
 * Load a complete 8000-family firmware image: release the CPUs from
 * reset, then load the CPU1 sections followed by the CPU2 sections.
 */
int
iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_sects *fws;
	int error = 0;
	int first_ucode_section;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
	    ucode_type);

	fws = &sc->sc_fw.fw_sects[ucode_type];

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
	if (error)
		return error;

	/* load to FW the binary sections of CPU2 */
	return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
}

/*
 * Load a complete pre-8000 firmware image: every section is DMAed to
 * its device offset, then the CPU is released by clearing CSR_RESET.
 */
static int
iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_sects *fws;
	int error, i;
	const void *data;
	uint32_t dlen;
	uint32_t offset;

	sc->sc_uc.uc_intr = 0;

	fws = &sc->sc_fw.fw_sects[ucode_type];
	for (i = 0; i < fws->fw_count; i++) {
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;
		IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
		    "LOAD FIRMWARE type %d offset %u len %d\n",
		    ucode_type, offset, dlen);
		if (dlen > sc->sc_fwdmasegsz) {
			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "chunk %d too large (%d bytes)\n", i, dlen);
			error = EFBIG;
		} else {
			error = iwm_firmware_load_sect(sc, offset, data, dlen);
		}
		if (error) {
			device_printf(sc->sc_dev,
			    "could not load firmware chunk %u of %u "
			    "(error=%d)\n", i, fws->fw_count, error);
			return error;
		}
	}

	/* Release the CPU from reset; the ucode starts executing. */
	IWM_WRITE(sc, IWM_CSR_RESET, 0);

	return 0;
}

/*
 * Load the requested ucode image (family-specific path), then wait up
 * to ~1s (10 x hz/10) for the "alive" interrupt that sets uc_intr.
 * Dumps the CPU status scratch registers on 8000-family failures.
 */
static int
iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error, w;

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		error = iwm_load_firmware_8000(sc, ucode_type);
	else
		error = iwm_load_firmware_7000(sc, ucode_type);
	if (error)
		return error;

	/* wait for the firmware to load */
	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
#if defined(__DragonFly__)
		error = iwmsleep(&sc->sc_uc, &sc->sc_lk, 0, "iwmuc", hz/10);
#else
		error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
#endif
	}
	if (error || !sc->sc_uc.uc_ok) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
			device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
			    iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
			device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
			    iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
		}
	}

	/*
	 * Give the firmware some time to initialize.
	 * Accessing it too early causes errors.
	 */
	/*
	 * NOTE(review): this sleep is DragonFly-specific (iwmsleep) but
	 * unguarded, unlike the #ifdef'd sleeps above — confirm intent.
	 * &w is used purely as a unique, never-signalled wait channel,
	 * so this is a plain 1s delay.
	 */
	iwmsleep(&w, &sc->sc_lk, 0, "iwmfwinit", hz);

	return error;
}

/*
 * Bring the NIC up and load the given ucode image: init the NIC, clear
 * the rfkill handshake bits, enable interrupts, then load the image.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error;

	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	if ((error = iwm_nic_init(sc)) != 0) {
		device_printf(sc->sc_dev, "unable to init nic\n");
		return error;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more? just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwm_load_firmware(sc, ucode_type);
}

/*
 * Tell the firmware which TX antennas are usable.
 */
static int
iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
{
	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = htole32(valid_tx_ant),
	};

	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
}

/*
 * Send the PHY configuration and per-image calibration triggers to the
 * currently running ucode image.
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->sc_uc_current;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}

/*
 * Read the requested firmware image, start it, and wait for the "alive"
 * notification.  On start failure sc_uc_current is rolled back to the
 * previously running image type.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
    enum iwm_ucode_type ucode_type)
{
	enum iwm_ucode_type old_type = sc->sc_uc_current;
	int error;

	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
		    error);
		return error;
	}

	sc->sc_uc_current = ucode_type;
	error = iwm_start_fw(sc, ucode_type);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->sc_uc_current = old_type;
		return error;
	}

	error = iwm_post_alive(sc);
	if (error) {
		device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
	}
	return error;
}

/*
 * mvm misc bits
 */

/*
 * Run the INIT ucode image.  With justnvm set, only the NVM is read
 * (to learn the MAC address) and the function returns; otherwise the
 * full init sequence is run: BT coex config, Smart FIFO, TX antennas,
 * PHY config, and a wait for the init-complete notification.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int error;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	sc->sc_init_complete = 0;
	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
	    IWM_UCODE_TYPE_INIT)) != 0) {
		device_printf(sc->sc_dev, "failed to load init firmware\n");
		return error;
	}

	if (justnvm) {
		if ((error = iwm_nvm_init(sc)) != 0) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			return error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);

		return 0;
	}

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", error);
		return error;
	}

	/* Init Smart FIFO.
	 */
	error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
	if (error != 0)
		return error;

	/* Send TX valid antennas before triggering calibrations */
	if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", error);
		return error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
		device_printf(sc->sc_dev,
		    "%s: failed to run internal calibration: %d\n",
		    __func__, error);
		return error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete) {
#if defined(__DragonFly__)
		error = iwmsleep(&sc->sc_init_complete, &sc->sc_lk,
		    0, "iwminit", 2*hz);
#else
		error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
		    0, "iwminit", 2*hz);
#endif
		if (error) {
			/*
			 * NOTE(review): this prints sc_init_complete rather
			 * than the sleep error code — likely meant 'error'.
			 */
			device_printf(sc->sc_dev, "init complete failed: %d\n",
			    sc->sc_init_complete);
			break;
		}
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
	    sc->sc_init_complete ?
	    "" : "not ");

	return error;
}

/*
 * receive side
 */

/*
 * (re)stock rx ring, called at init-time and at runtime.
 *
 * Allocates a jumbo cluster mbuf, DMA-loads it via the ring's spare
 * map, and installs it at slot 'idx'.  The spare map is swapped with
 * the slot's map so there is always a pre-created map available for
 * the next replenish.  'size' is currently unused (IWM_RBUF_SIZE is
 * used directly).  Returns 0 or an errno.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap = NULL;
	int error;
	bus_addr_t paddr;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load(ring->data_dmat, ring->spare_map,
	    mtod(m, void *), IWM_RBUF_SIZE, iwm_dma_map_addr,
	    &paddr, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		goto fail;
	}

	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor.
	 */
	/* Hardware requires 256-byte-aligned RX buffers (address >> 8). */
	KKASSERT((paddr & 255) == 0);
	ring->desc[idx] = htole32(paddr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
fail:
	m_freem(m);
	return error;
}

#define IWM_RSSI_OFFSET 50
/*
 * Legacy RSSI calculation (pre RX_ENERGY_API firmware): derive per-chain
 * dBm from the raw RSSI and AGC readings in the PHY info block and
 * return the stronger of the two chains.
 */
static int
iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
{
	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
	uint32_t agc_a, agc_b;
	uint32_t val;

	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;

	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;

	/*
	 * dBm = rssi dB - agc dB - constant.
	 * Higher AGC (higher radio gain) means lower signal.
	 */
	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);

	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
	    "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);

	return max_rssi_dbm;
}

/*
 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
 * values are reported by the fw as positive values - need to negate
 * to obtain their dBM. Account for missing antennas by replacing 0
 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
 */
static int
iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
{
	int energy_a, energy_b, energy_c, max_energy;
	uint32_t val;

	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
	    IWM_RX_INFO_ENERGY_ANT_A_POS;
	energy_a = energy_a ? -energy_a : -256;
	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
	    IWM_RX_INFO_ENERGY_ANT_B_POS;
	energy_b = energy_b ? -energy_b : -256;
	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
	    IWM_RX_INFO_ENERGY_ANT_C_POS;
	energy_c = energy_c ? -energy_c : -256;
	max_energy = MAX(energy_a, energy_b);
	max_energy = MAX(max_energy, energy_c);

	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
	    "energy In A %d B %d C %d , and max %d\n",
	    energy_a, energy_b, energy_c, max_energy);

	return max_energy;
}

/*
 * Handler for the PHY-info notification that precedes each RX MPDU:
 * stash the PHY data so iwm_mvm_rx_rx_mpdu() can use it for the
 * following frame.
 */
static void
iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
    struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}

/*
 * Retrieve the average noise (in dBm) among receivers.
 * Antennas reporting 0 are treated as absent; -127 is returned when no
 * antenna reported, otherwise the average silence RSSI minus 107.
 */
static int
iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
{
	int i, total, nbant, noise;

	total = nbant = noise = 0;
	for (i = 0; i < 3; i++) {
		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
		if (noise) {
			total += noise;
			nbant++;
		}
	}

	/* There should be at least one antenna but check anyway. */
	return (nbant == 0) ?
	    -127 : (total / nbant) - 107;
}

/*
 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
 *
 * Handles the actual data of the Rx packet from the fw
 */
static void
iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
    struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_rx_stats rxs;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;

	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* PHY info was delivered just before this MPDU; see rx_rx_phy_cmd. */
	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	/* Status word follows the frame payload. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));

	m = data->m;
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		device_printf(sc->sc_dev,
		    "dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt);
		return;
	}

	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
		return; /* drop */
	}

	/* Firmware capability decides which RSSI representation is used. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_mvm_calc_rssi(sc, phy_info);
	}
	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
	rssi = MIN(rssi, sc->sc_max_rssi);	/* clip to max.
100% */ 3023 3024 /* replenish ring for the buffer we're going to feed to the sharks */ 3025 if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) { 3026 device_printf(sc->sc_dev, "%s: unable to add more buffers\n", 3027 __func__); 3028 return; 3029 } 3030 3031 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 3032 3033 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3034 "%s: phy_info: channel=%d, flags=0x%08x\n", 3035 __func__, 3036 le16toh(phy_info->channel), 3037 le16toh(phy_info->phy_flags)); 3038 3039 /* 3040 * Populate an RX state struct with the provided information. 3041 */ 3042 bzero(&rxs, sizeof(rxs)); 3043 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ; 3044 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI; 3045 rxs.c_ieee = le16toh(phy_info->channel); 3046 if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) { 3047 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ); 3048 } else { 3049 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ); 3050 } 3051 rxs.rssi = rssi - sc->sc_noise; 3052 rxs.nf = sc->sc_noise; 3053 3054 if (ieee80211_radiotap_active_vap(vap)) { 3055 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap; 3056 3057 tap->wr_flags = 0; 3058 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE)) 3059 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 3060 tap->wr_chan_freq = htole16(rxs.c_freq); 3061 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */ 3062 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags); 3063 tap->wr_dbm_antsignal = (int8_t)rssi; 3064 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise; 3065 tap->wr_tsft = phy_info->system_timestamp; 3066 switch (phy_info->rate) { 3067 /* CCK rates. */ 3068 case 10: tap->wr_rate = 2; break; 3069 case 20: tap->wr_rate = 4; break; 3070 case 55: tap->wr_rate = 11; break; 3071 case 110: tap->wr_rate = 22; break; 3072 /* OFDM rates. 
*/ 3073 case 0xd: tap->wr_rate = 12; break; 3074 case 0xf: tap->wr_rate = 18; break; 3075 case 0x5: tap->wr_rate = 24; break; 3076 case 0x7: tap->wr_rate = 36; break; 3077 case 0x9: tap->wr_rate = 48; break; 3078 case 0xb: tap->wr_rate = 72; break; 3079 case 0x1: tap->wr_rate = 96; break; 3080 case 0x3: tap->wr_rate = 108; break; 3081 /* Unknown rate: should not happen. */ 3082 default: tap->wr_rate = 0; 3083 } 3084 } 3085 3086 IWM_UNLOCK(sc); 3087 if (ni != NULL) { 3088 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m); 3089 ieee80211_input_mimo(ni, m, &rxs); 3090 ieee80211_free_node(ni); 3091 } else { 3092 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m); 3093 ieee80211_input_mimo_all(ic, m, &rxs); 3094 } 3095 IWM_LOCK(sc); 3096 } 3097 3098 static int 3099 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt, 3100 struct iwm_node *in) 3101 { 3102 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data; 3103 struct ieee80211_node *ni = &in->in_ni; 3104 struct ieee80211vap *vap = ni->ni_vap; 3105 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK; 3106 int failack = tx_resp->failure_frame; 3107 3108 KASSERT(tx_resp->frame_count == 1, ("too many frames")); 3109 3110 /* Update rate control statistics. 
*/ 3111 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n", 3112 __func__, 3113 (int) le16toh(tx_resp->status.status), 3114 (int) le16toh(tx_resp->status.sequence), 3115 tx_resp->frame_count, 3116 tx_resp->bt_kill_count, 3117 tx_resp->failure_rts, 3118 tx_resp->failure_frame, 3119 le32toh(tx_resp->initial_rate), 3120 (int) le16toh(tx_resp->wireless_media_time)); 3121 3122 if (status != IWM_TX_STATUS_SUCCESS && 3123 status != IWM_TX_STATUS_DIRECT_DONE) { 3124 ieee80211_ratectl_tx_complete(vap, ni, 3125 IEEE80211_RATECTL_TX_FAILURE, &failack, NULL); 3126 return (1); 3127 } else { 3128 ieee80211_ratectl_tx_complete(vap, ni, 3129 IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL); 3130 return (0); 3131 } 3132 } 3133 3134 static void 3135 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, 3136 struct iwm_rx_packet *pkt, struct iwm_rx_data *data) 3137 { 3138 struct iwm_cmd_header *cmd_hdr = &pkt->hdr; 3139 int idx = cmd_hdr->idx; 3140 int qid = cmd_hdr->qid; 3141 struct iwm_tx_ring *ring = &sc->txq[qid]; 3142 struct iwm_tx_data *txd = &ring->data[idx]; 3143 struct iwm_node *in = txd->in; 3144 struct mbuf *m = txd->m; 3145 int status; 3146 3147 KASSERT(txd->done == 0, ("txd not done")); 3148 KASSERT(txd->in != NULL, ("txd without node")); 3149 KASSERT(txd->m != NULL, ("txd without mbuf")); 3150 3151 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3152 3153 sc->sc_tx_timer = 0; 3154 3155 status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in); 3156 3157 /* Unmap and free mbuf. 
*/ 3158 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE); 3159 bus_dmamap_unload(ring->data_dmat, txd->map); 3160 3161 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3162 "free txd %p, in %p\n", txd, txd->in); 3163 txd->done = 1; 3164 txd->m = NULL; 3165 txd->in = NULL; 3166 3167 ieee80211_tx_complete(&in->in_ni, m, status); 3168 3169 if (--ring->queued < IWM_TX_RING_LOMARK) { 3170 sc->qfullmsk &= ~(1 << ring->qid); 3171 if (sc->qfullmsk == 0) { 3172 /* 3173 * Well, we're in interrupt context, but then again 3174 * I guess net80211 does all sorts of stunts in 3175 * interrupt context, so maybe this is no biggie. 3176 */ 3177 iwm_start(sc); 3178 } 3179 } 3180 } 3181 3182 /* 3183 * transmit side 3184 */ 3185 3186 /* 3187 * Process a "command done" firmware notification. This is where we wakeup 3188 * processes waiting for a synchronous command completion. 3189 * from if_iwn 3190 */ 3191 static void 3192 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt) 3193 { 3194 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE]; 3195 struct iwm_tx_data *data; 3196 3197 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) { 3198 return; /* Not a command ack. */ 3199 } 3200 3201 data = &ring->data[pkt->hdr.idx]; 3202 3203 /* If the command was mapped in an mbuf, free it. 
 */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake up any thread sleeping on this command slot. */
	wakeup(&ring->desc[pkt->hdr.idx]);
}

#if 0
/*
 * necessary only for block ack mode
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif

/*
 * Take an 802.11 (non-n) rate, find the relevant rate
 * table entry.  return the index into in_ridx[].
 *
 * The caller then uses that index back into in_ridx
 * to figure out the rate index programmed /into/
 * the firmware for this given node.
 */
static int
iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
	uint8_t rate)
{
	int i;
	uint8_t r;

	for (i = 0; i < nitems(in->in_ridx); i++) {
		r = iwm_rates[in->in_ridx[i]].rate;
		if (rate == r)
			return (i);
	}
	/* XXX Return the first */
	/* XXX TODO: have it return the /lowest/ */
	return (0);
}

/*
 * Fill in the rate related information for a transmit command.
 * Returns the iwm_rates[] entry chosen for this frame.
 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/*
	 * XXX TODO: everything about the rate selection here is terrible!
	 */

	if (type == IEEE80211_FC0_TYPE_DATA) {
		int i;
		/* for data frames, use RS table */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
		ridx = in->in_ridx[i];

		/* This is the index into the programmed table */
		tx->initial_rate_index = i;
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
		    "%s: start with i=%d, txrate %d\n",
		    __func__, i, iwm_rates[ridx].rate);
	} else {
		/*
		 * For non-data, use the lowest supported rate for the given
		 * operational mode.
		 *
		 * Note: there may not be any rate control information available.
		 * This driver currently assumes if we're transmitting data
		 * frames, use the rate control table.  Grr.
		 *
		 * XXX TODO: use the configured rate for the traffic type!
		 * XXX TODO: this should be per-vap, not curmode; as we later
		 * on we'll want to handle off-channel stuff (eg TDLS).
		 */
		if (ic->ic_curmode == IEEE80211_MODE_11A) {
			/*
			 * XXX this assumes the mode is either 11a or not 11a;
			 * definitely won't work for 11n.
			 */
			ridx = IWM_RIDX_OFDM;
		} else {
			ridx = IWM_RIDX_CCK;
		}
	}

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
	    __func__, ridx,
	    rinfo->rate,
	    !! (IWM_RIDX_IS_CCK(ridx))
	    );

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}

#define TB0_SIZE 16
/*
 * Map a frame for transmit: build the iwm_tx_cmd, optionally encrypt,
 * DMA-load the mbuf (defragmenting if needed), fill the TFD and kick
 * the ring.  Called with the driver lock held.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	struct mbuf *m_defragged;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/*
	 * NOTE(review): this requests RTS/CTS protection for *non*-data
	 * frames exceeding the RTS threshold, which looks inverted --
	 * protection is conventionally applied to large data frames.
	 * Confirm against the reference iwlwifi/iwm code before changing.
	 */
	if (type != IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, m,
	    segs, IWM_MAX_SCATTER - 2,
	    &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m_defragged = m_defrag(m, M_NOWAIT);
		if (m_defragged == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m_defragged;

#if defined(__DragonFly__)
		error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, m,
		    segs, IWM_MAX_SCATTER - 2,
		    &nsegs, BUS_DMA_NOWAIT);
#else
		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TB0/TB1 carry the command header + tx cmd; payload follows. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	    + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload.
 */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}

/*
 * net80211 raw-xmit entry point.  Note both branches currently call
 * iwm_tx() identically; the bpf params are not yet honoured (XXX below).
 */
static int
iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = ic->ic_softc;
	int error = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "->%s begin\n", __func__);

	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
		m_freem(m);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "<-%s not RUNNING\n", __func__);
		return (ENETDOWN);
	}

	IWM_LOCK(sc);
	/* XXX fix this */
	if (params == NULL) {
		error = iwm_tx(sc, m, ni, 0);
	} else {
		error = iwm_tx(sc, m, ni, 0);
	}
	sc->sc_tx_timer = 5;
	IWM_UNLOCK(sc);

	return (error);
}

/*
 * mvm/tx.c
 */

#if 0
/*
 * Note that there are transports that buffer frames before they reach
 * the firmware.  This means that after flush_tx_path is called, the
 * queue might not be empty.  The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 */
int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
		device_printf(sc->sc_dev,
		    "Flushing tx queue failed: %d\n", ret);
	return ret;
}
#endif

/* Thin wrapper: send IWM_ADD_STA and return the firmware status. */
static int
iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
	struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
{
	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
	    cmd, status);
}

/* send station add/update command to firmware */
static int
iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
{
	struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
	int ret;
	/*
	 * NOTE(review): declared uint32_t here but passed to a function
	 * taking int * -- same width in practice, but the types should
	 * be made to agree.
	 */
	uint32_t status;

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	add_sta_cmd.sta_id = IWM_STATION_ID;
	add_sta_cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
	        IWM_DEFAULT_COLOR));
	if (!update) {
		int ac;
		for (ac = 0; ac < WME_NUM_AC; ac++) {
			add_sta_cmd.tfd_queue_msk |=
			    htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
		}
		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
	}
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
	add_sta_cmd.tid_disable_tx = htole16(0xffff);
	if (update)
		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);

	status = IWM_ADD_STA_SUCCESS;
	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status) {
	case IWM_ADD_STA_SUCCESS:
		break;
	default:
		ret = EIO;
		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
		break;
	}

	return ret;
}

/* Add the (single) station for this BSS to the firmware. */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}

/* Update the already-added station in the firmware. */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 1);
}

/*
 * Add an internal (non-BSS) station, e.g. the auxiliary station used
 * for off-channel traffic.
 */
static int
iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
	const uint8_t *addr, uint16_t mac_id, uint16_t color)
{
	struct iwm_mvm_add_sta_cmd_v7 cmd;
	int ret;
	/* NOTE(review): not pre-initialised here, unlike sta_send_to_fw --
	 * presumably send_cmd_pdu_status always writes it; confirm. */
	uint32_t status;

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));

	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = htole16(0xffff);

	if (addr)
		IEEE80211_ADDR_COPY(cmd.addr, addr);

	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
	if (ret)
		return ret;

	switch (status) {
	case IWM_ADD_STA_SUCCESS:
		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: Internal station added.\n", __func__);
		return 0;
	default:
		device_printf(sc->sc_dev,
		    "%s: Add internal station failed, status=0x%x\n",
		    __func__, status);
		ret = EIO;
		break;
	}
	return ret;
}

/* Set up the auxiliary station and its transmit queue. */
static int
iwm_mvm_add_aux_sta(struct iwm_softc *sc)
{
	int ret;

	sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
	sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);

	ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
	if (ret)
		return ret;

	ret = iwm_mvm_add_int_sta_common(sc,
	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);

	/* On failure, clear the state so a later retry starts clean. */
	if (ret)
		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
	return ret;
}

/*
 * Distribute the firmware's scheduling quota between active bindings
 * and push the result with IWM_TIME_QUOTA_CMD.  in == NULL clears all.
 */
static int
iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_time_quota_cmd cmd;
	int i, idx, ret, num_active_macs, quota, quota_rem;
	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
	int n_ifs[IWM_MAX_BINDINGS] = {0, };
	uint16_t id;

	memset(&cmd, 0, sizeof(cmd));

	/* currently, PHY ID == binding ID */
	if (in) {
		id = in->in_phyctxt->id;
		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
		colors[id] = in->in_phyctxt->color;

		if (1)
			n_ifs[id] = 1;
	}

	/*
	 * The FW's scheduling session consists of
	 * IWM_MVM_MAX_QUOTA fragments.
Divide these fragments 3783 * equally between all the bindings that require quota 3784 */ 3785 num_active_macs = 0; 3786 for (i = 0; i < IWM_MAX_BINDINGS; i++) { 3787 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID); 3788 num_active_macs += n_ifs[i]; 3789 } 3790 3791 quota = 0; 3792 quota_rem = 0; 3793 if (num_active_macs) { 3794 quota = IWM_MVM_MAX_QUOTA / num_active_macs; 3795 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs; 3796 } 3797 3798 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) { 3799 if (colors[i] < 0) 3800 continue; 3801 3802 cmd.quotas[idx].id_and_color = 3803 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i])); 3804 3805 if (n_ifs[i] <= 0) { 3806 cmd.quotas[idx].quota = htole32(0); 3807 cmd.quotas[idx].max_duration = htole32(0); 3808 } else { 3809 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]); 3810 cmd.quotas[idx].max_duration = htole32(0); 3811 } 3812 idx++; 3813 } 3814 3815 /* Give the remainder of the session to the first binding */ 3816 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem); 3817 3818 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC, 3819 sizeof(cmd), &cmd); 3820 if (ret) 3821 device_printf(sc->sc_dev, 3822 "%s: Failed to send quota: %d\n", __func__, ret); 3823 return ret; 3824 } 3825 3826 /* 3827 * ieee80211 routines 3828 */ 3829 3830 /* 3831 * Change to AUTH state in 80211 state machine. Roughly matches what 3832 * Linux does in bss_info_changed(). 3833 */ 3834 static int 3835 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc) 3836 { 3837 struct ieee80211_node *ni; 3838 struct iwm_node *in; 3839 struct iwm_vap *iv = IWM_VAP(vap); 3840 uint32_t duration; 3841 int error; 3842 3843 /* 3844 * XXX i have a feeling that the vap node is being 3845 * freed from underneath us. Grr. 
3846 */ 3847 ni = ieee80211_ref_node(vap->iv_bss); 3848 in = IWM_NODE(ni); 3849 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE, 3850 "%s: called; vap=%p, bss ni=%p\n", 3851 __func__, 3852 vap, 3853 ni); 3854 3855 in->in_assoc = 0; 3856 3857 error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON); 3858 if (error != 0) 3859 return error; 3860 3861 error = iwm_allow_mcast(vap, sc); 3862 if (error) { 3863 device_printf(sc->sc_dev, 3864 "%s: failed to set multicast\n", __func__); 3865 goto out; 3866 } 3867 3868 /* 3869 * This is where it deviates from what Linux does. 3870 * 3871 * Linux iwlwifi doesn't reset the nic each time, nor does it 3872 * call ctxt_add() here. Instead, it adds it during vap creation, 3873 * and always does a mac_ctx_changed(). 3874 * 3875 * The openbsd port doesn't attempt to do that - it reset things 3876 * at odd states and does the add here. 3877 * 3878 * So, until the state handling is fixed (ie, we never reset 3879 * the NIC except for a firmware failure, which should drag 3880 * the NIC back to IDLE, re-setup and re-add all the mac/phy 3881 * contexts that are required), let's do a dirty hack here. 
3882 */ 3883 if (iv->is_uploaded) { 3884 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) { 3885 device_printf(sc->sc_dev, 3886 "%s: failed to update MAC\n", __func__); 3887 goto out; 3888 } 3889 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0], 3890 in->in_ni.ni_chan, 1, 1)) != 0) { 3891 device_printf(sc->sc_dev, 3892 "%s: failed update phy ctxt\n", __func__); 3893 goto out; 3894 } 3895 in->in_phyctxt = &sc->sc_phyctxt[0]; 3896 3897 if ((error = iwm_mvm_binding_update(sc, in)) != 0) { 3898 device_printf(sc->sc_dev, 3899 "%s: binding update cmd\n", __func__); 3900 goto out; 3901 } 3902 if ((error = iwm_mvm_update_sta(sc, in)) != 0) { 3903 device_printf(sc->sc_dev, 3904 "%s: failed to update sta\n", __func__); 3905 goto out; 3906 } 3907 } else { 3908 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) { 3909 device_printf(sc->sc_dev, 3910 "%s: failed to add MAC\n", __func__); 3911 goto out; 3912 } 3913 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0], 3914 in->in_ni.ni_chan, 1, 1)) != 0) { 3915 device_printf(sc->sc_dev, 3916 "%s: failed add phy ctxt!\n", __func__); 3917 error = ETIMEDOUT; 3918 goto out; 3919 } 3920 in->in_phyctxt = &sc->sc_phyctxt[0]; 3921 3922 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) { 3923 device_printf(sc->sc_dev, 3924 "%s: binding add cmd\n", __func__); 3925 goto out; 3926 } 3927 if ((error = iwm_mvm_add_sta(sc, in)) != 0) { 3928 device_printf(sc->sc_dev, 3929 "%s: failed to add sta\n", __func__); 3930 goto out; 3931 } 3932 } 3933 3934 /* 3935 * Prevent the FW from wandering off channel during association 3936 * by "protecting" the session with a time event. 
3937 */ 3938 /* XXX duration is in units of TU, not MS */ 3939 duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; 3940 iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */); 3941 DELAY(100); 3942 3943 error = 0; 3944 out: 3945 ieee80211_free_node(ni); 3946 return (error); 3947 } 3948 3949 static int 3950 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc) 3951 { 3952 struct iwm_node *in = IWM_NODE(vap->iv_bss); 3953 int error; 3954 3955 if ((error = iwm_mvm_update_sta(sc, in)) != 0) { 3956 device_printf(sc->sc_dev, 3957 "%s: failed to update STA\n", __func__); 3958 return error; 3959 } 3960 3961 in->in_assoc = 1; 3962 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) { 3963 device_printf(sc->sc_dev, 3964 "%s: failed to update MAC\n", __func__); 3965 return error; 3966 } 3967 3968 return 0; 3969 } 3970 3971 static int 3972 iwm_release(struct iwm_softc *sc, struct iwm_node *in) 3973 { 3974 /* 3975 * Ok, so *technically* the proper set of calls for going 3976 * from RUN back to SCAN is: 3977 * 3978 * iwm_mvm_power_mac_disable(sc, in); 3979 * iwm_mvm_mac_ctxt_changed(sc, in); 3980 * iwm_mvm_rm_sta(sc, in); 3981 * iwm_mvm_update_quotas(sc, NULL); 3982 * iwm_mvm_mac_ctxt_changed(sc, in); 3983 * iwm_mvm_binding_remove_vif(sc, in); 3984 * iwm_mvm_mac_ctxt_remove(sc, in); 3985 * 3986 * However, that freezes the device not matter which permutations 3987 * and modifications are attempted. Obviously, this driver is missing 3988 * something since it works in the Linux driver, but figuring out what 3989 * is missing is a little more complicated. Now, since we're going 3990 * back to nothing anyway, we'll just do a complete device reset. 3991 * Up your's, device! 
3992 */ 3993 /* iwm_mvm_flush_tx_path(sc, 0xf, 1); */ 3994 iwm_stop_device(sc); 3995 iwm_init_hw(sc); 3996 if (in) 3997 in->in_assoc = 0; 3998 return 0; 3999 4000 #if 0 4001 int error; 4002 4003 iwm_mvm_power_mac_disable(sc, in); 4004 4005 if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) { 4006 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error); 4007 return error; 4008 } 4009 4010 if ((error = iwm_mvm_rm_sta(sc, in)) != 0) { 4011 device_printf(sc->sc_dev, "sta remove fail %d\n", error); 4012 return error; 4013 } 4014 error = iwm_mvm_rm_sta(sc, in); 4015 in->in_assoc = 0; 4016 iwm_mvm_update_quotas(sc, NULL); 4017 if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) { 4018 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error); 4019 return error; 4020 } 4021 iwm_mvm_binding_remove_vif(sc, in); 4022 4023 iwm_mvm_mac_ctxt_remove(sc, in); 4024 4025 return error; 4026 #endif 4027 } 4028 4029 static struct ieee80211_node * 4030 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 4031 { 4032 return kmalloc(sizeof (struct iwm_node), M_80211_NODE, 4033 M_INTWAIT | M_ZERO); 4034 } 4035 4036 static void 4037 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in) 4038 { 4039 struct ieee80211_node *ni = &in->in_ni; 4040 struct iwm_lq_cmd *lq = &in->in_lq; 4041 int nrates = ni->ni_rates.rs_nrates; 4042 int i, ridx, tab = 0; 4043 int txant = 0; 4044 4045 if (nrates > nitems(lq->rs_table)) { 4046 device_printf(sc->sc_dev, 4047 "%s: node supports %d rates, driver handles " 4048 "only %zu\n", __func__, nrates, nitems(lq->rs_table)); 4049 return; 4050 } 4051 if (nrates == 0) { 4052 device_printf(sc->sc_dev, 4053 "%s: node supports 0 rates, odd!\n", __func__); 4054 return; 4055 } 4056 4057 /* 4058 * XXX .. and most of iwm_node is not initialised explicitly; 4059 * it's all just 0x0 passed to the firmware. 
4060 */ 4061 4062 /* first figure out which rates we should support */ 4063 /* XXX TODO: this isn't 11n aware /at all/ */ 4064 memset(&in->in_ridx, -1, sizeof(in->in_ridx)); 4065 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 4066 "%s: nrates=%d\n", __func__, nrates); 4067 4068 /* 4069 * Loop over nrates and populate in_ridx from the highest 4070 * rate to the lowest rate. Remember, in_ridx[] has 4071 * IEEE80211_RATE_MAXSIZE entries! 4072 */ 4073 for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) { 4074 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL; 4075 4076 /* Map 802.11 rate to HW rate index. */ 4077 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++) 4078 if (iwm_rates[ridx].rate == rate) 4079 break; 4080 if (ridx > IWM_RIDX_MAX) { 4081 device_printf(sc->sc_dev, 4082 "%s: WARNING: device rate for %d not found!\n", 4083 __func__, rate); 4084 } else { 4085 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 4086 "%s: rate: i: %d, rate=%d, ridx=%d\n", 4087 __func__, 4088 i, 4089 rate, 4090 ridx); 4091 in->in_ridx[i] = ridx; 4092 } 4093 } 4094 4095 /* then construct a lq_cmd based on those */ 4096 memset(lq, 0, sizeof(*lq)); 4097 lq->sta_id = IWM_STATION_ID; 4098 4099 /* For HT, always enable RTS/CTS to avoid excessive retries. */ 4100 if (ni->ni_flags & IEEE80211_NODE_HT) 4101 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK; 4102 4103 /* 4104 * are these used? (we don't do SISO or MIMO) 4105 * need to set them to non-zero, though, or we get an error. 4106 */ 4107 lq->single_stream_ant_msk = 1; 4108 lq->dual_stream_ant_msk = 1; 4109 4110 /* 4111 * Build the actual rate selection table. 4112 * The lowest bits are the rates. Additionally, 4113 * CCK needs bit 9 to be set. The rest of the bits 4114 * we add to the table select the tx antenna 4115 * Note that we add the rates in the highest rate first 4116 * (opposite of ni_rates). 4117 */ 4118 /* 4119 * XXX TODO: this should be looping over the min of nrates 4120 * and LQ_MAX_RETRY_NUM. Sigh. 
4121 */ 4122 for (i = 0; i < nrates; i++) { 4123 int nextant; 4124 4125 if (txant == 0) 4126 txant = iwm_fw_valid_tx_ant(sc); 4127 nextant = 1<<(ffs(txant)-1); 4128 txant &= ~nextant; 4129 4130 /* 4131 * Map the rate id into a rate index into 4132 * our hardware table containing the 4133 * configuration to use for this rate. 4134 */ 4135 ridx = in->in_ridx[i]; 4136 tab = iwm_rates[ridx].plcp; 4137 tab |= nextant << IWM_RATE_MCS_ANT_POS; 4138 if (IWM_RIDX_IS_CCK(ridx)) 4139 tab |= IWM_RATE_MCS_CCK_MSK; 4140 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 4141 "station rate i=%d, rate=%d, hw=%x\n", 4142 i, iwm_rates[ridx].rate, tab); 4143 lq->rs_table[i] = htole32(tab); 4144 } 4145 /* then fill the rest with the lowest possible rate */ 4146 for (i = nrates; i < nitems(lq->rs_table); i++) { 4147 KASSERT(tab != 0, ("invalid tab")); 4148 lq->rs_table[i] = htole32(tab); 4149 } 4150 } 4151 4152 static int 4153 iwm_media_change(struct ifnet *ifp) 4154 { 4155 struct ieee80211vap *vap = ifp->if_softc; 4156 struct ieee80211com *ic = vap->iv_ic; 4157 struct iwm_softc *sc = ic->ic_softc; 4158 int error; 4159 4160 error = ieee80211_media_change(ifp); 4161 if (error != ENETRESET) 4162 return error; 4163 4164 IWM_LOCK(sc); 4165 if (ic->ic_nrunning > 0) { 4166 iwm_stop(sc); 4167 iwm_init(sc); 4168 } 4169 IWM_UNLOCK(sc); 4170 return error; 4171 } 4172 4173 4174 static int 4175 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 4176 { 4177 struct iwm_vap *ivp = IWM_VAP(vap); 4178 struct ieee80211com *ic = vap->iv_ic; 4179 struct iwm_softc *sc = ic->ic_softc; 4180 struct iwm_node *in; 4181 int error; 4182 4183 IWM_DPRINTF(sc, IWM_DEBUG_STATE, 4184 "switching state %s -> %s\n", 4185 ieee80211_state_name[vap->iv_state], 4186 ieee80211_state_name[nstate]); 4187 IEEE80211_UNLOCK(ic); 4188 IWM_LOCK(sc); 4189 4190 if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state) 4191 iwm_led_blink_stop(sc); 4192 4193 /* disable beacon filtering if we're hopping out of RUN */ 
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		/* Tears down the firmware state for the old association. */
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			/* Lock order: net80211 lock must be taken before sc lock. */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 * to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(sc, in);

		/* Push the link-quality (rate table) command built above. */
		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	/* Chain to the stacked net80211 state handler saved at vap attach. */
	return (ivp->iv_newstate(vap, nstate, arg));
}

/*
 * Deferred-task callback (sc_es_task): report scan completion to
 * net80211 outside of interrupt context.
 */
void
iwm_endscan_cb(void *arg, int pending)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
	    "%s: scan ended\n",
	    __func__);

	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
}

/*
 * Aging and idle timeouts for the different possible scenarios
 * in default configuration
 */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};

/*
 * Aging and idle timeouts for the different possible scenarios
 * in single BSS MAC configuration.
 */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};

/*
 * Fill a Smart Fifo configuration command.  'ni' is the BSS node when
 * associated (its HT capabilities select the watermark), or NULL for the
 * unassociated defaults.
 */
static void
iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
	struct ieee80211_node *ni)
{
	int i, j, watermark;

	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			/* MIMO watermark selection needs rx MCS info (not wired up yet). */
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else {
			watermark = IWM_SF_W_MARK_LEGACY;
		}
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Entries are pre-swapped to little endian in the tables above. */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		       sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		       sizeof(iwm_sf_full_timeout_def));
	}
}

/*
 * Send a Smart Fifo configuration command for the given target state.
 * Returns 0 on success, EINVAL for an unknown state, or the error from
 * iwm_mvm_send_cmd_pdu() (sent async, so success only means "queued").
 */
static int
iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_sf_cfg_cmd sf_cmd = {
		.state = htole32(IWM_SF_FULL_ON),
	};
	int ret = 0;

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);

	switch (new_state) {
	case IWM_SF_UNINIT:
	case IWM_SF_INIT_OFF:
		iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
		break;
	case IWM_SF_FULL_ON:
		iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
		break;
	default:
		IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
		    "Invalid state: %d. not sending Smart Fifo cmd\n",
		    new_state);
		return EINVAL;
	}

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
				   sizeof(sf_cmd), &sf_cmd);
	return ret;
}

/*
 * Send the initial Bluetooth-coexistence configuration to the firmware.
 *
 * NOTE(review): bt_cmd is stack-allocated and only 'mode' and
 * 'enabled_modules' are set; if struct iwm_bt_coex_cmd has further
 * members they go out uninitialized — TODO confirm against the struct
 * definition (upstream zero-initializes this command).
 */
static int
iwm_send_bt_init_conf(struct iwm_softc *sc)
{
	struct iwm_bt_coex_cmd bt_cmd;

	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);

	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
	    &bt_cmd);
}

/*
 * Send an MCC (mobile country code) update to the firmware, requesting
 * the regulatory domain for the two-letter country code 'alpha2'.
 * Chooses the v1 or v2 command layout based on the LAR_SUPPORT_V2
 * capability and waits synchronously for the response (freed before
 * returning).  Returns 0 on success or the iwm_send_cmd() error.
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	int resp_v2 = isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* MCC is the two ASCII letters packed into a 16-bit value. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		mcc =
		    mcc_resp->mcc;
		n_channels = le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels = le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;	/* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	iwm_free_resp(sc, &hcmd);

	return 0;
}

/*
 * Set the firmware's thermal-management TX backoff value.  Failure is
 * logged but not propagated (best effort).
 */
static void
iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
{
	struct iwm_host_cmd cmd = {
		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
		.len = { sizeof(uint32_t), },
		.data = { &backoff, },
	};

	if (iwm_send_cmd(sc, &cmd) != 0) {
		device_printf(sc->sc_dev,
		    "failed to change thermal tx backoff\n");
	}
}

/*
 * Bring the hardware fully up: run the INIT firmware image, restart the
 * NIC, load the regular firmware, and issue the whole initial command
 * sequence (BT coex, antenna config, PHY DB, aux station, PHY contexts,
 * power, MCC, scan config, TX queues).  Any failure after firmware load
 * stops the device via the 'error' label.  Returns 0 or an errno.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	if ((error = iwm_start_hw(sc)) != 0) {
		kprintf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration*/
	if ((error = iwm_send_phy_db_data(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_db_data failed\n");
		goto error;
	}

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	/* Initialize tx backoffs to the minimum. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_mvm_tt_tx_backoff(sc, 0);

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* "ZZ" asks the firmware/LAR for the current regulatory domain. */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
			goto error;
	}

	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
			goto error;
	}

	/* Enable Tx queues. */
	for (ac = 0; ac < WME_NUM_AC; ac++) {
		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
		    iwm_mvm_ac_to_tx_fifo[ac]);
		if (error)
			goto error;
	}

	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
		goto error;
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}

/* Allow multicast from our BSSID. */
static int
iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
{
	struct ieee80211_node *ni = vap->iv_bss;
	struct iwm_mcast_filter_cmd *cmd;
	size_t size;
	int error;

	/* Command must be padded to a 4-byte multiple. */
	size = roundup(sizeof(*cmd), 4);
	cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
	if (cmd == NULL)
		return ENOMEM;
	cmd->filter_own = 1;
	cmd->port_id = 0;
	cmd->count = 0;
	cmd->pass_all = 1;
	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);

	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
	    IWM_CMD_SYNC, size, cmd);
	kfree(cmd, M_DEVBUF);

	return (error);
}

/*
 * ifnet interfaces
 */

/*
 * Bring the interface up.  No-op if already initialized; on hardware
 * init failure the device is stopped again.  On success starts the
 * watchdog callout.  Called with the driver lock held.
 */
static void
iwm_init(struct iwm_softc *sc)
{
	int error;

	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
		return;
	}
	sc->sc_generation++;
	sc->sc_flags &= ~IWM_FLAG_STOPPED;

	if ((error = iwm_init_hw(sc)) != 0) {
		kprintf("iwm_init_hw failed %d\n", error);
		iwm_stop(sc);
		return;
	}

	/*
	 * Ok, firmware loaded and we are jogging
	 */
	sc->sc_flags |= IWM_FLAG_HW_INITED;
	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
}

/*
 * net80211 raw transmit entry point: queue the mbuf on the driver's
 * send queue and kick iwm_start().  Returns ENXIO when the hardware is
 * not up, or the mbufq error (caller owns/frees 'm' on error).
 */
static int
iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
{
	struct iwm_softc *sc;
	int error;

	sc = ic->ic_softc;

	IWM_LOCK(sc);
	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
		IWM_UNLOCK(sc);
		return (ENXIO);
	}
	error =
	    mbufq_enqueue(&sc->sc_snd, m);
	if (error) {
		IWM_UNLOCK(sc);
		return (error);
	}
	iwm_start(sc);
	IWM_UNLOCK(sc);
	return (0);
}

/*
 * Dequeue packets from sendq and call send.
 * Runs with the driver lock held; stops as soon as any TX queue is full
 * (qfullmsk != 0).  On TX failure the node reference attached to the
 * mbuf is released and the output-error counter bumped.
 */
static void
iwm_start(struct iwm_softc *sc)
{
	struct ieee80211_node *ni;
	struct mbuf *m;
	int ac = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
	while (sc->qfullmsk == 0 &&
		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		/* net80211 stashes the node pointer in rcvif. */
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		if (iwm_tx(sc, m, ni, ac) != 0) {
			if_inc_counter(ni->ni_vap->iv_ifp,
			    IFCOUNTER_OERRORS, 1);
			ieee80211_free_node(ni);
			continue;
		}
		/* Arm the TX watchdog (seconds, decremented in iwm_watchdog). */
		sc->sc_tx_timer = 15;
	}
	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
}

/*
 * Bring the interface down: mark it stopped, cancel LED blinking and the
 * TX watchdog, and shut the device off.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
}

/*
 * Per-second callout: fires the TX watchdog.  When sc_tx_timer (armed in
 * iwm_start) counts down to zero the device is assumed wedged, the error
 * log is dumped (debug builds) and the interface is stopped.
 */
static void
iwm_watchdog(void *arg)
{
	struct iwm_softc *sc = arg;

	if (sc->sc_tx_timer > 0) {
		if (--sc->sc_tx_timer == 0) {
			device_printf(sc->sc_dev, "device timeout\n");
#ifdef IWM_DEBUG
			iwm_nic_error(sc);
#endif
			iwm_stop(sc);
#if defined(__DragonFly__)
			++sc->sc_ic.ic_oerrors;
#else
			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
#endif
			/* Do not re-arm the callout after a timeout stop. */
			return;
		}
	}
	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
}

/*
 * net80211 "parent" up/down handler: init the hardware when a vap comes
 * up (then restart all vaps outside the driver lock), stop it when the
 * last one goes down.
 */
static void
iwm_parent(struct ieee80211com *ic)
{
	struct iwm_softc *sc = ic->ic_softc;
	int startall = 0;

	IWM_LOCK(sc);
	if (ic->ic_nrunning > 0) {
		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
			iwm_init(sc);
			startall = 1;
		}
	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
		iwm_stop(sc);
	IWM_UNLOCK(sc);
	if (startall)
		ieee80211_start_all(ic);
}

/*
 * The interrupt side of things
 */

/*
 * error dumping routines are from iwlwifi/mvm/utils.c
 */

/*
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with uint32_t-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;	/* beacon timer */
	uint32_t tsf_low;	/* network timestamp function timer */
	uint32_t tsf_hi;	/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;	/* HW Silicon version */
	uint32_t brd_ver;	/* HW board version */
	uint32_t log_pc;	/* log program counter */
	uint32_t frame_ptr;	/* frame pointer */
	uint32_t stack_ptr;	/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;	/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;

/*
 * UMAC error struct - relevant starting from family 8000 chip.
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC uCode version major */
	uint32_t umac_minor;	/* UMAC uCode version minor */
	uint32_t frame_pointer;	/* core register 27*/
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;

#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))

#ifdef IWM_DEBUG
/*
 * Map of firmware assert IDs to symbolic names; the catch-all
 * ADVANCED_SYSASSERT entry must remain last (see iwm_desc_lookup()).
 */
struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};

/*
 * Return the symbolic name for a firmware error id, or the trailing
 * "ADVANCED_SYSASSERT" entry when the id is unknown.
 */
static const char *
iwm_desc_lookup(uint32_t num)
{
	int i;

	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
		if (advanced_lookup[i].num == num)
			return advanced_lookup[i].name;

	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
	return advanced_lookup[i].name;
}

/*
 * Dump the UMAC error event table (family 8000+) from device memory to
 * the console.  The table address was reported in the firmware's ALIVE
 * response.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* iwm_read_mem counts in 32-bit words, hence the division. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}

/*
 * Support for dumping the error log seemed like a good idea ...
 * but it's mostly hex junk and the only sensible thing is the
 * hw/ucode revision (which we know anyway).  Since it's here,
 * I'll just leave it in, just in case e.g. the Intel guys want to
 * help us decipher some "ADVANCED_SYSASSERT" later.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->sc_uc.uc_error_event_table;
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem counts in 32-bit words, hence the division. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    table.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

	if (sc->sc_uc.uc_umac_error_event_table)
		iwm_nic_umac_error(sc);
}
#endif

/*
 * Sync the RX DMA map for the current packet and point _var_ at the
 * payload immediately following the packet header.  Relies on 'ring'
 * and 'data' being in scope at the expansion site (iwm_notif_intr).
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);

/*
 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
 * Basic structure from if_iwn
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* Firmware write pointer into the RX ring (12 valid bits). */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;

	/*
	 * Process responses
	 */
	while (sc->rxq.cur != hw) {
		struct iwm_rx_ring *ring = &sc->rxq;
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int qid, idx, code;

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 7 of qid marks firmware-internal; mask it off. */
		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "rx packet qid=%d idx=%d type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, sc->rxq.cur, hw);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION: {
			struct iwm_missed_beacons_notif *resp;
			int missed;

			/* XXX look at mac_id to determine interface ID */
			struct ieee80211com *ic = &sc->sc_ic;
			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

			SYNC_RESP_STRUCT(resp, pkt);
			missed = le32toh(resp->consec_missed_beacons);

			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
			    "%s: MISSED_BEACON: mac_id=%d, "
			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
			    "num_rx=%d\n",
			    __func__,
			    le32toh(resp->mac_id),
			    le32toh(resp->consec_missed_beacons_since_last_rx),
			    le32toh(resp->consec_missed_beacons),
			    le32toh(resp->num_expected_beacons),
			    le32toh(resp->num_recvd_beacons));

			/* Be paranoid */
			if (vap == NULL)
				break;

			/* XXX no net80211 locking? */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (missed > vap->iv_bmissthreshold) {
					/* XXX bad locking; turn into task */
					IWM_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWM_LOCK(sc);
				}
			}

			break; }

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		case IWM_MVM_ALIVE: {
			/*
			 * ALIVE response: the payload size tells us which
			 * firmware response layout (v1/v2/v3) we received;
			 * all carry the error/log table pointers and the
			 * scheduler base that later code depends on.
			 */
			struct iwm_mvm_alive_resp_v1 *resp1;
			struct iwm_mvm_alive_resp_v2 *resp2;
			struct iwm_mvm_alive_resp_v3 *resp3;

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
				SYNC_RESP_STRUCT(resp1, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp1->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp1->log_event_table_ptr);
				sc->sched_base = le32toh(resp1->scd_base_ptr);
				if (resp1->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
				SYNC_RESP_STRUCT(resp2, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp2->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp2->log_event_table_ptr);
				sc->sched_base = le32toh(resp2->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp2->error_info_addr);
				if (resp2->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
				SYNC_RESP_STRUCT(resp3, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp3->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp3->log_event_table_ptr);
				sc->sched_base = le32toh(resp3->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp3->error_info_addr);
				if (resp3->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			/* Wake the thread sleeping in load_ucode_wait_alive. */
			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break; }

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);

			iwm_phy_db_set_section(sc, phy_db_notif);

			break; }

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break; }

		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			/*
			 * Large responses: copy the whole packet into the
			 * command-response buffer for the waiting thread.
			 */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->rxq.data_dmat, data->map,
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Record the firmware-reported country code as a string. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "fw source %d sent CC '%s'\n",
			    notif->source_id, sc->sc_fw_mcc);
			break; }

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
			break;

		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			/* Simple command acknowledgements: hand back the
			 * small generic response if someone is waiting. */
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			break; }

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			/* Defer scan-done processing to iwm_endscan_cb(). */
			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
			break; }

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
			    "UMAC scan complete, status=0x%x\n",
			    notif->status);
#if 0	/* XXX This would be a duplicate scan end call */
			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
#endif
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
			    "complete, status=0x%x, %d channels scanned\n",
			    notif->status, notif->scanned_channels);
			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			device_printf(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type),
			    resp->cmd_id);
			break; }

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
			    "TE notif status = 0x%x action = 0x%x\n",
			    notif->status, notif->action);
			break; }

		case IWM_MCAST_FILTER_CMD:
			break;

5367 case IWM_SCD_QUEUE_CFG: { 5368 struct iwm_scd_txq_cfg_rsp *rsp; 5369 SYNC_RESP_STRUCT(rsp, pkt); 5370 5371 IWM_DPRINTF(sc, IWM_DEBUG_CMD, 5372 "queue cfg token=0x%x sta_id=%d " 5373 "tid=%d scd_queue=%d\n", 5374 rsp->token, rsp->sta_id, rsp->tid, 5375 rsp->scd_queue); 5376 break; 5377 } 5378 5379 default: 5380 device_printf(sc->sc_dev, 5381 "frame %d/%d %x UNHANDLED (this should " 5382 "not happen)\n", qid, idx, 5383 pkt->len_n_flags); 5384 break; 5385 } 5386 5387 /* 5388 * Why test bit 0x80? The Linux driver: 5389 * 5390 * There is one exception: uCode sets bit 15 when it 5391 * originates the response/notification, i.e. when the 5392 * response/notification is not a direct response to a 5393 * command sent by the driver. For example, uCode issues 5394 * IWM_REPLY_RX when it sends a received frame to the driver; 5395 * it is not a direct response to any driver command. 5396 * 5397 * Ok, so since when is 7 == 15? Well, the Linux driver 5398 * uses a slightly different format for pkt->hdr, and "qid" 5399 * is actually the upper byte of a two-byte field. 5400 */ 5401 if (!(pkt->hdr.qid & (1 << 7))) { 5402 iwm_cmd_done(sc, pkt); 5403 } 5404 5405 ADVANCE_RXQ(sc); 5406 } 5407 5408 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL, 5409 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 5410 5411 /* 5412 * Tell the firmware what we have processed. 5413 * Seems like the hardware gets upset unless we align 5414 * the write by 8?? 5415 */ 5416 hw = (hw == 0) ? 
	    IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}

/*
 * Primary interrupt handler.
 *
 * Collects the pending interrupt causes either from the in-memory ICT
 * table (when IWM_FLAG_USE_ICT is set) or directly from the
 * IWM_CSR_INT / IWM_CSR_FH_INT_STATUS registers, acknowledges them,
 * then dispatches in turn: firmware SW error (dump driver state and
 * restart all VAPs), hardware error (stop the device), firmware-chunk
 * DMA completion (wake the loader), rfkill, and RX/periodic-RX, which
 * is drained via iwm_notif_intr().
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;	/* NOTE(review): rv is set but never read */
	int isperiodic = 0;

#if defined(__DragonFly__)
	/* Guard against a stray interrupt arriving after detach. */
	if (sc->sc_mem == NULL) {
		kprintf("iwm_intr: detached\n");
		return;
	}
#endif
	IWM_LOCK(sc);
	/* Mask all interrupts while we service this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Consume the ICT slot and advance. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		/*
		 * Spread the accumulated ICT bytes back out into the
		 * CSR_INT bit layout -- inherited verbatim from the
		 * reference driver.
		 */
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the causes we are about to handle. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			kprintf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/* XXX TODO: turn this into a callout/taskqueue */
		/*
		 * NOTE(review): this path returns without reaching
		 * out_ena:, so interrupts stay masked until the restart
		 * reinitializes the device -- confirm intended.
		 */
		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		/* Wake the firmware-load code waiting in the DMA path. */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		/* Drain the RX/notification ring. */
		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}

/*
 * Autoconf glue-sniffing
 */
#define	PCI_VENDOR_INTEL		0x8086
#define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
#define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
#define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
#define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
#define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
#define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
#define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
#define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
#define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
#define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4

/* Supported PCI device IDs and their marketing names (see iwm_probe()). */
static const struct iwm_devices {
	uint16_t	device;
	const char	*name;
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
	{ PCI_PRODUCT_INTEL_WL_3165_2,
"Intel Dual Band Wireless AC 3165" }, 5604 { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" }, 5605 { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" }, 5606 { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" }, 5607 { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" }, 5608 { PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" }, 5609 { PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" }, 5610 }; 5611 5612 static int 5613 iwm_probe(device_t dev) 5614 { 5615 int i; 5616 5617 for (i = 0; i < nitems(iwm_devices); i++) { 5618 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL && 5619 pci_get_device(dev) == iwm_devices[i].device) { 5620 device_set_desc(dev, iwm_devices[i].name); 5621 return (BUS_PROBE_DEFAULT); 5622 } 5623 } 5624 5625 return (ENXIO); 5626 } 5627 5628 static int 5629 iwm_dev_check(device_t dev) 5630 { 5631 struct iwm_softc *sc; 5632 5633 sc = device_get_softc(dev); 5634 5635 sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV); 5636 switch (pci_get_device(dev)) { 5637 case PCI_PRODUCT_INTEL_WL_3160_1: 5638 case PCI_PRODUCT_INTEL_WL_3160_2: 5639 sc->sc_fwname = "iwm3160fw"; 5640 sc->host_interrupt_operation_mode = 1; 5641 sc->sc_device_family = IWM_DEVICE_FAMILY_7000; 5642 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ; 5643 return (0); 5644 case PCI_PRODUCT_INTEL_WL_3165_1: 5645 case PCI_PRODUCT_INTEL_WL_3165_2: 5646 sc->sc_fwname = "iwm7265fw"; 5647 sc->host_interrupt_operation_mode = 0; 5648 sc->sc_device_family = IWM_DEVICE_FAMILY_7000; 5649 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ; 5650 return (0); 5651 case PCI_PRODUCT_INTEL_WL_7260_1: 5652 case PCI_PRODUCT_INTEL_WL_7260_2: 5653 sc->sc_fwname = "iwm7260fw"; 5654 sc->host_interrupt_operation_mode = 1; 5655 sc->sc_device_family = IWM_DEVICE_FAMILY_7000; 5656 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ; 5657 return (0); 5658 case PCI_PRODUCT_INTEL_WL_7265_1: 5659 case PCI_PRODUCT_INTEL_WL_7265_2: 5660 sc->sc_fwname = "iwm7265fw"; 5661 
sc->host_interrupt_operation_mode = 0; 5662 sc->sc_device_family = IWM_DEVICE_FAMILY_7000; 5663 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ; 5664 return (0); 5665 case PCI_PRODUCT_INTEL_WL_8260_1: 5666 case PCI_PRODUCT_INTEL_WL_8260_2: 5667 sc->sc_fwname = "iwm8000Cfw"; 5668 sc->host_interrupt_operation_mode = 0; 5669 sc->sc_device_family = IWM_DEVICE_FAMILY_8000; 5670 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000; 5671 return (0); 5672 default: 5673 device_printf(dev, "unknown adapter type\n"); 5674 return ENXIO; 5675 } 5676 } 5677 5678 static int 5679 iwm_pci_attach(device_t dev) 5680 { 5681 struct iwm_softc *sc; 5682 int count, error, rid; 5683 uint16_t reg; 5684 #if defined(__DragonFly__) 5685 int irq_flags; 5686 #endif 5687 5688 sc = device_get_softc(dev); 5689 5690 /* Clear device-specific "PCI retry timeout" register (41h). */ 5691 reg = pci_read_config(dev, 0x40, sizeof(reg)); 5692 pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg)); 5693 5694 /* Enable bus-mastering and hardware bug workaround. */ 5695 pci_enable_busmaster(dev); 5696 reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg)); 5697 /* if !MSI */ 5698 if (reg & PCIM_STATUS_INTxSTATE) { 5699 reg &= ~PCIM_STATUS_INTxSTATE; 5700 } 5701 pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg)); 5702 5703 rid = PCIR_BAR(0); 5704 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 5705 RF_ACTIVE); 5706 if (sc->sc_mem == NULL) { 5707 device_printf(sc->sc_dev, "can't map mem space\n"); 5708 return (ENXIO); 5709 } 5710 sc->sc_st = rman_get_bustag(sc->sc_mem); 5711 sc->sc_sh = rman_get_bushandle(sc->sc_mem); 5712 5713 /* Install interrupt handler. */ 5714 count = 1; 5715 rid = 0; 5716 #if defined(__DragonFly__) 5717 pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags); 5718 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags); 5719 #else 5720 if (pci_alloc_msi(dev, &count) == 0) 5721 rid = 1; 5722 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 5723 (rid != 0 ? 
	    0 : RF_SHAREABLE));
#endif
	if (sc->sc_irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		return (ENXIO);
	}
#if defined(__DragonFly__)
	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
	    iwm_intr, sc, &sc->sc_ih,
	    &wlan_global_serializer);
#else
	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwm_intr, sc, &sc->sc_ih);
#endif
	if (sc->sc_ih == NULL) {
		device_printf(dev, "can't establish interrupt");
#if defined(__DragonFly__)
		pci_release_msi(dev);
#endif
		return (ENXIO);
	}
	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);

	return (0);
}

/*
 * Undo iwm_pci_attach(): tear down the interrupt handler, release the
 * IRQ (and MSI) and the BAR 0 mapping.  Tolerates partially
 * initialized resources, so it is safe on attach-failure paths.
 */
static void
iwm_pci_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
#if defined(__DragonFly__)
		sc->sc_irq = NULL;
#endif
	}
	if (sc->sc_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);
#if defined(__DragonFly__)
		sc->sc_mem = NULL;
#endif
	}
}



/*
 * Device attach.  Initializes locks, the send queue, callouts and the
 * driver task queue, performs the PCI-level attach, identifies the
 * chip, wakes the NIC and allocates firmware/DMA memory and all TX/RX
 * rings.  The firmware-dependent remainder of setup is deferred to
 * iwm_preinit() via config_intrhook_establish().  On any failure the
 * partial state is torn down through iwm_detach_local().
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
#if defined(__DragonFly__)
	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
#else
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
#endif
	callout_init(&sc->sc_led_blink_to);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
#if defined(__DragonFly__)
	error = taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON,
	    -1, "iwm_taskq");
#else
	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
#endif
	if (error != 0) {
		device_printf(dev, "can't start threads, error %d\n",
		    error);
		goto fail;
	}

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* No synchronous command response outstanding yet. */
	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	/*
	 * We now start fiddling with the hardware
	 */
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		/* Wait for the MAC clock before touching the AUX bus. */
		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    25000);
		if (ret < 0) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			/* Rewrite the stored hw_rev if this is a C step. */
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
				    (IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Start all PHY contexts unreferenced and without a channel. */
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
	/* Defer firmware-dependent setup until interrupts are running. */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	sc->sc_preinit_hook.ich_desc = "iwm";
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment.
	 */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}

/*
 * Return TRUE iff 'addr' is a plausible unicast MAC address: rejects
 * group/multicast addresses (low bit of the first octet set) and the
 * all-zero address.
 */
static int
iwm_is_valid_ether_addr(uint8_t *addr)
{
	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
		return (FALSE);

	return (TRUE);
}

/*
 * net80211 WME parameter-update callback.  Currently a stub that only
 * logs the call; EDCA parameters are not pushed to the firmware here.
 */
static int
iwm_update_edca(struct ieee80211com *ic)
{
	struct iwm_softc *sc = ic->ic_softc;

	device_printf(sc->sc_dev, "%s: called\n", __func__);
	return (0);
}

/*
 * Deferred (config_intrhook) part of attach, run once interrupts work:
 * starts the hardware, runs the init firmware once to read the NVM
 * (then stops the device again), prunes 5 GHz rates if the SKU lacks
 * that band, builds the channel map, and finally attaches net80211 and
 * radiotap.  On failure the whole driver state is torn down.
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/* Run the init ucode once, then park the device again. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_update_edca;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}

/*
 * Attach the interface to 802.11 radiotap.
6060 */ 6061 static void 6062 iwm_radiotap_attach(struct iwm_softc *sc) 6063 { 6064 struct ieee80211com *ic = &sc->sc_ic; 6065 6066 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE, 6067 "->%s begin\n", __func__); 6068 ieee80211_radiotap_attach(ic, 6069 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), 6070 IWM_TX_RADIOTAP_PRESENT, 6071 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), 6072 IWM_RX_RADIOTAP_PRESENT); 6073 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE, 6074 "->%s end\n", __func__); 6075 } 6076 6077 static struct ieee80211vap * 6078 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 6079 enum ieee80211_opmode opmode, int flags, 6080 const uint8_t bssid[IEEE80211_ADDR_LEN], 6081 const uint8_t mac[IEEE80211_ADDR_LEN]) 6082 { 6083 struct iwm_vap *ivp; 6084 struct ieee80211vap *vap; 6085 6086 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 6087 return NULL; 6088 ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO); 6089 vap = &ivp->iv_vap; 6090 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); 6091 vap->iv_bmissthreshold = 10; /* override default */ 6092 /* Override with driver methods. */ 6093 ivp->iv_newstate = vap->iv_newstate; 6094 vap->iv_newstate = iwm_newstate; 6095 6096 ieee80211_ratectl_init(vap); 6097 /* Complete setup. 
*/ 6098 ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status, 6099 mac); 6100 ic->ic_opmode = opmode; 6101 6102 return vap; 6103 } 6104 6105 static void 6106 iwm_vap_delete(struct ieee80211vap *vap) 6107 { 6108 struct iwm_vap *ivp = IWM_VAP(vap); 6109 6110 ieee80211_ratectl_deinit(vap); 6111 ieee80211_vap_detach(vap); 6112 kfree(ivp, M_80211_VAP); 6113 } 6114 6115 static void 6116 iwm_scan_start(struct ieee80211com *ic) 6117 { 6118 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6119 struct iwm_softc *sc = ic->ic_softc; 6120 int error; 6121 6122 IWM_LOCK(sc); 6123 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) 6124 error = iwm_mvm_umac_scan(sc); 6125 else 6126 error = iwm_mvm_lmac_scan(sc); 6127 if (error != 0) { 6128 device_printf(sc->sc_dev, "could not initiate 2 GHz scan\n"); 6129 IWM_UNLOCK(sc); 6130 ieee80211_cancel_scan(vap); 6131 } else { 6132 iwm_led_blink_start(sc); 6133 IWM_UNLOCK(sc); 6134 } 6135 } 6136 6137 static void 6138 iwm_scan_end(struct ieee80211com *ic) 6139 { 6140 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6141 struct iwm_softc *sc = ic->ic_softc; 6142 6143 IWM_LOCK(sc); 6144 iwm_led_blink_stop(sc); 6145 if (vap->iv_state == IEEE80211_S_RUN) 6146 iwm_mvm_led_enable(sc); 6147 IWM_UNLOCK(sc); 6148 } 6149 6150 static void 6151 iwm_update_mcast(struct ieee80211com *ic) 6152 { 6153 } 6154 6155 static void 6156 iwm_set_channel(struct ieee80211com *ic) 6157 { 6158 } 6159 6160 static void 6161 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 6162 { 6163 } 6164 6165 static void 6166 iwm_scan_mindwell(struct ieee80211_scan_state *ss) 6167 { 6168 return; 6169 } 6170 6171 void 6172 iwm_init_task(void *arg1) 6173 { 6174 struct iwm_softc *sc = arg1; 6175 6176 IWM_LOCK(sc); 6177 while (sc->sc_flags & IWM_FLAG_BUSY) { 6178 #if defined(__DragonFly__) 6179 iwmsleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0); 6180 #else 6181 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0); 6182 #endif 6183 
	}
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	/* Release anyone waiting in the IWM_FLAG_BUSY loop above. */
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}

/*
 * Power-management resume: clear the PCI retry timeout again, rerun
 * the init task and, if iwm_suspend() requested it (by setting
 * IWM_FLAG_SCANNING), resume all net80211 interfaces.
 */
static int
iwm_resume(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	int do_reinit = 0;
	uint16_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_read_config(dev, 0x40, sizeof(reg));
	pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
	iwm_init_task(device_get_softc(dev));

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCANNING) {
		sc->sc_flags &= ~IWM_FLAG_SCANNING;
		do_reinit = 1;
	}
	IWM_UNLOCK(sc);

	if (do_reinit)
		ieee80211_resume_all(&sc->sc_ic);

	return 0;
}

/*
 * Power-management suspend: suspend net80211, and if the device was
 * running, stop it and set IWM_FLAG_SCANNING, which iwm_resume() reads
 * as "reinit needed".  NOTE(review): reusing the SCANNING flag as a
 * resume marker is surprising -- confirm it cannot collide with an
 * actual scan in progress.
 */
static int
iwm_suspend(device_t dev)
{
	int do_stop = 0;
	struct iwm_softc *sc = device_get_softc(dev);

	do_stop = !! (sc->sc_ic.ic_nrunning > 0);

	ieee80211_suspend_all(&sc->sc_ic);

	if (do_stop) {
		IWM_LOCK(sc);
		iwm_stop(sc);
		sc->sc_flags |= IWM_FLAG_SCANNING;
		IWM_UNLOCK(sc);
	}

	return (0);
}

/*
 * Common teardown, shared by iwm_detach() and the attach failure
 * paths: drain and free the task queue and callouts, stop the device,
 * optionally detach net80211 ('do_net80211' -- only after a successful
 * ieee80211_ifattach()), free all rings, firmware and DMA memory,
 * release PCI resources and destroy the lock.  Always returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	if (sc->sc_tq) {
#if defined(__DragonFly__)
		/* doesn't exist for DFly, DFly drains tasks on free */
#else
		taskqueue_drain_all(sc->sc_tq);
#endif
		taskqueue_free(sc->sc_tq);
#if defined(__DragonFly__)
		sc->sc_tq = NULL;
#endif
	}
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		ieee80211_ifdetach(&sc->sc_ic);
	}

	/* Free descriptor rings */
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_free_sched(sc);
	if (sc->ict_dma.vaddr != NULL)
		iwm_free_ict(sc);
	if (sc->kw_dma.vaddr != NULL)
		iwm_free_kw(sc);
	if (sc->fw_dma.vaddr != NULL)
		iwm_free_fwmem(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}

/*
 * Device detach entry point: full teardown including net80211.
 */
static int
iwm_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	return (iwm_detach_local(sc, 1));
}

static device_method_t iwm_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwm_probe),
	DEVMETHOD(device_attach,	iwm_attach),
	DEVMETHOD(device_detach,	iwm_detach),
	DEVMETHOD(device_suspend,	iwm_suspend),
	DEVMETHOD(device_resume,	iwm_resume),

	DEVMETHOD_END
};

static driver_t iwm_pci_driver = {
	"iwm",
	iwm_pci_methods,
	sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);