1 /*- 2 * Copyright (c) 2007-2009 Damien Bergamini <damien.bergamini@free.fr> 3 * Copyright (c) 2008 Benjamin Close <benjsc@FreeBSD.org> 4 * Copyright (c) 2008 Sam Leffler, Errno Consulting 5 * Copyright (c) 2011 Intel Corporation 6 * Copyright (c) 2013 Cedric GROSS <c.gross@kreiz-it.fr> 7 * Copyright (c) 2013 Adrian Chadd <adrian@FreeBSD.org> 8 * 9 * Permission to use, copy, modify, and distribute this software for any 10 * purpose with or without fee is hereby granted, provided that the above 11 * copyright notice and this permission notice appear in all copies. 12 * 13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 20 * 21 * $FreeBSD: head/sys/dev/iwn/if_iwn.c 258118 2013-11-14 07:27:00Z adrian $ 22 */ 23 24 /* 25 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network 26 * adapters. 
 */

#include "opt_wlan.h"
#include "opt_iwn.h"

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/stdbool.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/libkern.h>

#include <sys/resource.h>
#include <machine/clock.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ifq_var.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_radiotap.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#include <netproto/802_11/ieee80211_ratectl.h>

#include "if_iwnreg.h"
#include "if_iwnvar.h"
#include "if_iwn_devid.h"
#include "if_iwn_chip_cfg.h"
#include "if_iwn_debug.h"

/* Number of elements in a fixed-size array (only valid on true arrays). */
#define nitems(ary)	(sizeof(ary) / sizeof((ary)[0]))

/*
 * PCI vendor/device -> human-readable name mapping used by the probe
 * routine.  The table is terminated by an all-zero sentinel entry.
 */
struct iwn_ident {
	uint16_t	vendor;
	uint16_t	device;
	const char	*name;
};

static const struct iwn_ident iwn_ident_table[] = {
	{ 0x8086, IWN_DID_6x05_1, "Intel Centrino Advanced-N 6205"		},
	{ 0x8086, IWN_DID_1000_1, "Intel Centrino Wireless-N 1000"		},
	{ 0x8086, IWN_DID_1000_2, "Intel Centrino Wireless-N 1000"		},
	{ 0x8086, IWN_DID_6x05_2, "Intel Centrino Advanced-N 6205"		},
	{ 0x8086, IWN_DID_6050_1, "Intel Centrino Advanced-N + WiMAX 6250"	},
	{ 0x8086, IWN_DID_6050_2, "Intel Centrino Advanced-N + WiMAX 6250"	},
	{ 0x8086, IWN_DID_x030_1, "Intel Centrino Wireless-N 1030"		},
	{ 0x8086, IWN_DID_x030_2, "Intel Centrino Wireless-N 1030"		},
	{ 0x8086, IWN_DID_x030_3, "Intel Centrino Advanced-N 6230"		},
	{ 0x8086, IWN_DID_x030_4, "Intel Centrino Advanced-N 6230"		},
	{ 0x8086, IWN_DID_6150_1, "Intel Centrino Wireless-N + WiMAX 6150"	},
	{ 0x8086, IWN_DID_6150_2, "Intel Centrino Wireless-N + WiMAX 6150"	},
	{ 0x8086, IWN_DID_2x00_1, "Intel(R) Centrino(R) Wireless-N 2200 BGN"	},
	{ 0x8086, IWN_DID_2x00_2, "Intel(R) Centrino(R) Wireless-N 2200 BGN"	},
	/* XXX 2200D is IWN_SDID_2x00_4; there's no way to express this here! */
	{ 0x8086, IWN_DID_2x30_1, "Intel Centrino Wireless-N 2230"		},
	{ 0x8086, IWN_DID_2x30_2, "Intel Centrino Wireless-N 2230"		},
	{ 0x8086, IWN_DID_130_1, "Intel Centrino Wireless-N 130"		},
	{ 0x8086, IWN_DID_130_2, "Intel Centrino Wireless-N 130"		},
	{ 0x8086, IWN_DID_100_1, "Intel Centrino Wireless-N 100"		},
	{ 0x8086, IWN_DID_100_2, "Intel Centrino Wireless-N 100"		},
	{ 0x8086, IWN_DID_4965_1, "Intel Wireless WiFi Link 4965"		},
	{ 0x8086, IWN_DID_6x00_1, "Intel Centrino Ultimate-N 6300"		},
	{ 0x8086, IWN_DID_6x00_2, "Intel Centrino Advanced-N 6200"		},
	{ 0x8086, IWN_DID_4965_2, "Intel Wireless WiFi Link 4965"		},
	{ 0x8086, IWN_DID_4965_3, "Intel Wireless WiFi Link 4965"		},
	{ 0x8086, IWN_DID_5x00_1, "Intel WiFi Link 5100"			},
	{ 0x8086, IWN_DID_4965_4, "Intel Wireless WiFi Link 4965"		},
	{ 0x8086, IWN_DID_5x00_3, "Intel Ultimate N WiFi Link 5300"		},
	{ 0x8086, IWN_DID_5x00_4, "Intel Ultimate N WiFi Link 5300"		},
	{ 0x8086, IWN_DID_5x00_2, "Intel WiFi Link 5100"			},
	{ 0x8086, IWN_DID_6x00_3, "Intel Centrino Ultimate-N 6300"		},
	{ 0x8086, IWN_DID_6x00_4, "Intel Centrino Advanced-N 6200"		},
	{ 0x8086, IWN_DID_5x50_1, "Intel WiMAX/WiFi Link 5350"			},
	{ 0x8086, IWN_DID_5x50_2, "Intel WiMAX/WiFi Link 5350"			},
	{ 0x8086, IWN_DID_5x50_3, "Intel WiMAX/WiFi Link 5150"			},
	{ 0x8086, IWN_DID_5x50_4, "Intel WiMAX/WiFi Link 5150"			},
	/*
	 * These currently don't function; the firmware crashes during
	 * the startup calibration request.
	 */
#if 0
	{ 0x8086, IWN_DID_6035_1, "Intel Centrino Advanced 6235"		},
	/* XXX TODO: figure out which ID this one is? */
	{ 0x8086, IWN_DID_6035_2, "Intel Centrino Advanced 6235"		},
#endif
	{ 0, 0, NULL }
};

/* Forward declarations for all file-local (static) routines. */
static int	iwn_pci_probe(device_t);
static int	iwn_pci_attach(device_t);
static int	iwn4965_attach(struct iwn_softc *, uint16_t);
static int	iwn5000_attach(struct iwn_softc *, uint16_t);
static int	iwn_config_specific(struct iwn_softc *, uint16_t);
static void	iwn_radiotap_attach(struct iwn_softc *);
static void	iwn_sysctlattach(struct iwn_softc *);
static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwn_vap_delete(struct ieee80211vap *);
static int	iwn_pci_detach(device_t);
static int	iwn_pci_shutdown(device_t);
static int	iwn_pci_suspend(device_t);
static int	iwn_pci_resume(device_t);
static int	iwn_nic_lock(struct iwn_softc *);
static int	iwn_eeprom_lock(struct iwn_softc *);
static int	iwn_init_otprom(struct iwn_softc *);
static int	iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
static void	iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int	iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
		    void **, bus_size_t, bus_size_t);
static void	iwn_dma_contig_free(struct iwn_dma_info *);
static int	iwn_alloc_sched(struct iwn_softc *);
static void	iwn_free_sched(struct iwn_softc *);
static int	iwn_alloc_kw(struct iwn_softc *);
static void	iwn_free_kw(struct iwn_softc *);
static int	iwn_alloc_ict(struct iwn_softc *);
static void	iwn_free_ict(struct iwn_softc *);
static int	iwn_alloc_fwmem(struct iwn_softc *);
static void	iwn_free_fwmem(struct iwn_softc *);
static int	iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static void	iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static void	iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static int	iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
		    int);
static void	iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
static void	iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
static void	iwn5000_ict_reset(struct iwn_softc *);
static int	iwn_read_eeprom(struct iwn_softc *,
		    uint8_t macaddr[IEEE80211_ADDR_LEN]);
static void	iwn4965_read_eeprom(struct iwn_softc *);
#ifdef IWN_DEBUG
static void	iwn4965_print_power_group(struct iwn_softc *, int);
#endif
static void	iwn5000_read_eeprom(struct iwn_softc *);
static uint32_t	iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
static void	iwn_read_eeprom_band(struct iwn_softc *, int);
static void	iwn_read_eeprom_ht40(struct iwn_softc *, int);
static void	iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
		    struct ieee80211_channel *);
static int	iwn_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel[]);
static void	iwn_read_eeprom_enhinfo(struct iwn_softc *);
static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
		    const uint8_t mac[IEEE80211_ADDR_LEN]);
static void	iwn_newassoc(struct ieee80211_node *, int);
static int	iwn_media_change(struct ifnet *);
static int	iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwn_calib_timeout(void *);
static void	iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn5000_rx_calib_results(struct iwn_softc *,
		    struct iwn_rx_desc *, struct iwn_rx_data *);
static void	iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
		    struct iwn_rx_data *);
static void	iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
		    uint8_t);
static void	iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, void *);
static void	iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
static void	iwn_notif_intr(struct iwn_softc *);
static void	iwn_wakeup_intr(struct iwn_softc *);
static void	iwn_rftoggle_intr(struct iwn_softc *);
static void	iwn_fatal_intr(struct iwn_softc *);
static void	iwn_intr(void *);
static void	iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
		    uint16_t);
static void	iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
		    uint16_t);
#ifdef notyet
static void	iwn5000_reset_sched(struct iwn_softc *, int, int);
#endif
static int	iwn_tx_data(struct iwn_softc *, struct mbuf *,
		    struct ieee80211_node *);
static int	iwn_tx_data_raw(struct iwn_softc *, struct mbuf *,
		    struct ieee80211_node *,
		    const struct ieee80211_bpf_params *params);
static int	iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
		    const struct ieee80211_bpf_params *);
static void	iwn_start(struct ifnet *, struct ifaltq_subque *);
static void	iwn_start_locked(struct ifnet *);
static void	iwn_watchdog_timeout(void *);
static int	iwn_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static int	iwn_cmd(struct iwn_softc *, int, const void *, int, int);
static int	iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
		    int);
static int	iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
		    int);
static int	iwn_set_link_quality(struct iwn_softc *,
		    struct ieee80211_node *);
static int	iwn_add_broadcast_node(struct iwn_softc *, int);
static int	iwn_updateedca(struct ieee80211com *);
static void	iwn_update_mcast(struct ifnet *);
static void	iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
static int	iwn_set_critical_temp(struct iwn_softc *);
static int	iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
static void	iwn4965_power_calibration(struct iwn_softc *, int);
static int	iwn4965_set_txpower(struct iwn_softc *,
		    struct ieee80211_channel *, int);
static int	iwn5000_set_txpower(struct iwn_softc *,
		    struct ieee80211_channel *, int);
static int	iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
static int	iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
static int	iwn_get_noise(const struct iwn_rx_general_stats *);
static int	iwn4965_get_temperature(struct iwn_softc *);
static int	iwn5000_get_temperature(struct iwn_softc *);
static int	iwn_init_sensitivity(struct iwn_softc *);
static void	iwn_collect_noise(struct iwn_softc *,
		    const struct iwn_rx_general_stats *);
static int	iwn4965_init_gains(struct iwn_softc *);
static int	iwn5000_init_gains(struct iwn_softc *);
static int	iwn4965_set_gains(struct iwn_softc *);
static int	iwn5000_set_gains(struct iwn_softc *);
static void	iwn_tune_sensitivity(struct iwn_softc *,
		    const struct iwn_rx_stats *);
static int	iwn_send_sensitivity(struct iwn_softc *);
static int	iwn_set_pslevel(struct iwn_softc *, int, int, int);
static int	iwn_send_btcoex(struct iwn_softc *);
static int	iwn_send_advanced_btcoex(struct iwn_softc *);
static int	iwn5000_runtime_calib(struct iwn_softc *);
static int	iwn_config(struct iwn_softc *);
static uint8_t	*ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int);
static int	iwn_scan(struct iwn_softc *);
static int	iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
static int	iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
static int	iwn_ampdu_rx_start(struct ieee80211_node *,
		    struct ieee80211_rx_ampdu *, int, int, int);
static void	iwn_ampdu_rx_stop(struct ieee80211_node *,
		    struct ieee80211_rx_ampdu *);
static int	iwn_addba_request(struct ieee80211_node *,
		    struct ieee80211_tx_ampdu *, int, int, int);
static int	iwn_addba_response(struct ieee80211_node *,
		    struct ieee80211_tx_ampdu *, int, int, int);
static int	iwn_ampdu_tx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwn_ampdu_tx_stop(struct ieee80211_node *,
		    struct ieee80211_tx_ampdu *);
static void	iwn4965_ampdu_tx_start(struct iwn_softc *,
		    struct ieee80211_node *, int, uint8_t, uint16_t);
static void	iwn4965_ampdu_tx_stop(struct iwn_softc *, int,
		    uint8_t, uint16_t);
static void	iwn5000_ampdu_tx_start(struct iwn_softc *,
		    struct ieee80211_node *, int, uint8_t, uint16_t);
static void	iwn5000_ampdu_tx_stop(struct iwn_softc *, int,
		    uint8_t, uint16_t);
static int	iwn5000_query_calibration(struct iwn_softc *);
static int	iwn5000_send_calibration(struct iwn_softc *);
static int	iwn5000_send_wimax_coex(struct iwn_softc *);
static int	iwn5000_crystal_calib(struct iwn_softc *);
static int	iwn5000_temp_offset_calib(struct iwn_softc *);
static int	iwn5000_temp_offset_calibv2(struct iwn_softc *);
static int	iwn4965_post_alive(struct iwn_softc *);
static int	iwn5000_post_alive(struct iwn_softc *);
static int	iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
		    int);
static int	iwn4965_load_firmware(struct iwn_softc *);
static int	iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
		    const uint8_t *, int);
static int	iwn5000_load_firmware(struct iwn_softc *);
static int	iwn_read_firmware_leg(struct iwn_softc *,
		    struct iwn_fw_info *);
static int	iwn_read_firmware_tlv(struct iwn_softc *,
		    struct iwn_fw_info *, uint16_t);
static int	iwn_read_firmware(struct iwn_softc *);
static int	iwn_clock_wait(struct iwn_softc *);
static int	iwn_apm_init(struct iwn_softc *);
static void	iwn_apm_stop_master(struct iwn_softc *);
static void	iwn_apm_stop(struct iwn_softc *);
static int	iwn4965_nic_config(struct iwn_softc *);
static int	iwn5000_nic_config(struct iwn_softc *);
static int	iwn_hw_prepare(struct iwn_softc *);
static int	iwn_hw_init(struct iwn_softc *);
static void	iwn_hw_stop(struct iwn_softc *);
static void	iwn_radio_on_task(void *, int);
static void	iwn_radio_off_task(void *, int);
static void	iwn_init_locked(struct iwn_softc *);
static void	iwn_init(void *);
static void	iwn_stop_locked(struct iwn_softc *);
static void	iwn_stop(struct iwn_softc *);
static void	iwn_scan_start(struct ieee80211com *);
static void	iwn_scan_end(struct ieee80211com *);
static void	iwn_set_channel(struct ieee80211com *);
static void	iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwn_scan_mindwell(struct ieee80211_scan_state *);
static void	iwn_hw_reset_task(void *, int);
#ifdef IWN_DEBUG
static char	*iwn_get_csr_string(int);
static void	iwn_debug_register(struct iwn_softc *);
#endif

/* newbus(9) device interface glue. */
static device_method_t iwn_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwn_pci_probe),
	DEVMETHOD(device_attach,	iwn_pci_attach),
	DEVMETHOD(device_detach,	iwn_pci_detach),
	DEVMETHOD(device_shutdown,	iwn_pci_shutdown),
	DEVMETHOD(device_suspend,	iwn_pci_suspend),
	DEVMETHOD(device_resume,	iwn_pci_resume),
	{ 0, 0 }
};

static driver_t iwn_driver = {
"iwn", 366 iwn_methods, 367 sizeof(struct iwn_softc) 368 }; 369 static devclass_t iwn_devclass; 370 371 DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, NULL, NULL); 372 373 MODULE_VERSION(iwn, 1); 374 375 MODULE_DEPEND(iwn, firmware, 1, 1, 1); 376 MODULE_DEPEND(iwn, pci, 1, 1, 1); 377 MODULE_DEPEND(iwn, wlan, 1, 1, 1); 378 MODULE_DEPEND(iwn, wlan_amrr, 1, 1, 1); 379 380 static int 381 iwn_pci_probe(device_t dev) 382 { 383 const struct iwn_ident *ident; 384 385 /* no wlan serializer needed */ 386 for (ident = iwn_ident_table; ident->name != NULL; ident++) { 387 if (pci_get_vendor(dev) == ident->vendor && 388 pci_get_device(dev) == ident->device) { 389 device_set_desc(dev, ident->name); 390 return 0; 391 } 392 } 393 return ENXIO; 394 } 395 396 static int 397 iwn_pci_attach(device_t dev) 398 { 399 struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev); 400 struct ieee80211com *ic; 401 struct ifnet *ifp; 402 uint32_t reg; 403 int i, error; 404 #ifdef OLD_MSI 405 int result; 406 #endif 407 uint8_t macaddr[IEEE80211_ADDR_LEN]; 408 char ethstr[ETHER_ADDRSTRLEN + 1]; 409 410 wlan_serialize_enter(); 411 412 sc->sc_dev = dev; 413 sc->sc_dmat = NULL; 414 415 if (bus_dma_tag_create(sc->sc_dmat, 416 1, 0, 417 BUS_SPACE_MAXADDR_32BIT, 418 BUS_SPACE_MAXADDR, 419 NULL, NULL, 420 BUS_SPACE_MAXSIZE, 421 IWN_MAX_SCATTER, 422 BUS_SPACE_MAXSIZE, 423 BUS_DMA_ALLOCNOW, 424 &sc->sc_dmat)) { 425 device_printf(dev, "cannot allocate DMA tag\n"); 426 error = ENOMEM; 427 goto fail; 428 } 429 430 /* prepare sysctl tree for use in sub modules */ 431 sysctl_ctx_init(&sc->sc_sysctl_ctx); 432 sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx, 433 SYSCTL_STATIC_CHILDREN(_hw), 434 OID_AUTO, 435 device_get_nameunit(sc->sc_dev), 436 CTLFLAG_RD, 0, ""); 437 438 #ifdef IWN_DEBUG 439 error = resource_int_value(device_get_name(sc->sc_dev), 440 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 441 if (error != 0) 442 sc->sc_debug = 0; 443 #else 444 sc->sc_debug = 0; 445 #endif 446 
447 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: begin\n",__func__); 448 449 /* 450 * Get the offset of the PCI Express Capability Structure in PCI 451 * Configuration Space. 452 */ 453 error = pci_find_extcap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 454 if (error != 0) { 455 device_printf(dev, "PCIe capability structure not found!\n"); 456 goto fail2; 457 } 458 459 /* Clear device-specific "PCI retry timeout" register (41h). */ 460 pci_write_config(dev, 0x41, 0, 1); 461 462 /* Hardware bug workaround. */ 463 reg = pci_read_config(dev, PCIR_COMMAND, 2); 464 if (reg & PCIM_CMD_INTxDIS) { 465 DPRINTF(sc, IWN_DEBUG_RESET, "%s: PCIe INTx Disable set\n", 466 __func__); 467 reg &= ~PCIM_CMD_INTxDIS; 468 pci_write_config(dev, PCIR_COMMAND, reg, 2); 469 } 470 471 /* Enable bus-mastering. */ 472 pci_enable_busmaster(dev); 473 474 sc->mem_rid = PCIR_BAR(0); 475 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, 476 RF_ACTIVE); 477 if (sc->mem == NULL) { 478 device_printf(dev, "can't map mem space\n"); 479 error = ENOMEM; 480 goto fail2; 481 } 482 sc->sc_st = rman_get_bustag(sc->mem); 483 sc->sc_sh = rman_get_bushandle(sc->mem); 484 485 sc->irq_rid = 0; 486 #ifdef OLD_MSI 487 if ((result = pci_msi_count(dev)) == 1 && 488 pci_alloc_msi(dev, &result) == 0) 489 sc->irq_rid = 1; 490 #endif 491 /* Install interrupt handler. */ 492 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, 493 RF_ACTIVE | RF_SHAREABLE); 494 if (sc->irq == NULL) { 495 device_printf(dev, "can't map interrupt\n"); 496 error = ENOMEM; 497 goto fail; 498 } 499 500 /* Read hardware revision and attach. */ 501 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> IWN_HW_REV_TYPE_SHIFT) 502 & IWN_HW_REV_TYPE_MASK; 503 sc->subdevice_id = pci_get_subdevice(dev); 504 505 /* 506 * 4965 versus 5000 and later have different methods. 507 * Let's set those up first. 
508 */ 509 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 510 error = iwn4965_attach(sc, pci_get_device(dev)); 511 else 512 error = iwn5000_attach(sc, pci_get_device(dev)); 513 if (error != 0) { 514 device_printf(dev, "could not attach device, error %d\n", 515 error); 516 goto fail; 517 } 518 519 /* 520 * Next, let's setup the various parameters of each NIC. 521 */ 522 error = iwn_config_specific(sc, pci_get_device(dev)); 523 if (error != 0) { 524 device_printf(dev, "could not attach device, error %d\n", 525 error); 526 goto fail; 527 } 528 529 if ((error = iwn_hw_prepare(sc)) != 0) { 530 device_printf(dev, "hardware not ready, error %d\n", error); 531 goto fail; 532 } 533 534 /* Allocate DMA memory for firmware transfers. */ 535 if ((error = iwn_alloc_fwmem(sc)) != 0) { 536 device_printf(dev, 537 "could not allocate memory for firmware, error %d\n", 538 error); 539 goto fail; 540 } 541 542 /* Allocate "Keep Warm" page. */ 543 if ((error = iwn_alloc_kw(sc)) != 0) { 544 device_printf(dev, 545 "could not allocate keep warm page, error %d\n", error); 546 goto fail; 547 } 548 549 /* Allocate ICT table for 5000 Series. */ 550 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 551 (error = iwn_alloc_ict(sc)) != 0) { 552 device_printf(dev, "could not allocate ICT table, error %d\n", 553 error); 554 goto fail; 555 } 556 557 /* Allocate TX scheduler "rings". */ 558 if ((error = iwn_alloc_sched(sc)) != 0) { 559 device_printf(dev, 560 "could not allocate TX scheduler rings, error %d\n", error); 561 goto fail; 562 } 563 564 /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */ 565 for (i = 0; i < sc->ntxqs; i++) { 566 if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 567 device_printf(dev, 568 "could not allocate TX ring %d, error %d\n", i, 569 error); 570 goto fail; 571 } 572 } 573 574 /* Allocate RX ring. 
*/ 575 if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) { 576 device_printf(dev, "could not allocate RX ring, error %d\n", 577 error); 578 goto fail; 579 } 580 581 /* Clear pending interrupts. */ 582 IWN_WRITE(sc, IWN_INT, 0xffffffff); 583 584 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 585 if (ifp == NULL) { 586 device_printf(dev, "can not allocate ifnet structure\n"); 587 goto fail; 588 } 589 590 ic = ifp->if_l2com; 591 ic->ic_ifp = ifp; 592 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 593 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 594 595 /* Set device capabilities. */ 596 ic->ic_caps = 597 IEEE80211_C_STA /* station mode supported */ 598 | IEEE80211_C_MONITOR /* monitor mode supported */ 599 | IEEE80211_C_BGSCAN /* background scanning */ 600 | IEEE80211_C_TXPMGT /* tx power management */ 601 | IEEE80211_C_SHSLOT /* short slot time supported */ 602 | IEEE80211_C_WPA 603 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 604 #if 0 605 | IEEE80211_C_IBSS /* ibss/adhoc mode */ 606 #endif 607 | IEEE80211_C_WME /* WME */ 608 | IEEE80211_C_PMGT /* Station-side power mgmt */ 609 ; 610 611 /* Read MAC address, channels, etc from EEPROM. */ 612 if ((error = iwn_read_eeprom(sc, macaddr)) != 0) { 613 device_printf(dev, "could not read EEPROM, error %d\n", 614 error); 615 goto fail; 616 } 617 618 /* Count the number of available chains. 
*/ 619 sc->ntxchains = 620 ((sc->txchainmask >> 2) & 1) + 621 ((sc->txchainmask >> 1) & 1) + 622 ((sc->txchainmask >> 0) & 1); 623 sc->nrxchains = 624 ((sc->rxchainmask >> 2) & 1) + 625 ((sc->rxchainmask >> 1) & 1) + 626 ((sc->rxchainmask >> 0) & 1); 627 if (bootverbose) { 628 device_printf(dev, "MIMO %dT%dR, %.4s, address %s\n", 629 sc->ntxchains, sc->nrxchains, sc->eeprom_domain, 630 kether_ntoa(macaddr, ethstr)); 631 } 632 633 if (sc->sc_flags & IWN_FLAG_HAS_11N) { 634 #if notyet 635 ic->ic_rxstream = sc->nrxchains; 636 ic->ic_txstream = sc->ntxchains; 637 #endif 638 639 /* 640 * The NICs we currently support cap out at 2x2 support 641 * separate from the chains being used. 642 * 643 * This is a total hack to work around that until some 644 * per-device method is implemented to return the 645 * actual stream support. 646 * 647 * XXX Note: the 5350 is a 3x3 device; so we shouldn't 648 * cap this! But, anything that touches rates in the 649 * driver needs to be audited first before 3x3 is enabled. 
650 */ 651 #if notyet 652 if (ic->ic_rxstream > 2) 653 ic->ic_rxstream = 2; 654 if (ic->ic_txstream > 2) 655 ic->ic_txstream = 2; 656 #endif 657 658 ic->ic_htcaps = 659 IEEE80211_HTCAP_SMPS_OFF /* SMPS mode disabled */ 660 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ 661 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/ 662 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ 663 #ifdef notyet 664 | IEEE80211_HTCAP_GREENFIELD 665 #if IWN_RBUF_SIZE == 8192 666 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */ 667 #else 668 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ 669 #endif 670 #endif 671 /* s/w capabilities */ 672 | IEEE80211_HTC_HT /* HT operation */ 673 | IEEE80211_HTC_AMPDU /* tx A-MPDU */ 674 #ifdef notyet 675 | IEEE80211_HTC_AMSDU /* tx A-MSDU */ 676 #endif 677 ; 678 } 679 680 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 681 ifp->if_softc = sc; 682 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 683 ifp->if_init = iwn_init; 684 ifp->if_ioctl = iwn_ioctl; 685 ifp->if_start = iwn_start; 686 ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN); 687 #ifdef notyet 688 ifq_set_ready(&ifp->if_snd); 689 #endif 690 691 ieee80211_ifattach(ic, macaddr); 692 ic->ic_vap_create = iwn_vap_create; 693 ic->ic_vap_delete = iwn_vap_delete; 694 ic->ic_raw_xmit = iwn_raw_xmit; 695 ic->ic_node_alloc = iwn_node_alloc; 696 sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start; 697 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start; 698 sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop; 699 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop; 700 sc->sc_addba_request = ic->ic_addba_request; 701 ic->ic_addba_request = iwn_addba_request; 702 sc->sc_addba_response = ic->ic_addba_response; 703 ic->ic_addba_response = iwn_addba_response; 704 sc->sc_addba_stop = ic->ic_addba_stop; 705 ic->ic_addba_stop = iwn_ampdu_tx_stop; 706 ic->ic_newassoc = iwn_newassoc; 707 ic->ic_wme.wme_update = iwn_updateedca; 708 ic->ic_update_mcast = iwn_update_mcast; 709 ic->ic_scan_start = 
iwn_scan_start; 710 ic->ic_scan_end = iwn_scan_end; 711 ic->ic_set_channel = iwn_set_channel; 712 ic->ic_scan_curchan = iwn_scan_curchan; 713 ic->ic_scan_mindwell = iwn_scan_mindwell; 714 ic->ic_setregdomain = iwn_setregdomain; 715 716 iwn_radiotap_attach(sc); 717 718 callout_init(&sc->calib_to); 719 callout_init(&sc->watchdog_to); 720 TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset_task, sc); 721 TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on_task, sc); 722 TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off_task, sc); 723 724 iwn_sysctlattach(sc); 725 726 /* 727 * Hook our interrupt after all initialization is complete. 728 */ 729 error = bus_setup_intr(dev, sc->irq, INTR_MPSAFE, 730 iwn_intr, sc, &sc->sc_ih, 731 &wlan_global_serializer); 732 if (error != 0) { 733 device_printf(dev, "can't establish interrupt, error %d\n", 734 error); 735 goto fail; 736 } 737 738 if (bootverbose) 739 ieee80211_announce(ic); 740 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 741 wlan_serialize_exit(); 742 return 0; 743 fail: 744 wlan_serialize_exit(); 745 iwn_pci_detach(dev); 746 wlan_serialize_enter(); 747 fail2: 748 wlan_serialize_exit(); 749 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 750 return error; 751 } 752 753 /* 754 * Define specific configuration based on device id and subdevice id 755 * pid : PCI device id 756 */ 757 static int 758 iwn_config_specific(struct iwn_softc *sc, uint16_t pid) 759 { 760 761 switch (pid) { 762 /* 4965 series */ 763 case IWN_DID_4965_1: 764 case IWN_DID_4965_2: 765 case IWN_DID_4965_3: 766 case IWN_DID_4965_4: 767 sc->base_params = &iwn4965_base_params; 768 sc->limits = &iwn4965_sensitivity_limits; 769 sc->fwname = "iwn4965fw"; 770 /* Override chains masks, ROM is known to be broken. 
*/ 771 sc->txchainmask = IWN_ANT_AB; 772 sc->rxchainmask = IWN_ANT_ABC; 773 /* Enable normal btcoex */ 774 sc->sc_flags |= IWN_FLAG_BTCOEX; 775 break; 776 /* 1000 Series */ 777 case IWN_DID_1000_1: 778 case IWN_DID_1000_2: 779 switch(sc->subdevice_id) { 780 case IWN_SDID_1000_1: 781 case IWN_SDID_1000_2: 782 case IWN_SDID_1000_3: 783 case IWN_SDID_1000_4: 784 case IWN_SDID_1000_5: 785 case IWN_SDID_1000_6: 786 case IWN_SDID_1000_7: 787 case IWN_SDID_1000_8: 788 case IWN_SDID_1000_9: 789 case IWN_SDID_1000_10: 790 case IWN_SDID_1000_11: 791 case IWN_SDID_1000_12: 792 sc->limits = &iwn1000_sensitivity_limits; 793 sc->base_params = &iwn1000_base_params; 794 sc->fwname = "iwn1000fw"; 795 break; 796 default: 797 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 798 "0x%04x rev %d not supported (subdevice)\n", pid, 799 sc->subdevice_id,sc->hw_type); 800 return ENOTSUP; 801 } 802 break; 803 /* 6x00 Series */ 804 case IWN_DID_6x00_2: 805 case IWN_DID_6x00_4: 806 case IWN_DID_6x00_1: 807 case IWN_DID_6x00_3: 808 sc->fwname = "iwn6000fw"; 809 sc->limits = &iwn6000_sensitivity_limits; 810 switch(sc->subdevice_id) { 811 case IWN_SDID_6x00_1: 812 case IWN_SDID_6x00_2: 813 case IWN_SDID_6x00_8: 814 //iwl6000_3agn_cfg 815 sc->base_params = &iwn_6000_base_params; 816 break; 817 case IWN_SDID_6x00_3: 818 case IWN_SDID_6x00_6: 819 case IWN_SDID_6x00_9: 820 ////iwl6000i_2agn 821 case IWN_SDID_6x00_4: 822 case IWN_SDID_6x00_7: 823 case IWN_SDID_6x00_10: 824 //iwl6000i_2abg_cfg 825 case IWN_SDID_6x00_5: 826 //iwl6000i_2bg_cfg 827 sc->base_params = &iwn_6000i_base_params; 828 sc->sc_flags |= IWN_FLAG_INTERNAL_PA; 829 sc->txchainmask = IWN_ANT_BC; 830 sc->rxchainmask = IWN_ANT_BC; 831 break; 832 default: 833 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 834 "0x%04x rev %d not supported (subdevice)\n", pid, 835 sc->subdevice_id,sc->hw_type); 836 return ENOTSUP; 837 } 838 break; 839 /* 6x05 Series */ 840 case IWN_DID_6x05_1: 841 case IWN_DID_6x05_2: 842 
switch(sc->subdevice_id) { 843 case IWN_SDID_6x05_1: 844 case IWN_SDID_6x05_4: 845 case IWN_SDID_6x05_6: 846 //iwl6005_2agn_cfg 847 case IWN_SDID_6x05_2: 848 case IWN_SDID_6x05_5: 849 case IWN_SDID_6x05_7: 850 //iwl6005_2abg_cfg 851 case IWN_SDID_6x05_3: 852 //iwl6005_2bg_cfg 853 case IWN_SDID_6x05_8: 854 case IWN_SDID_6x05_9: 855 //iwl6005_2agn_sff_cfg 856 case IWN_SDID_6x05_10: 857 //iwl6005_2agn_d_cfg 858 case IWN_SDID_6x05_11: 859 //iwl6005_2agn_mow1_cfg 860 case IWN_SDID_6x05_12: 861 //iwl6005_2agn_mow2_cfg 862 sc->fwname = "iwn6000g2afw"; 863 sc->limits = &iwn6000_sensitivity_limits; 864 sc->base_params = &iwn_6000g2_base_params; 865 break; 866 default: 867 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 868 "0x%04x rev %d not supported (subdevice)\n", pid, 869 sc->subdevice_id,sc->hw_type); 870 return ENOTSUP; 871 } 872 break; 873 /* 6x35 Series */ 874 case IWN_DID_6035_1: 875 case IWN_DID_6035_2: 876 switch(sc->subdevice_id) { 877 case IWN_SDID_6035_1: 878 case IWN_SDID_6035_2: 879 case IWN_SDID_6035_3: 880 case IWN_SDID_6035_4: 881 sc->fwname = "iwn6000g2bfw"; 882 sc->limits = &iwn6000_sensitivity_limits; 883 sc->base_params = &iwn_6000g2b_base_params; 884 break; 885 default: 886 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 887 "0x%04x rev %d not supported (subdevice)\n", pid, 888 sc->subdevice_id,sc->hw_type); 889 return ENOTSUP; 890 } 891 break; 892 /* 6x50 WiFi/WiMax Series */ 893 case IWN_DID_6050_1: 894 case IWN_DID_6050_2: 895 switch(sc->subdevice_id) { 896 case IWN_SDID_6050_1: 897 case IWN_SDID_6050_3: 898 case IWN_SDID_6050_5: 899 //iwl6050_2agn_cfg 900 case IWN_SDID_6050_2: 901 case IWN_SDID_6050_4: 902 case IWN_SDID_6050_6: 903 //iwl6050_2abg_cfg 904 sc->fwname = "iwn6050fw"; 905 sc->txchainmask = IWN_ANT_AB; 906 sc->rxchainmask = IWN_ANT_AB; 907 sc->limits = &iwn6000_sensitivity_limits; 908 sc->base_params = &iwn_6050_base_params; 909 break; 910 default: 911 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub 
id :" 912 "0x%04x rev %d not supported (subdevice)\n", pid, 913 sc->subdevice_id,sc->hw_type); 914 return ENOTSUP; 915 } 916 break; 917 /* 6150 WiFi/WiMax Series */ 918 case IWN_DID_6150_1: 919 case IWN_DID_6150_2: 920 switch(sc->subdevice_id) { 921 case IWN_SDID_6150_1: 922 case IWN_SDID_6150_3: 923 case IWN_SDID_6150_5: 924 // iwl6150_bgn_cfg 925 case IWN_SDID_6150_2: 926 case IWN_SDID_6150_4: 927 case IWN_SDID_6150_6: 928 //iwl6150_bg_cfg 929 sc->fwname = "iwn6050fw"; 930 sc->limits = &iwn6000_sensitivity_limits; 931 sc->base_params = &iwn_6150_base_params; 932 break; 933 default: 934 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 935 "0x%04x rev %d not supported (subdevice)\n", pid, 936 sc->subdevice_id,sc->hw_type); 937 return ENOTSUP; 938 } 939 break; 940 /* 6030 Series and 1030 Series */ 941 case IWN_DID_x030_1: 942 case IWN_DID_x030_2: 943 case IWN_DID_x030_3: 944 case IWN_DID_x030_4: 945 switch(sc->subdevice_id) { 946 case IWN_SDID_x030_1: 947 case IWN_SDID_x030_3: 948 case IWN_SDID_x030_5: 949 // iwl1030_bgn_cfg 950 case IWN_SDID_x030_2: 951 case IWN_SDID_x030_4: 952 case IWN_SDID_x030_6: 953 //iwl1030_bg_cfg 954 case IWN_SDID_x030_7: 955 case IWN_SDID_x030_10: 956 case IWN_SDID_x030_14: 957 //iwl6030_2agn_cfg 958 case IWN_SDID_x030_8: 959 case IWN_SDID_x030_11: 960 case IWN_SDID_x030_15: 961 // iwl6030_2bgn_cfg 962 case IWN_SDID_x030_9: 963 case IWN_SDID_x030_12: 964 case IWN_SDID_x030_16: 965 // iwl6030_2abg_cfg 966 case IWN_SDID_x030_13: 967 //iwl6030_2bg_cfg 968 sc->fwname = "iwn6000g2bfw"; 969 sc->limits = &iwn6000_sensitivity_limits; 970 sc->base_params = &iwn_6000g2b_base_params; 971 break; 972 default: 973 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 974 "0x%04x rev %d not supported (subdevice)\n", pid, 975 sc->subdevice_id,sc->hw_type); 976 return ENOTSUP; 977 } 978 break; 979 /* 130 Series WiFi */ 980 /* XXX: This series will need adjustment for rate. 
981 * see rx_with_siso_diversity in linux kernel 982 */ 983 case IWN_DID_130_1: 984 case IWN_DID_130_2: 985 switch(sc->subdevice_id) { 986 case IWN_SDID_130_1: 987 case IWN_SDID_130_3: 988 case IWN_SDID_130_5: 989 //iwl130_bgn_cfg 990 case IWN_SDID_130_2: 991 case IWN_SDID_130_4: 992 case IWN_SDID_130_6: 993 //iwl130_bg_cfg 994 sc->fwname = "iwn6000g2bfw"; 995 sc->limits = &iwn6000_sensitivity_limits; 996 sc->base_params = &iwn_6000g2b_base_params; 997 break; 998 default: 999 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1000 "0x%04x rev %d not supported (subdevice)\n", pid, 1001 sc->subdevice_id,sc->hw_type); 1002 return ENOTSUP; 1003 } 1004 break; 1005 /* 100 Series WiFi */ 1006 case IWN_DID_100_1: 1007 case IWN_DID_100_2: 1008 switch(sc->subdevice_id) { 1009 case IWN_SDID_100_1: 1010 case IWN_SDID_100_2: 1011 case IWN_SDID_100_3: 1012 case IWN_SDID_100_4: 1013 case IWN_SDID_100_5: 1014 case IWN_SDID_100_6: 1015 sc->limits = &iwn1000_sensitivity_limits; 1016 sc->base_params = &iwn1000_base_params; 1017 sc->fwname = "iwn100fw"; 1018 break; 1019 default: 1020 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1021 "0x%04x rev %d not supported (subdevice)\n", pid, 1022 sc->subdevice_id,sc->hw_type); 1023 return ENOTSUP; 1024 } 1025 break; 1026 1027 /* 2x00 Series */ 1028 case IWN_DID_2x00_1: 1029 case IWN_DID_2x00_2: 1030 switch(sc->subdevice_id) { 1031 case IWN_SDID_2x00_1: 1032 case IWN_SDID_2x00_2: 1033 case IWN_SDID_2x00_3: 1034 //iwl2000_2bgn_cfg 1035 case IWN_SDID_2x00_4: 1036 //iwl2000_2bgn_d_cfg 1037 sc->limits = &iwn2030_sensitivity_limits; 1038 sc->base_params = &iwn2000_base_params; 1039 sc->fwname = "iwn2000fw"; 1040 break; 1041 default: 1042 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1043 "0x%04x rev %d not supported (subdevice) \n", 1044 pid, sc->subdevice_id, sc->hw_type); 1045 return ENOTSUP; 1046 } 1047 break; 1048 /* 2x30 Series */ 1049 case IWN_DID_2x30_1: 1050 case IWN_DID_2x30_2: 1051 
switch(sc->subdevice_id) { 1052 case IWN_SDID_2x30_1: 1053 case IWN_SDID_2x30_3: 1054 case IWN_SDID_2x30_5: 1055 //iwl100_bgn_cfg 1056 case IWN_SDID_2x30_2: 1057 case IWN_SDID_2x30_4: 1058 case IWN_SDID_2x30_6: 1059 //iwl100_bg_cfg 1060 sc->limits = &iwn2030_sensitivity_limits; 1061 sc->base_params = &iwn2030_base_params; 1062 sc->fwname = "iwn2030fw"; 1063 break; 1064 default: 1065 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1066 "0x%04x rev %d not supported (subdevice)\n", pid, 1067 sc->subdevice_id,sc->hw_type); 1068 return ENOTSUP; 1069 } 1070 break; 1071 /* 5x00 Series */ 1072 case IWN_DID_5x00_1: 1073 case IWN_DID_5x00_2: 1074 case IWN_DID_5x00_3: 1075 case IWN_DID_5x00_4: 1076 sc->limits = &iwn5000_sensitivity_limits; 1077 sc->base_params = &iwn5000_base_params; 1078 sc->fwname = "iwn5000fw"; 1079 switch(sc->subdevice_id) { 1080 case IWN_SDID_5x00_1: 1081 case IWN_SDID_5x00_2: 1082 case IWN_SDID_5x00_3: 1083 case IWN_SDID_5x00_4: 1084 case IWN_SDID_5x00_9: 1085 case IWN_SDID_5x00_10: 1086 case IWN_SDID_5x00_11: 1087 case IWN_SDID_5x00_12: 1088 case IWN_SDID_5x00_17: 1089 case IWN_SDID_5x00_18: 1090 case IWN_SDID_5x00_19: 1091 case IWN_SDID_5x00_20: 1092 //iwl5100_agn_cfg 1093 sc->txchainmask = IWN_ANT_B; 1094 sc->rxchainmask = IWN_ANT_AB; 1095 break; 1096 case IWN_SDID_5x00_5: 1097 case IWN_SDID_5x00_6: 1098 case IWN_SDID_5x00_13: 1099 case IWN_SDID_5x00_14: 1100 case IWN_SDID_5x00_21: 1101 case IWN_SDID_5x00_22: 1102 //iwl5100_bgn_cfg 1103 sc->txchainmask = IWN_ANT_B; 1104 sc->rxchainmask = IWN_ANT_AB; 1105 break; 1106 case IWN_SDID_5x00_7: 1107 case IWN_SDID_5x00_8: 1108 case IWN_SDID_5x00_15: 1109 case IWN_SDID_5x00_16: 1110 case IWN_SDID_5x00_23: 1111 case IWN_SDID_5x00_24: 1112 //iwl5100_abg_cfg 1113 sc->txchainmask = IWN_ANT_B; 1114 sc->rxchainmask = IWN_ANT_AB; 1115 break; 1116 case IWN_SDID_5x00_25: 1117 case IWN_SDID_5x00_26: 1118 case IWN_SDID_5x00_27: 1119 case IWN_SDID_5x00_28: 1120 case IWN_SDID_5x00_29: 1121 case 
IWN_SDID_5x00_30: 1122 case IWN_SDID_5x00_31: 1123 case IWN_SDID_5x00_32: 1124 case IWN_SDID_5x00_33: 1125 case IWN_SDID_5x00_34: 1126 case IWN_SDID_5x00_35: 1127 case IWN_SDID_5x00_36: 1128 //iwl5300_agn_cfg 1129 sc->txchainmask = IWN_ANT_ABC; 1130 sc->rxchainmask = IWN_ANT_ABC; 1131 break; 1132 default: 1133 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1134 "0x%04x rev %d not supported (subdevice)\n", pid, 1135 sc->subdevice_id,sc->hw_type); 1136 return ENOTSUP; 1137 } 1138 break; 1139 /* 5x50 Series */ 1140 case IWN_DID_5x50_1: 1141 case IWN_DID_5x50_2: 1142 case IWN_DID_5x50_3: 1143 case IWN_DID_5x50_4: 1144 sc->limits = &iwn5000_sensitivity_limits; 1145 sc->base_params = &iwn5000_base_params; 1146 sc->fwname = "iwn5000fw"; 1147 switch(sc->subdevice_id) { 1148 case IWN_SDID_5x50_1: 1149 case IWN_SDID_5x50_2: 1150 case IWN_SDID_5x50_3: 1151 //iwl5350_agn_cfg 1152 sc->limits = &iwn5000_sensitivity_limits; 1153 sc->base_params = &iwn5000_base_params; 1154 sc->fwname = "iwn5000fw"; 1155 break; 1156 case IWN_SDID_5x50_4: 1157 case IWN_SDID_5x50_5: 1158 case IWN_SDID_5x50_8: 1159 case IWN_SDID_5x50_9: 1160 case IWN_SDID_5x50_10: 1161 case IWN_SDID_5x50_11: 1162 //iwl5150_agn_cfg 1163 case IWN_SDID_5x50_6: 1164 case IWN_SDID_5x50_7: 1165 case IWN_SDID_5x50_12: 1166 case IWN_SDID_5x50_13: 1167 //iwl5150_abg_cfg 1168 sc->limits = &iwn5000_sensitivity_limits; 1169 sc->fwname = "iwn5150fw"; 1170 sc->base_params = &iwn_5x50_base_params; 1171 break; 1172 default: 1173 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1174 "0x%04x rev %d not supported (subdevice)\n", pid, 1175 sc->subdevice_id,sc->hw_type); 1176 return ENOTSUP; 1177 } 1178 break; 1179 default: 1180 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id : 0x%04x" 1181 "rev 0x%08x not supported (device)\n", pid, sc->subdevice_id, 1182 sc->hw_type); 1183 return ENOTSUP; 1184 } 1185 return 0; 1186 } 1187 1188 static int 1189 iwn4965_attach(struct iwn_softc *sc, uint16_t pid) 1190 
{ 1191 struct iwn_ops *ops = &sc->ops; 1192 1193 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1194 ops->load_firmware = iwn4965_load_firmware; 1195 ops->read_eeprom = iwn4965_read_eeprom; 1196 ops->post_alive = iwn4965_post_alive; 1197 ops->nic_config = iwn4965_nic_config; 1198 ops->update_sched = iwn4965_update_sched; 1199 ops->get_temperature = iwn4965_get_temperature; 1200 ops->get_rssi = iwn4965_get_rssi; 1201 ops->set_txpower = iwn4965_set_txpower; 1202 ops->init_gains = iwn4965_init_gains; 1203 ops->set_gains = iwn4965_set_gains; 1204 ops->add_node = iwn4965_add_node; 1205 ops->tx_done = iwn4965_tx_done; 1206 ops->ampdu_tx_start = iwn4965_ampdu_tx_start; 1207 ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop; 1208 sc->ntxqs = IWN4965_NTXQUEUES; 1209 sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE; 1210 sc->ndmachnls = IWN4965_NDMACHNLS; 1211 sc->broadcast_id = IWN4965_ID_BROADCAST; 1212 sc->rxonsz = IWN4965_RXONSZ; 1213 sc->schedsz = IWN4965_SCHEDSZ; 1214 sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ; 1215 sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ; 1216 sc->fwsz = IWN4965_FWSZ; 1217 sc->sched_txfact_addr = IWN4965_SCHED_TXFACT; 1218 sc->limits = &iwn4965_sensitivity_limits; 1219 sc->fwname = "iwn4965fw"; 1220 /* Override chains masks, ROM is known to be broken. 
*/ 1221 sc->txchainmask = IWN_ANT_AB; 1222 sc->rxchainmask = IWN_ANT_ABC; 1223 /* Enable normal btcoex */ 1224 sc->sc_flags |= IWN_FLAG_BTCOEX; 1225 1226 DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__); 1227 1228 return 0; 1229 } 1230 1231 static int 1232 iwn5000_attach(struct iwn_softc *sc, uint16_t pid) 1233 { 1234 struct iwn_ops *ops = &sc->ops; 1235 1236 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1237 1238 ops->load_firmware = iwn5000_load_firmware; 1239 ops->read_eeprom = iwn5000_read_eeprom; 1240 ops->post_alive = iwn5000_post_alive; 1241 ops->nic_config = iwn5000_nic_config; 1242 ops->update_sched = iwn5000_update_sched; 1243 ops->get_temperature = iwn5000_get_temperature; 1244 ops->get_rssi = iwn5000_get_rssi; 1245 ops->set_txpower = iwn5000_set_txpower; 1246 ops->init_gains = iwn5000_init_gains; 1247 ops->set_gains = iwn5000_set_gains; 1248 ops->add_node = iwn5000_add_node; 1249 ops->tx_done = iwn5000_tx_done; 1250 ops->ampdu_tx_start = iwn5000_ampdu_tx_start; 1251 ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop; 1252 sc->ntxqs = IWN5000_NTXQUEUES; 1253 sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE; 1254 sc->ndmachnls = IWN5000_NDMACHNLS; 1255 sc->broadcast_id = IWN5000_ID_BROADCAST; 1256 sc->rxonsz = IWN5000_RXONSZ; 1257 sc->schedsz = IWN5000_SCHEDSZ; 1258 sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ; 1259 sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ; 1260 sc->fwsz = IWN5000_FWSZ; 1261 sc->sched_txfact_addr = IWN5000_SCHED_TXFACT; 1262 sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; 1263 sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN; 1264 1265 return 0; 1266 } 1267 1268 /* 1269 * Attach the interface to 802.11 radiotap. 
1270 */ 1271 static void 1272 iwn_radiotap_attach(struct iwn_softc *sc) 1273 { 1274 struct ifnet *ifp = sc->sc_ifp; 1275 struct ieee80211com *ic = ifp->if_l2com; 1276 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1277 ieee80211_radiotap_attach(ic, 1278 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), 1279 IWN_TX_RADIOTAP_PRESENT, 1280 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), 1281 IWN_RX_RADIOTAP_PRESENT); 1282 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1283 } 1284 1285 static void 1286 iwn_sysctlattach(struct iwn_softc *sc) 1287 { 1288 #ifdef IWN_DEBUG 1289 struct sysctl_ctx_list *ctx; 1290 struct sysctl_oid *tree; 1291 1292 ctx = &sc->sc_sysctl_ctx; 1293 tree = sc->sc_sysctl_tree; 1294 1295 if (tree) { 1296 device_printf(sc->sc_dev, "can't add sysctl node\n"); 1297 return; 1298 } 1299 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1300 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 1301 "control debugging printfs"); 1302 #endif 1303 } 1304 1305 static struct ieee80211vap * 1306 iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 1307 enum ieee80211_opmode opmode, int flags, 1308 const uint8_t bssid[IEEE80211_ADDR_LEN], 1309 const uint8_t mac[IEEE80211_ADDR_LEN]) 1310 { 1311 struct iwn_vap *ivp; 1312 struct ieee80211vap *vap; 1313 uint8_t mac1[IEEE80211_ADDR_LEN]; 1314 struct iwn_softc *sc = ic->ic_ifp->if_softc; 1315 1316 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 1317 return NULL; 1318 1319 IEEE80211_ADDR_COPY(mac1, mac); 1320 1321 ivp = kmalloc(sizeof(struct iwn_vap), M_80211_VAP, M_INTWAIT | M_ZERO); 1322 vap = &ivp->iv_vap; 1323 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac1); 1324 ivp->ctx = IWN_RXON_BSS_CTX; 1325 IEEE80211_ADDR_COPY(ivp->macaddr, mac1); 1326 vap->iv_bmissthreshold = 10; /* override default */ 1327 /* Override with driver methods. 
*/ 1328 ivp->iv_newstate = vap->iv_newstate; 1329 vap->iv_newstate = iwn_newstate; 1330 sc->ivap[IWN_RXON_BSS_CTX] = vap; 1331 1332 ieee80211_ratectl_init(vap); 1333 /* Complete setup. */ 1334 ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status); 1335 ic->ic_opmode = opmode; 1336 return vap; 1337 } 1338 1339 static void 1340 iwn_vap_delete(struct ieee80211vap *vap) 1341 { 1342 struct iwn_vap *ivp = IWN_VAP(vap); 1343 1344 ieee80211_ratectl_deinit(vap); 1345 ieee80211_vap_detach(vap); 1346 kfree(ivp, M_80211_VAP); 1347 } 1348 1349 static int 1350 iwn_pci_detach(device_t dev) 1351 { 1352 struct iwn_softc *sc = device_get_softc(dev); 1353 struct ifnet *ifp = sc->sc_ifp; 1354 struct ieee80211com *ic; 1355 int qid; 1356 1357 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1358 1359 wlan_serialize_enter(); 1360 1361 if (ifp != NULL) { 1362 ic = ifp->if_l2com; 1363 1364 ieee80211_draintask(ic, &sc->sc_reinit_task); 1365 ieee80211_draintask(ic, &sc->sc_radioon_task); 1366 ieee80211_draintask(ic, &sc->sc_radiooff_task); 1367 1368 iwn_stop(sc); 1369 callout_stop(&sc->watchdog_to); 1370 callout_stop(&sc->calib_to); 1371 ieee80211_ifdetach(ic); 1372 } 1373 1374 /* cleanup sysctl nodes */ 1375 sysctl_ctx_free(&sc->sc_sysctl_ctx); 1376 1377 /* Uninstall interrupt handler. */ 1378 if (sc->irq != NULL) { 1379 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 1380 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); 1381 if (sc->irq_rid == 1) 1382 pci_release_msi(dev); 1383 sc->irq = NULL; 1384 } 1385 1386 /* Free DMA resources. 
*/ 1387 iwn_free_rx_ring(sc, &sc->rxq); 1388 for (qid = 0; qid < sc->ntxqs; qid++) 1389 iwn_free_tx_ring(sc, &sc->txq[qid]); 1390 iwn_free_sched(sc); 1391 iwn_free_kw(sc); 1392 if (sc->ict != NULL) { 1393 iwn_free_ict(sc); 1394 sc->ict = NULL; 1395 } 1396 iwn_free_fwmem(sc); 1397 1398 if (sc->mem != NULL) { 1399 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem); 1400 sc->mem = NULL; 1401 } 1402 1403 if (ifp != NULL) { 1404 if_free(ifp); 1405 sc->sc_ifp = NULL; 1406 } 1407 1408 bus_dma_tag_destroy(sc->sc_dmat); 1409 1410 wlan_serialize_exit(); 1411 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__); 1412 return 0; 1413 } 1414 1415 static int 1416 iwn_pci_shutdown(device_t dev) 1417 { 1418 struct iwn_softc *sc = device_get_softc(dev); 1419 1420 wlan_serialize_enter(); 1421 iwn_stop_locked(sc); 1422 wlan_serialize_exit(); 1423 1424 return 0; 1425 } 1426 1427 static int 1428 iwn_pci_suspend(device_t dev) 1429 { 1430 struct iwn_softc *sc = device_get_softc(dev); 1431 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 1432 1433 ieee80211_suspend_all(ic); 1434 return 0; 1435 } 1436 1437 static int 1438 iwn_pci_resume(device_t dev) 1439 { 1440 struct iwn_softc *sc = device_get_softc(dev); 1441 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 1442 1443 /* Clear device-specific "PCI retry timeout" register (41h). */ 1444 pci_write_config(dev, 0x41, 0, 1); 1445 1446 ieee80211_resume_all(ic); 1447 return 0; 1448 } 1449 1450 static int 1451 iwn_nic_lock(struct iwn_softc *sc) 1452 { 1453 int ntries; 1454 1455 /* Request exclusive access to NIC. */ 1456 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 1457 1458 /* Spin until we actually get the lock. 
*/ 1459 for (ntries = 0; ntries < 1000; ntries++) { 1460 if ((IWN_READ(sc, IWN_GP_CNTRL) & 1461 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) == 1462 IWN_GP_CNTRL_MAC_ACCESS_ENA) 1463 return 0; 1464 DELAY(10); 1465 } 1466 return ETIMEDOUT; 1467 } 1468 1469 static __inline void 1470 iwn_nic_unlock(struct iwn_softc *sc) 1471 { 1472 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 1473 } 1474 1475 static __inline uint32_t 1476 iwn_prph_read(struct iwn_softc *sc, uint32_t addr) 1477 { 1478 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr); 1479 IWN_BARRIER_READ_WRITE(sc); 1480 return IWN_READ(sc, IWN_PRPH_RDATA); 1481 } 1482 1483 static __inline void 1484 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 1485 { 1486 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr); 1487 IWN_BARRIER_WRITE(sc); 1488 IWN_WRITE(sc, IWN_PRPH_WDATA, data); 1489 } 1490 1491 static __inline void 1492 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 1493 { 1494 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask); 1495 } 1496 1497 static __inline void 1498 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 1499 { 1500 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask); 1501 } 1502 1503 static __inline void 1504 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr, 1505 const uint32_t *data, int count) 1506 { 1507 for (; count > 0; count--, data++, addr += 4) 1508 iwn_prph_write(sc, addr, *data); 1509 } 1510 1511 static __inline uint32_t 1512 iwn_mem_read(struct iwn_softc *sc, uint32_t addr) 1513 { 1514 IWN_WRITE(sc, IWN_MEM_RADDR, addr); 1515 IWN_BARRIER_READ_WRITE(sc); 1516 return IWN_READ(sc, IWN_MEM_RDATA); 1517 } 1518 1519 static __inline void 1520 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 1521 { 1522 IWN_WRITE(sc, IWN_MEM_WADDR, addr); 1523 IWN_BARRIER_WRITE(sc); 1524 IWN_WRITE(sc, IWN_MEM_WDATA, data); 1525 } 1526 1527 static __inline void 1528 iwn_mem_write_2(struct 
iwn_softc *sc, uint32_t addr, uint16_t data) 1529 { 1530 uint32_t tmp; 1531 1532 tmp = iwn_mem_read(sc, addr & ~3); 1533 if (addr & 3) 1534 tmp = (tmp & 0x0000ffff) | data << 16; 1535 else 1536 tmp = (tmp & 0xffff0000) | data; 1537 iwn_mem_write(sc, addr & ~3, tmp); 1538 } 1539 1540 static __inline void 1541 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data, 1542 int count) 1543 { 1544 for (; count > 0; count--, addr += 4) 1545 *data++ = iwn_mem_read(sc, addr); 1546 } 1547 1548 static __inline void 1549 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val, 1550 int count) 1551 { 1552 for (; count > 0; count--, addr += 4) 1553 iwn_mem_write(sc, addr, val); 1554 } 1555 1556 static int 1557 iwn_eeprom_lock(struct iwn_softc *sc) 1558 { 1559 int i, ntries; 1560 1561 for (i = 0; i < 100; i++) { 1562 /* Request exclusive access to EEPROM. */ 1563 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 1564 IWN_HW_IF_CONFIG_EEPROM_LOCKED); 1565 1566 /* Spin until we actually get the lock. */ 1567 for (ntries = 0; ntries < 100; ntries++) { 1568 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 1569 IWN_HW_IF_CONFIG_EEPROM_LOCKED) 1570 return 0; 1571 DELAY(10); 1572 } 1573 } 1574 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__); 1575 return ETIMEDOUT; 1576 } 1577 1578 static __inline void 1579 iwn_eeprom_unlock(struct iwn_softc *sc) 1580 { 1581 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED); 1582 } 1583 1584 /* 1585 * Initialize access by host to One Time Programmable ROM. 1586 * NB: This kind of ROM can be found on 1000 or 6000 Series only. 1587 */ 1588 static int 1589 iwn_init_otprom(struct iwn_softc *sc) 1590 { 1591 uint16_t prev, base, next; 1592 int count, error; 1593 1594 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1595 1596 /* Wait for clock stabilization before accessing prph. 
*/ 1597 if ((error = iwn_clock_wait(sc)) != 0) 1598 return error; 1599 1600 if ((error = iwn_nic_lock(sc)) != 0) 1601 return error; 1602 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 1603 DELAY(5); 1604 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 1605 iwn_nic_unlock(sc); 1606 1607 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */ 1608 if (sc->base_params->shadow_ram_support) { 1609 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT, 1610 IWN_RESET_LINK_PWR_MGMT_DIS); 1611 } 1612 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER); 1613 /* Clear ECC status. */ 1614 IWN_SETBITS(sc, IWN_OTP_GP, 1615 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS); 1616 1617 /* 1618 * Find the block before last block (contains the EEPROM image) 1619 * for HW without OTP shadow RAM. 1620 */ 1621 if (! sc->base_params->shadow_ram_support) { 1622 /* Switch to absolute addressing mode. */ 1623 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS); 1624 base = prev = 0; 1625 for (count = 0; count < sc->base_params->max_ll_items; 1626 count++) { 1627 error = iwn_read_prom_data(sc, base, &next, 2); 1628 if (error != 0) 1629 return error; 1630 if (next == 0) /* End of linked-list. */ 1631 break; 1632 prev = base; 1633 base = le16toh(next); 1634 } 1635 if (count == 0 || count == sc->base_params->max_ll_items) 1636 return EIO; 1637 /* Skip "next" word. 
*/ 1638 sc->prom_base = prev + 1; 1639 } 1640 1641 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1642 1643 return 0; 1644 } 1645 1646 static int 1647 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count) 1648 { 1649 uint8_t *out = data; 1650 uint32_t val, tmp; 1651 int ntries; 1652 1653 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1654 1655 addr += sc->prom_base; 1656 for (; count > 0; count -= 2, addr++) { 1657 IWN_WRITE(sc, IWN_EEPROM, addr << 2); 1658 for (ntries = 0; ntries < 10; ntries++) { 1659 val = IWN_READ(sc, IWN_EEPROM); 1660 if (val & IWN_EEPROM_READ_VALID) 1661 break; 1662 DELAY(5); 1663 } 1664 if (ntries == 10) { 1665 device_printf(sc->sc_dev, 1666 "timeout reading ROM at 0x%x\n", addr); 1667 return ETIMEDOUT; 1668 } 1669 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 1670 /* OTPROM, check for ECC errors. */ 1671 tmp = IWN_READ(sc, IWN_OTP_GP); 1672 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) { 1673 device_printf(sc->sc_dev, 1674 "OTPROM ECC error at 0x%x\n", addr); 1675 return EIO; 1676 } 1677 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) { 1678 /* Correctable ECC error, clear bit. 
*/ 1679 IWN_SETBITS(sc, IWN_OTP_GP, 1680 IWN_OTP_GP_ECC_CORR_STTS); 1681 } 1682 } 1683 *out++ = val >> 16; 1684 if (count > 1) 1685 *out++ = val >> 24; 1686 } 1687 1688 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1689 1690 return 0; 1691 } 1692 1693 static void 1694 iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1695 { 1696 if (error != 0) 1697 return; 1698 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 1699 *(bus_addr_t *)arg = segs[0].ds_addr; 1700 } 1701 1702 static int 1703 iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma, 1704 void **kvap, bus_size_t size, bus_size_t alignment) 1705 { 1706 int error; 1707 1708 dma->tag = NULL; 1709 dma->size = size; 1710 1711 error = bus_dma_tag_create(sc->sc_dmat, alignment, 1712 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1713 1, size, BUS_DMA_NOWAIT, &dma->tag); 1714 if (error != 0) 1715 goto fail; 1716 1717 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 1718 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 1719 if (error != 0) 1720 goto fail; 1721 1722 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 1723 iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 1724 if (error != 0) 1725 goto fail; 1726 1727 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 1728 1729 if (kvap != NULL) 1730 *kvap = dma->vaddr; 1731 1732 return 0; 1733 1734 fail: iwn_dma_contig_free(dma); 1735 return error; 1736 } 1737 1738 static void 1739 iwn_dma_contig_free(struct iwn_dma_info *dma) 1740 { 1741 if (dma->map != NULL) { 1742 if (dma->vaddr != NULL) { 1743 bus_dmamap_sync(dma->tag, dma->map, 1744 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1745 bus_dmamap_unload(dma->tag, dma->map); 1746 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 1747 dma->vaddr = NULL; 1748 } 1749 bus_dmamap_destroy(dma->tag, dma->map); 1750 dma->map = NULL; 1751 } 1752 if (dma->tag != NULL) { 1753 bus_dma_tag_destroy(dma->tag); 
1754 dma->tag = NULL; 1755 } 1756 } 1757 1758 static int 1759 iwn_alloc_sched(struct iwn_softc *sc) 1760 { 1761 /* TX scheduler rings must be aligned on a 1KB boundary. */ 1762 return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched, 1763 sc->schedsz, 1024); 1764 } 1765 1766 static void 1767 iwn_free_sched(struct iwn_softc *sc) 1768 { 1769 iwn_dma_contig_free(&sc->sched_dma); 1770 } 1771 1772 static int 1773 iwn_alloc_kw(struct iwn_softc *sc) 1774 { 1775 /* "Keep Warm" page must be aligned on a 4KB boundary. */ 1776 return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096); 1777 } 1778 1779 static void 1780 iwn_free_kw(struct iwn_softc *sc) 1781 { 1782 iwn_dma_contig_free(&sc->kw_dma); 1783 } 1784 1785 static int 1786 iwn_alloc_ict(struct iwn_softc *sc) 1787 { 1788 /* ICT table must be aligned on a 4KB boundary. */ 1789 return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict, 1790 IWN_ICT_SIZE, 4096); 1791 } 1792 1793 static void 1794 iwn_free_ict(struct iwn_softc *sc) 1795 { 1796 iwn_dma_contig_free(&sc->ict_dma); 1797 } 1798 1799 static int 1800 iwn_alloc_fwmem(struct iwn_softc *sc) 1801 { 1802 /* Must be aligned on a 16-byte boundary. */ 1803 return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16); 1804 } 1805 1806 static void 1807 iwn_free_fwmem(struct iwn_softc *sc) 1808 { 1809 iwn_dma_contig_free(&sc->fw_dma); 1810 } 1811 1812 static int 1813 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1814 { 1815 bus_size_t size; 1816 int i, error; 1817 1818 ring->cur = 0; 1819 1820 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1821 1822 /* Allocate RX descriptors (256-byte aligned). 
*/ 1823 size = IWN_RX_RING_COUNT * sizeof (uint32_t); 1824 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1825 size, 256); 1826 if (error != 0) { 1827 device_printf(sc->sc_dev, 1828 "%s: could not allocate RX ring DMA memory, error %d\n", 1829 __func__, error); 1830 goto fail; 1831 } 1832 1833 /* Allocate RX status area (16-byte aligned). */ 1834 error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat, 1835 sizeof (struct iwn_rx_status), 16); 1836 if (error != 0) { 1837 device_printf(sc->sc_dev, 1838 "%s: could not allocate RX status DMA memory, error %d\n", 1839 __func__, error); 1840 goto fail; 1841 } 1842 1843 /* Create RX buffer DMA tag. */ 1844 error = bus_dma_tag_create(sc->sc_dmat, 1, 0, 1845 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1846 IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, &ring->data_dmat); 1847 if (error != 0) { 1848 device_printf(sc->sc_dev, 1849 "%s: could not create RX buf DMA tag, error %d\n", 1850 __func__, error); 1851 goto fail; 1852 } 1853 1854 /* 1855 * Allocate and map RX buffers. 
1856 */ 1857 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1858 struct iwn_rx_data *data = &ring->data[i]; 1859 bus_addr_t paddr; 1860 1861 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1862 if (error != 0) { 1863 device_printf(sc->sc_dev, 1864 "%s: could not create RX buf DMA map, error %d\n", 1865 __func__, error); 1866 goto fail; 1867 } 1868 1869 data->m = m_getjcl(MB_DONTWAIT, MT_DATA, 1870 M_PKTHDR, IWN_RBUF_SIZE); 1871 if (data->m == NULL) { 1872 device_printf(sc->sc_dev, 1873 "%s: could not allocate RX mbuf\n", __func__); 1874 error = ENOBUFS; 1875 goto fail; 1876 } 1877 1878 error = bus_dmamap_load(ring->data_dmat, data->map, 1879 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, 1880 &paddr, BUS_DMA_NOWAIT); 1881 if (error != 0 && error != EFBIG) { 1882 device_printf(sc->sc_dev, 1883 "%s: can't not map mbuf, error %d\n", __func__, 1884 error); 1885 goto fail; 1886 } 1887 1888 /* Set physical address of RX buffer (256-byte aligned). */ 1889 ring->desc[i] = htole32(paddr >> 8); 1890 } 1891 1892 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1893 BUS_DMASYNC_PREWRITE); 1894 1895 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 1896 1897 return 0; 1898 1899 fail: iwn_free_rx_ring(sc, ring); 1900 1901 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 1902 1903 return error; 1904 } 1905 1906 static void 1907 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1908 { 1909 int ntries; 1910 1911 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 1912 1913 if (iwn_nic_lock(sc) == 0) { 1914 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 1915 for (ntries = 0; ntries < 1000; ntries++) { 1916 if (IWN_READ(sc, IWN_FH_RX_STATUS) & 1917 IWN_FH_RX_STATUS_IDLE) 1918 break; 1919 DELAY(10); 1920 } 1921 iwn_nic_unlock(sc); 1922 } 1923 ring->cur = 0; 1924 sc->last_rx_valid = 0; 1925 } 1926 1927 static void 1928 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1929 { 1930 int i; 1931 1932 DPRINTF(sc, 
IWN_DEBUG_TRACE, "->Doing %s \n", __func__); 1933 1934 iwn_dma_contig_free(&ring->desc_dma); 1935 iwn_dma_contig_free(&ring->stat_dma); 1936 1937 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1938 struct iwn_rx_data *data = &ring->data[i]; 1939 1940 if (data->m != NULL) { 1941 bus_dmamap_sync(ring->data_dmat, data->map, 1942 BUS_DMASYNC_POSTREAD); 1943 bus_dmamap_unload(ring->data_dmat, data->map); 1944 m_freem(data->m); 1945 data->m = NULL; 1946 } 1947 if (data->map != NULL) 1948 bus_dmamap_destroy(ring->data_dmat, data->map); 1949 } 1950 if (ring->data_dmat != NULL) { 1951 bus_dma_tag_destroy(ring->data_dmat); 1952 ring->data_dmat = NULL; 1953 } 1954 } 1955 1956 static int 1957 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid) 1958 { 1959 bus_addr_t paddr; 1960 bus_size_t size; 1961 int i, error; 1962 1963 ring->qid = qid; 1964 ring->queued = 0; 1965 ring->cur = 0; 1966 1967 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1968 1969 /* Allocate TX descriptors (256-byte aligned). 
*/ 1970 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc); 1971 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1972 size, 256); 1973 if (error != 0) { 1974 device_printf(sc->sc_dev, 1975 "%s: could not allocate TX ring DMA memory, error %d\n", 1976 __func__, error); 1977 goto fail; 1978 } 1979 1980 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd); 1981 error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1982 size, 4); 1983 if (error != 0) { 1984 device_printf(sc->sc_dev, 1985 "%s: could not allocate TX cmd DMA memory, error %d\n", 1986 __func__, error); 1987 goto fail; 1988 } 1989 1990 error = bus_dma_tag_create(sc->sc_dmat, 1, 0, 1991 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1992 IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, &ring->data_dmat); 1993 if (error != 0) { 1994 device_printf(sc->sc_dev, 1995 "%s: could not create TX buf DMA tag, error %d\n", 1996 __func__, error); 1997 goto fail; 1998 } 1999 2000 paddr = ring->cmd_dma.paddr; 2001 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 2002 struct iwn_tx_data *data = &ring->data[i]; 2003 2004 data->cmd_paddr = paddr; 2005 data->scratch_paddr = paddr + 12; 2006 paddr += sizeof (struct iwn_tx_cmd); 2007 2008 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 2009 if (error != 0) { 2010 device_printf(sc->sc_dev, 2011 "%s: could not create TX buf DMA map, error %d\n", 2012 __func__, error); 2013 goto fail; 2014 } 2015 } 2016 2017 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 2018 2019 return 0; 2020 2021 fail: iwn_free_tx_ring(sc, ring); 2022 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); 2023 return error; 2024 } 2025 2026 static void 2027 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 2028 { 2029 int i; 2030 2031 DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__); 2032 2033 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 2034 struct iwn_tx_data *data = &ring->data[i]; 2035 2036 if (data->m != 
NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}

/*
 * Release all DMA resources held by a TX ring: descriptor and command
 * memory, the per-slot maps, and any mbufs still in flight.
 */
static void
iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(ring->data_dmat, data->map);
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * Reset the ICT (interrupt) table and re-enable interrupts in ICT
 * mode (5000 series and later).
 */
static void
iwn5000_ict_reset(struct iwn_softc *sc)
{
	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);

	/* Reset ICT table. */
	memset(sc->ict, 0, IWN_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);

	/* Enable periodic RX interrupt. */
	sc->int_mask |= IWN_INT_RX_PERIODIC;
	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWN_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
}

/*
 * Read the MAC address and adapter configuration from the EEPROM (or
 * OTPROM).  The adapter is powered on for the duration of the access
 * and powered off again before returning.  Returns 0 or an errno.
 */
static int
iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
{
	struct iwn_ops *ops = &sc->ops;
	uint16_t val;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Check whether adapter has an EEPROM or an OTPROM. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
	    (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");

	/* Adapter has to be powered on for EEPROM access to work. */
	if ((error = iwn_apm_init(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not power ON adapter, error %d\n", __func__,
		    error);
		return error;
	}

	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
		device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
		return EIO;
	}
	if ((error = iwn_eeprom_lock(sc)) != 0) {
		device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n",
		    __func__, error);
		return error;
	}
	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
		if ((error = iwn_init_otprom(sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not initialize OTPROM, error %d\n",
			    __func__, error);
			return error;
		}
	}

	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
	DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val));
	/* Check if HT support is bonded out. */
	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
		sc->sc_flags |= IWN_FLAG_HAS_11N;

	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
	sc->rfcfg = le16toh(val);
	DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
	/* Read Tx/Rx chains from ROM unless it's known to be broken.
	 */
	if (sc->txchainmask == 0)
		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
	if (sc->rxchainmask == 0)
		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);

	/* Read MAC address. */
	iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);

	/* Read adapter-specific information from EEPROM. */
	ops->read_eeprom(sc);

	iwn_apm_stop(sc);	/* Power OFF adapter. */

	iwn_eeprom_unlock(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;
}

/*
 * Read 4965-specific data from the EEPROM: regulatory domain, channel
 * lists, maximum allowed TX power and the per-band power samples.
 */
static void
iwn4965_read_eeprom(struct iwn_softc *sc)
{
	uint32_t addr;
	uint16_t val;
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only). */
	for (i = 0; i < IWN_NBANDS - 1; i++) {
		addr = iwn4965_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
	sc->maxpwr2GHz = val & 0xff;
	sc->maxpwr5GHz = val >> 8;
	/* Check that EEPROM values are within valid range. */
	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
		sc->maxpwr5GHz = 38;
	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
		sc->maxpwr2GHz = 38;
	DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
	    sc->maxpwr2GHz, sc->maxpwr5GHz);

	/* Read samples for each TX power group. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
	    sizeof sc->bands);

	/* Read voltage at which samples were taken. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
	sc->eeprom_voltage = (int16_t)le16toh(val);
	DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
	    sc->eeprom_voltage);

#ifdef IWN_DEBUG
	/* Print samples. */
	if (sc->sc_debug & IWN_DEBUG_ANY) {
		for (i = 0; i < IWN_NBANDS - 1; i++)
			iwn4965_print_power_group(sc, i);
	}
#endif

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

#ifdef IWN_DEBUG
/*
 * Dump the TX power samples of EEPROM band 'i' to the console.
 */
static void
iwn4965_print_power_group(struct iwn_softc *sc, int i)
{
	struct iwn4965_eeprom_band *band = &sc->bands[i];
	struct iwn4965_eeprom_chan_samples *chans = band->chans;
	int j, c;

	kprintf("===band %d===\n", i);
	kprintf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
	kprintf("chan1 num=%d\n", chans[0].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			kprintf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[0].samples[c][j].temp,
			    chans[0].samples[c][j].gain,
			    chans[0].samples[c][j].power,
			    chans[0].samples[c][j].pa_det);
		}
	}
	kprintf("chan2 num=%d\n", chans[1].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			kprintf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[1].samples[c][j].temp,
			    chans[1].samples[c][j].gain,
			    chans[1].samples[c][j].power,
			    chans[1].samples[c][j].pa_det);
		}
	}
}
#endif

/*
 * Read 5000-series data from the EEPROM: regulatory domain, channel
 * lists, calibration header and temperature/crystal calibration data.
 */
static void
iwn5000_read_eeprom(struct iwn_softc *sc)
{
	struct iwn5000_eeprom_calib_hdr hdr;
	int32_t volt;
	uint32_t base, addr;
	uint16_t val;
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Read regulatory domain (4 ASCII characters).
	 */
	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
	    sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only). */
	for (i = 0; i < IWN_NBANDS - 1; i++) {
		addr = base + sc->base_params->regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read enhanced TX power information for 6000 Series. */
	if (sc->base_params->enhanced_TX_power)
		iwn_read_eeprom_enhinfo(sc);

	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: calib version=%u pa type=%u voltage=%u\n", __func__,
	    hdr.version, hdr.pa_type, le16toh(hdr.volt));
	sc->calib_ver = hdr.version;

	if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
		sc->eeprom_voltage = le16toh(hdr.volt);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp_high=le16toh(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		sc->eeprom_temp = le16toh(val);
	}

	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		/* Compute temperature offset. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp = le16toh(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		volt = le16toh(val);
		sc->temp_off = sc->eeprom_temp - (volt / -5);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
		    sc->eeprom_temp, volt, sc->temp_off);
	} else {
		/* Read crystal calibration. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
		    &sc->eeprom_crystal, sizeof (uint32_t));
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
		    le32toh(sc->eeprom_crystal));
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

/*
 * Translate EEPROM flags to net80211.
 */
static uint32_t
iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
{
	uint32_t nflags;

	nflags = 0;
	/* Channels not marked active may only be scanned passively. */
	if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
		nflags |= IEEE80211_CHAN_PASSIVE;
	if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
		nflags |= IEEE80211_CHAN_NOADHOC;
	if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
		nflags |= IEEE80211_CHAN_DFS;
		/* XXX apparently IBSS may still be marked */
		nflags |= IEEE80211_CHAN_NOADHOC;
	}

	return nflags;
}

/*
 * Add the valid 20MHz channels of EEPROM band 'n' to the net80211
 * channel list (band 0 is 2GHz, the others are 5GHz).
 */
static void
iwn_read_eeprom_band(struct iwn_softc *sc, int n)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
	const struct iwn_chan_band *band = &iwn_bands[n];
	struct ieee80211_channel *c;
	uint8_t chan;
	int i, nflags;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "skip chan %d flags 0x%x maxpwr %d\n",
			    band->chan[i], channels[i].flags,
			    channels[i].maxpwr);
			continue;
		}
		chan = band->chan[i];
		nflags = iwn_eeprom_channel_flags(&channels[i]);

		c = &ic->ic_channels[ic->ic_nchans++];
		c->ic_ieee = chan;
		c->ic_maxregpower = channels[i].maxpwr;
		c->ic_maxpower = 2*c->ic_maxregpower;

		if (n == 0) {	/* 2GHz band */
			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G);
			/* G =>'s B is supported */
			c->ic_flags =
IEEE80211_CHAN_B | nflags;
			/* Clone the B entry for 11g. */
			c = &ic->ic_channels[ic->ic_nchans++];
			c[0] = c[-1];
			c->ic_flags = IEEE80211_CHAN_G | nflags;
		} else {	/* 5GHz band */
			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A);
			c->ic_flags = IEEE80211_CHAN_A | nflags;
		}

		/* Save maximum allowed TX power for this channel. */
		sc->maxpwr[chan] = channels[i].maxpwr;

		DPRINTF(sc, IWN_DEBUG_RESET,
		    "add chan %d flags 0x%x maxpwr %d\n", chan,
		    channels[i].flags, channels[i].maxpwr);

		if (sc->sc_flags & IWN_FLAG_HAS_11N) {
			/* add HT20, HT40 added separately */
			c = &ic->ic_channels[ic->ic_nchans++];
			c[0] = c[-1];
			c->ic_flags |= IEEE80211_CHAN_HT20;
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

/*
 * Add the HT40 channel pairs of EEPROM band 'n' to the net80211
 * channel list.  The corresponding 20MHz channels must have been
 * added already; does nothing if the adapter has no 11n support.
 */
static void
iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
	const struct iwn_chan_band *band = &iwn_bands[n];
	struct ieee80211_channel *c, *cent, *extc;
	uint8_t chan;
	int i, nflags;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__);

	if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) {
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__);
		return;
	}

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "skip chan %d flags 0x%x maxpwr %d\n",
			    band->chan[i], channels[i].flags,
			    channels[i].maxpwr);
			continue;
		}
		chan = band->chan[i];
		nflags = iwn_eeprom_channel_flags(&channels[i]);

		/*
		 * Each entry defines an HT40 channel pair; find the
		 * center channel, then the extension channel above.
		 */
		cent = ieee80211_find_channel_byieee(ic, chan,
		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
		if (cent == NULL) {	/* XXX shouldn't happen */
			device_printf(sc->sc_dev,
			    "%s: no entry for channel %d\n", __func__, chan);
			continue;
		}
		extc = ieee80211_find_channel(ic, cent->ic_freq+20,
		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
		if (extc == NULL) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "%s: skip chan %d, extension channel not found\n",
			    __func__, chan);
			continue;
		}

		DPRINTF(sc, IWN_DEBUG_RESET,
		    "add ht40 chan %d flags 0x%x maxpwr %d\n",
		    chan, channels[i].flags, channels[i].maxpwr);

		/* Add one HT40U and one HT40D entry for the pair. */
		c = &ic->ic_channels[ic->ic_nchans++];
		c[0] = cent[0];
		c->ic_extieee = extc->ic_ieee;
		c->ic_flags &= ~IEEE80211_CHAN_HT;
		c->ic_flags |= IEEE80211_CHAN_HT40U | nflags;
		c = &ic->ic_channels[ic->ic_nchans++];
		c[0] = extc[0];
		c->ic_extieee = cent->ic_ieee;
		c->ic_flags &= ~IEEE80211_CHAN_HT;
		c->ic_flags |= IEEE80211_CHAN_HT40D | nflags;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

/*
 * Read the channel list of EEPROM band 'n' at ROM address 'addr' and
 * register it with net80211 (bands 0-4 are 20MHz, 5-6 are HT40 pairs).
 */
static void
iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
	    iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));

	if (n < 5)
		iwn_read_eeprom_band(sc, n);
	else
		iwn_read_eeprom_ht40(sc, n);
	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
}

/*
 * Map a net80211 channel back to its EEPROM channel entry, or NULL
 * if the channel is not listed in the EEPROM.
 */
static struct iwn_eeprom_chan *
iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
{
	int band, chan, i, j;

	if (IEEE80211_IS_CHAN_HT40(c)) {
		band = IEEE80211_IS_CHAN_5GHZ(c) ?
6 : 5;
		if (IEEE80211_IS_CHAN_HT40D(c))
			chan = c->ic_extieee;
		else
			chan = c->ic_ieee;
		for (i = 0; i < iwn_bands[band].nchan; i++) {
			if (iwn_bands[band].chan[i] == chan)
				return &sc->eeprom_channels[band][i];
		}
	} else {
		for (j = 0; j < 5; j++) {
			for (i = 0; i < iwn_bands[j].nchan; i++) {
				if (iwn_bands[j].chan[i] == c->ic_ieee)
					return &sc->eeprom_channels[j][i];
			}
		}
	}
	return NULL;
}

/*
 * Enforce flags read from EEPROM.
 */
static int
iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
    int nchan, struct ieee80211_channel chans[])
{
	struct iwn_softc *sc = ic->ic_ifp->if_softc;
	int i;

	for (i = 0; i < nchan; i++) {
		struct ieee80211_channel *c = &chans[i];
		struct iwn_eeprom_chan *channel;

		channel = iwn_find_eeprom_channel(sc, c);
		if (channel == NULL) {
			if_printf(ic->ic_ifp,
			    "%s: invalid channel %u freq %u/0x%x\n",
			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
			return EINVAL;
		}
		c->ic_flags |= iwn_eeprom_channel_flags(channel);
	}

	return 0;
}

/*
 * Read enhanced TX power information from the EEPROM (6000 Series)
 * and apply it to the matching net80211 channels.
 */
static void
iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
{
	struct iwn_eeprom_enhinfo enhinfo[35];
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_channel *c;
	uint16_t val, base;
	int8_t maxpwr;
	uint8_t flags;
	int i, j;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
	    enhinfo, sizeof enhinfo);

	for (i = 0; i < nitems(enhinfo); i++) {
		flags = enhinfo[i].flags;
		if (!(flags & IWN_ENHINFO_VALID))
			continue;	/* Skip invalid entries. */

		/* Keep the highest power among the active TX chains. */
		maxpwr = 0;
		if (sc->txchainmask & IWN_ANT_A)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
		if (sc->txchainmask & IWN_ANT_B)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
		if (sc->txchainmask & IWN_ANT_C)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
		if (sc->ntxchains == 2)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
		else if (sc->ntxchains == 3)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);

		for (j = 0; j < ic->ic_nchans; j++) {
			c = &ic->ic_channels[j];
			/* Match band, mode, HT40-ness and channel number. */
			if ((flags & IWN_ENHINFO_5GHZ)) {
				if (!IEEE80211_IS_CHAN_A(c))
					continue;
			} else if ((flags & IWN_ENHINFO_OFDM)) {
				if (!IEEE80211_IS_CHAN_G(c))
					continue;
			} else if (!IEEE80211_IS_CHAN_B(c))
				continue;
			if ((flags & IWN_ENHINFO_HT40)) {
				if (!IEEE80211_IS_CHAN_HT40(c))
					continue;
			} else {
				if (IEEE80211_IS_CHAN_HT40(c))
					continue;
			}
			if (enhinfo[i].chan != 0 &&
			    enhinfo[i].chan != c->ic_ieee)
				continue;

			DPRINTF(sc, IWN_DEBUG_RESET,
			    "channel %d(%x), maxpwr %d\n", c->ic_ieee,
			    c->ic_flags, maxpwr / 2);
			/* maxpwr is in half-dBm units. */
			c->ic_maxregpower = maxpwr / 2;
			c->ic_maxpower = maxpwr;
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

/*
 * net80211 node allocation callback: return a zeroed iwn_node.
 */
static struct ieee80211_node *
iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	return kmalloc(sizeof(struct iwn_node), M_80211_NODE,
	    M_INTWAIT | M_ZERO);
}

/*
 * Map a legacy (CCK/OFDM) net80211 rate to its hardware PLCP code.
 * Returns 0 for unknown rates.
 */
static __inline int
rate2plcp(int rate)
{
	switch (rate & 0xff) {
	case 12:	return 0xd;
	case 18:	return 0xf;
	case 24:	return 0x5;
	case 36:	return 0x7;
	case 48:	return 0x9;
	case 72:	return 0xb;
	case 96:	return 0x1;
	case 108:	return 0x3;
	case 2:		return 10;
	case 4:		return 20;
	case 11:	return 55;
	case 22:	return 110;
	}
	return 0;
}

/*
 * Calculate the required PLCP value from the given
rate,
 * to the given node.
 *
 * This will take the node configuration (eg 11n, rate table
 * setup, etc) into consideration.
 */
static uint32_t
iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni,
    uint8_t rate)
{
#define	RV(v)	((v) & IEEE80211_RATE_VAL)
	struct ieee80211com *ic = ni->ni_ic;
	uint8_t txant1, txant2;
	uint32_t plcp = 0;
	int ridx;

	/* Use the first valid TX antenna. */
	txant1 = IWN_LSB(sc->txchainmask);
	txant2 = IWN_LSB(sc->txchainmask & ~txant1);

	/*
	 * If it's an MCS rate, let's set the plcp correctly
	 * and set the relevant flags based on the node config.
	 */
	if (rate & IEEE80211_RATE_MCS) {
		/*
		 * Set the initial PLCP value to be between 0->31 for
		 * MCS 0 -> MCS 31, then set the "I'm an MCS rate!"
		 * flag.
		 */
		plcp = RV(rate) | IWN_RFLAG_MCS;

		/*
		 * XXX the following should only occur if both
		 * the local configuration _and_ the remote node
		 * advertise these capabilities.  Thus this code
		 * may need fixing!
		 */

		/*
		 * Set the channel width and guard interval.
		 */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			plcp |= IWN_RFLAG_HT40;
			if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
				plcp |= IWN_RFLAG_SGI;
		} else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
			plcp |= IWN_RFLAG_SGI;
		}

		/*
		 * If it's a two stream rate, enable TX on both
		 * antennas.
		 *
		 * XXX three stream rates?
		 */
		if (rate > 0x87)
			plcp |= IWN_RFLAG_ANT(txant1 | txant2);
		else
			plcp |= IWN_RFLAG_ANT(txant1);
	} else {
		/*
		 * Set the initial PLCP - fine for both
		 * OFDM and CCK rates.
		 */
		plcp = rate2plcp(rate);

		/* Set CCK flag if it's CCK */

		/* XXX It would be nice to have a method
		 * to map the ridx -> phy table entry
		 * so we could just query that, rather than
		 * this hack to check against IWN_RIDX_OFDM6.
		 */
		ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
		    rate & IEEE80211_RATE_VAL);
		if (ridx < IWN_RIDX_OFDM6 &&
		    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
			plcp |= IWN_RFLAG_CCK;

		/* Set antenna configuration */
		plcp |= IWN_RFLAG_ANT(txant1);
	}

	DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n",
	    __func__,
	    rate,
	    plcp);

	/* PLCP is handed to the firmware in little-endian form. */
	return (htole32(plcp));
#undef RV
}

/*
 * net80211 new-association callback.
 */
static void
iwn_newassoc(struct ieee80211_node *ni, int isnew)
{
	/* Doesn't do anything at the moment */
}

/*
 * ifmedia change callback.
 */
static int
iwn_media_change(struct ifnet *ifp)
{
	int error;

	error = ieee80211_media_change(ifp);
	/* NB: only the fixed rate can change and that doesn't need a reset */
	return (error == ENETRESET ?
0 : error);
}

/*
 * net80211 state change callback.  Drives the firmware through
 * AUTH/RUN transitions, then hands control back to net80211's own
 * state machine via the saved iv_newstate.
 */
static int
iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwn_vap *ivp = IWN_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwn_softc *sc = ic->ic_ifp->if_softc;
	int error = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
	    ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]);

	callout_stop(&sc->calib_to);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];

	switch (nstate) {
	case IEEE80211_S_ASSOC:
		if (vap->iv_state != IEEE80211_S_RUN)
			break;
		/* FALLTHROUGH */
	case IEEE80211_S_AUTH:
		if (vap->iv_state == IEEE80211_S_AUTH)
			break;

		/*
		 * !AUTH -> AUTH transition requires state reset to handle
		 * reassociations correctly.
		 */
		sc->rxon->associd = 0;
		sc->rxon->filter &= ~htole32(IWN_FILTER_BSS);
		sc->calib.state = IWN_CALIB_STATE_INIT;

		if ((error = iwn_auth(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state\n", __func__);
		}
		break;

	case IEEE80211_S_RUN:
		/*
		 * RUN -> RUN transition; Just restart the timers.
		 */
		if (vap->iv_state == IEEE80211_S_RUN) {
			sc->calib_cnt = 0;
			break;
		}

		/*
		 * !RUN -> RUN requires setting the association id
		 * which is done with a firmware cmd.  We also defer
		 * starting the timers until that work is done.
		 */
		if ((error = iwn_run(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to run state\n", __func__);
		}
		break;

	case IEEE80211_S_INIT:
		sc->calib.state = IWN_CALIB_STATE_INIT;
		break;

	default:
		break;
	}
	if (error != 0){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
		return error;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return ivp->iv_newstate(vap, nstate, arg);
}

/*
 * Calibration timer callback: every 60 seconds (120 ticks of 500ms)
 * request statistics from the firmware to force automatic TX power
 * calibration, then re-arm itself.
 */
static void
iwn_calib_timeout(void *arg)
{
	struct iwn_softc *sc = arg;

	wlan_serialize_enter();

	/* Force automatic TX power calibration every 60 secs. */
	if (++sc->calib_cnt >= 120) {
		uint32_t flags = 0;

		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
		    "sending request for statistics");
		(void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
		    sizeof flags, 1);
		sc->calib_cnt = 0;
	}
	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
	    sc);
	wlan_serialize_exit();
}

/*
 * Process an RX_PHY firmware notification.  This is usually immediately
 * followed by an MPDU_RX_DONE notification.
 */
static void
iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Save RX statistics, they will be used on MPDU_RX_DONE. */
	memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
	sc->last_rx_valid = 1;
}

/*
 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
2874 */ 2875 static void 2876 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2877 struct iwn_rx_data *data) 2878 { 2879 struct iwn_ops *ops = &sc->ops; 2880 struct ifnet *ifp = sc->sc_ifp; 2881 struct ieee80211com *ic = ifp->if_l2com; 2882 struct iwn_rx_ring *ring = &sc->rxq; 2883 struct ieee80211_frame *wh; 2884 struct ieee80211_node *ni; 2885 struct mbuf *m, *m1; 2886 struct iwn_rx_stat *stat; 2887 caddr_t head; 2888 bus_addr_t paddr; 2889 uint32_t flags; 2890 int error, len, rssi, nf; 2891 2892 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2893 2894 if (desc->type == IWN_MPDU_RX_DONE) { 2895 /* Check for prior RX_PHY notification. */ 2896 if (!sc->last_rx_valid) { 2897 DPRINTF(sc, IWN_DEBUG_ANY, 2898 "%s: missing RX_PHY\n", __func__); 2899 return; 2900 } 2901 stat = &sc->last_rx_stat; 2902 } else 2903 stat = (struct iwn_rx_stat *)(desc + 1); 2904 2905 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2906 2907 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { 2908 device_printf(sc->sc_dev, 2909 "%s: invalid RX statistic header, len %d\n", __func__, 2910 stat->cfg_phy_len); 2911 return; 2912 } 2913 if (desc->type == IWN_MPDU_RX_DONE) { 2914 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); 2915 head = (caddr_t)(mpdu + 1); 2916 len = le16toh(mpdu->len); 2917 } else { 2918 head = (caddr_t)(stat + 1) + stat->cfg_phy_len; 2919 len = le16toh(stat->len); 2920 } 2921 2922 flags = le32toh(*(uint32_t *)(head + len)); 2923 2924 /* Discard frames with a bad FCS early. */ 2925 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { 2926 DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n", 2927 __func__, flags); 2928 IFNET_STAT_INC(ifp, ierrors, 1); 2929 return; 2930 } 2931 /* Discard frames that are too short. 
*/ 2932 if (len < sizeof (*wh)) { 2933 DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n", 2934 __func__, len); 2935 IFNET_STAT_INC(ifp, ierrors, 1); 2936 return; 2937 } 2938 2939 m1 = m_getjcl(MB_DONTWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE); 2940 if (m1 == NULL) { 2941 DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n", 2942 __func__); 2943 IFNET_STAT_INC(ifp, ierrors, 1); 2944 return; 2945 } 2946 bus_dmamap_unload(ring->data_dmat, data->map); 2947 2948 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 2949 IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 2950 if (error != 0 && error != EFBIG) { 2951 device_printf(sc->sc_dev, 2952 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 2953 m_freem(m1); 2954 2955 /* Try to reload the old mbuf. */ 2956 error = bus_dmamap_load(ring->data_dmat, data->map, 2957 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, 2958 &paddr, BUS_DMA_NOWAIT); 2959 if (error != 0 && error != EFBIG) { 2960 panic("%s: could not load old RX mbuf", __func__); 2961 } 2962 /* Physical address may have changed. */ 2963 ring->desc[ring->cur] = htole32(paddr >> 8); 2964 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 2965 BUS_DMASYNC_PREWRITE); 2966 IFNET_STAT_INC(ifp, ierrors, 1); 2967 return; 2968 } 2969 2970 m = data->m; 2971 data->m = m1; 2972 /* Update RX descriptor. */ 2973 ring->desc[ring->cur] = htole32(paddr >> 8); 2974 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2975 BUS_DMASYNC_PREWRITE); 2976 2977 /* Finalize mbuf. */ 2978 m->m_pkthdr.rcvif = ifp; 2979 m->m_data = head; 2980 m->m_pkthdr.len = m->m_len = len; 2981 2982 /* Grab a reference to the source node. */ 2983 wh = mtod(m, struct ieee80211_frame *); 2984 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2985 nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN && 2986 (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? 
sc->noise : -95; 2987 2988 rssi = ops->get_rssi(sc, stat); 2989 2990 if (ieee80211_radiotap_active(ic)) { 2991 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 2992 2993 tap->wr_flags = 0; 2994 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) 2995 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2996 tap->wr_dbm_antsignal = (int8_t)rssi; 2997 tap->wr_dbm_antnoise = (int8_t)nf; 2998 tap->wr_tsft = stat->tstamp; 2999 switch (stat->rate) { 3000 /* CCK rates. */ 3001 case 10: tap->wr_rate = 2; break; 3002 case 20: tap->wr_rate = 4; break; 3003 case 55: tap->wr_rate = 11; break; 3004 case 110: tap->wr_rate = 22; break; 3005 /* OFDM rates. */ 3006 case 0xd: tap->wr_rate = 12; break; 3007 case 0xf: tap->wr_rate = 18; break; 3008 case 0x5: tap->wr_rate = 24; break; 3009 case 0x7: tap->wr_rate = 36; break; 3010 case 0x9: tap->wr_rate = 48; break; 3011 case 0xb: tap->wr_rate = 72; break; 3012 case 0x1: tap->wr_rate = 96; break; 3013 case 0x3: tap->wr_rate = 108; break; 3014 /* Unknown rate: should not happen. */ 3015 default: tap->wr_rate = 0; 3016 } 3017 } 3018 3019 /* Send the frame to the 802.11 layer. */ 3020 if (ni != NULL) { 3021 if (ni->ni_flags & IEEE80211_NODE_HT) 3022 m->m_flags |= M_AMPDU; 3023 (void)ieee80211_input(ni, m, rssi - nf, nf); 3024 /* Node is no longer needed. */ 3025 ieee80211_free_node(ni); 3026 } else { 3027 (void)ieee80211_input_all(ic, m, rssi - nf, nf); 3028 } 3029 3030 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3031 3032 } 3033 3034 /* Process an incoming Compressed BlockAck. 
 */
static void
iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct iwn_node *wn;
	struct ieee80211_node *ni;
	struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
	struct iwn_tx_ring *txq;
	struct iwn_tx_data *txdata;
	struct ieee80211_tx_ampdu *tap;
	struct mbuf *m;
	uint64_t bitmap;
	uint16_t ssn;
	uint8_t tid;
	int ackfailcnt = 0, i, lastidx, qid, *res, shift;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* The BA notification payload follows the RX descriptor. */
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/*
	 * NOTE(review): qid is byte-swapped here, but the raw ba->qid is
	 * used for the three array lookups below; these only agree on
	 * little-endian hosts unless ba->qid is a single byte -- confirm
	 * against struct iwn_compressed_ba in if_iwnreg.h.
	 */
	qid = le16toh(ba->qid);
	txq = &sc->txq[ba->qid];
	tap = sc->qid2tap[ba->qid];
	tid = tap->txa_ac;
	wn = (void *)tap->txa_ni;

	/*
	 * If the AMPDU session is no longer running, remember the
	 * teardown state (res/ssn) so the queue can be stopped once it
	 * drains below.
	 */
	res = NULL;
	ssn = 0;
	if (!IEEE80211_AMPDU_RUNNING(tap)) {
		res = tap->txa_private;
		ssn = tap->txa_start & 0xfff;
	}

	/*
	 * Reclaim all ring slots up to the start sequence number carried
	 * in the BlockAck; the ring index is the SSN modulo the ring
	 * size (256 entries).
	 */
	for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) {
		txdata = &txq->data[txq->read];

		/* Unmap and free mbuf. */
		bus_dmamap_sync(txq->data_dmat, txdata->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->data_dmat, txdata->map);
		m = txdata->m, txdata->m = NULL;
		ni = txdata->ni, txdata->ni = NULL;

		KASSERT(ni != NULL, ("no node"));
		KASSERT(m != NULL, ("no mbuf"));

		/* Everything below the BA window start is acked. */
		ieee80211_tx_complete(ni, m, 1);

		txq->queued--;
		txq->read = (txq->read + 1) % IWN_TX_RING_COUNT;
	}

	/* Ring drained during session teardown: stop the HW agg queue. */
	if (txq->queued == 0 && res != NULL) {
		iwn_nic_lock(sc);
		ops->ampdu_tx_stop(sc, qid, tid, ssn);
		iwn_nic_unlock(sc);
		sc->qid2tap[qid] = NULL;
		kfree(res, M_DEVBUF);
		return;
	}

	/* No aggregate outstanding for this TID: nothing to account. */
	if (wn->agg[tid].bitmap == 0)
		return;

	/*
	 * Align the firmware's 64-bit BA bitmap with the first frame of
	 * the aggregate we last submitted (startidx), wrapping modulo
	 * the 256-entry sequence window.
	 */
	shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff);
	if (shift < 0)
		shift += 0x100;

	/* Aggregate does not fit inside the shifted 64-bit window. */
	if (wn->agg[tid].nframes > (64 - shift))
		return;

	/*
	 * Walk the per-frame ack bits and report each success/failure to
	 * the rate control module.
	 */
	ni = tap->txa_ni;
	bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap;
	for (i = 0; bitmap; i++) {
		if ((bitmap & 1) == 0) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
			    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
		} else {
			IFNET_STAT_INC(ifp, opackets, 1);
			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
			    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
		}
		bitmap >>= 1;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

}

/*
 * Process a CALIBRATION_RESULT notification sent by the initialization
 * firmware on response to a CMD_CALIB_CONFIG command (5000 only).
 */
static void
iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
	int len, idx = -1;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Runtime firmware should not send such a notification. */
	if (sc->sc_flags & IWN_FLAG_CALIB_DONE){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received after clib done\n",
		    __func__);
		return;
	}
	/* Payload length: descriptor length minus the 4-byte status word. */
	len = (le32toh(desc->len) & 0x3fff) - 4;
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/*
	 * Map the calibration code to a save slot, but only when this
	 * chip variant actually needs that calibration.
	 */
	switch (calib->code) {
	case IWN5000_PHY_CALIB_DC:
		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_DC)
			idx = 0;
		break;
	case IWN5000_PHY_CALIB_LO:
		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_LO)
			idx = 1;
		break;
	case IWN5000_PHY_CALIB_TX_IQ:
		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ)
			idx = 2;
		break;
	case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ_PERIODIC)
			idx = 3;
		break;
	case IWN5000_PHY_CALIB_BASE_BAND:
		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_BASE_BAND)
			idx = 4;
		break;
	}
	if (idx == -1)	/* Ignore other results. */
		return;

	/*
	 * Save calibration result; it is replayed to the runtime firmware
	 * later.  Any previous result for this slot is replaced.
	 * NOTE(review): the kmalloc() return value is not checked --
	 * M_INTWAIT presumably cannot fail here, confirm.
	 */
	if (sc->calibcmd[idx].buf != NULL)
		kfree(sc->calibcmd[idx].buf, M_DEVBUF);
	sc->calibcmd[idx].buf = kmalloc(len, M_DEVBUF, M_INTWAIT);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "saving calibration result idx=%d, code=%d len=%d\n", idx, calib->code, len);
	sc->calibcmd[idx].len = len;
	memcpy(sc->calibcmd[idx].buf, calib, len);
}

/*
 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
 * The latter is sent by the firmware after each received beacon.
 */
static void
iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
	int temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Ignore statistics received during a scan. */
	if (vap->iv_state != IEEE80211_S_RUN ||
	    (ic->ic_flags & IEEE80211_F_SCAN)){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received during calib\n",
		    __func__);
		return;
	}

	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received statistics, cmd %d\n",
	    __func__, desc->type);
	sc->calib_cnt = 0;	/* Reset TX power calibration timeout. */

	/* Test if temperature has changed. */
	if (stats->general.temp != sc->rawtemp) {
		/* Convert "raw" temperature to degC. */
		sc->rawtemp = stats->general.temp;
		temp = ops->get_temperature(sc);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
		    __func__, temp);

		/* Update TX power if need be (4965AGN only). */
		if (sc->hw_type == IWN_HW_REV_TYPE_4965)
			iwn4965_power_calibration(sc, temp);
	}

	if (desc->type != IWN_BEACON_STATISTICS)
		return;	/* Reply to a statistics request. */

	sc->noise = iwn_get_noise(&stats->rx.general);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);

	/* Test that RSSI and noise are present in stats report. */
	if (le32toh(stats->rx.general.flags) != 1) {
		DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
		    "received statistics without RSSI");
		return;
	}

	/*
	 * Feed the beacon statistics into whichever calibration stage is
	 * currently active: initial noise collection after association,
	 * or continuous sensitivity tuning while running.
	 */
	if (calib->state == IWN_CALIB_STATE_ASSOC)
		iwn_collect_noise(sc, &stats->rx.general);
	else if (calib->state == IWN_CALIB_STATE_RUN)
		iwn_tune_sensitivity(sc, &stats->rx);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
}

/*
 * Process a TX_DONE firmware notification.  Unfortunately, the 4965AGN
 * and 5000 adapters have different incompatible TX status formats.
 */
static void
iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
	struct iwn_tx_ring *ring;
	int qid;

	qid = desc->qid & 0xf;
	ring = &sc->txq[qid];

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
	    "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
	    __func__, desc->qid, desc->idx, stat->ackfailcnt,
	    stat->btkillcnt, stat->rate, le16toh(stat->duration),
	    le32toh(stat->status));

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
	/* Aggregation queues are completed via the A-MPDU path. */
	if (qid >= sc->firstaggqueue) {
		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
		    &stat->status);
	} else {
		/* 4965: status word is 32-bit; low byte is the TX status. */
		iwn_tx_done(sc, desc, stat->ackfailcnt,
		    le32toh(stat->status) & 0xff);
	}
}

static void
iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
	struct iwn_tx_ring *ring;
	int qid;

	qid = desc->qid & 0xf;
	ring = &sc->txq[qid];

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
	    "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
	    __func__, desc->qid, desc->idx, stat->ackfailcnt,
	    stat->btkillcnt, stat->rate, le16toh(stat->duration),
	    le32toh(stat->status));

#ifdef notyet
	/* Reset TX scheduler slot. */
	iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
#endif

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
	/* Aggregation queues are completed via the A-MPDU path. */
	if (qid >= sc->firstaggqueue) {
		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
		    &stat->status);
	} else {
		/* 5000: status word is 16-bit; low byte is the TX status. */
		iwn_tx_done(sc, desc, stat->ackfailcnt,
		    le16toh(stat->status) & 0xff);
	}
}

/*
 * Adapter-independent backend for TX_DONE firmware notifications.
 */
static void
iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
    uint8_t status)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
	struct iwn_tx_data *data = &ring->data[desc->idx];
	struct mbuf *m;
	struct ieee80211_node *ni;
	struct ieee80211vap *vap;

	KASSERT(data->ni != NULL, ("no node"));

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, data->map);
	m = data->m, data->m = NULL;
	ni = data->ni, data->ni = NULL;
	vap = ni->ni_vap;

	/*
	 * Update rate control statistics for the node.
	 */
	if (status & IWN_TX_FAIL) {
		IFNET_STAT_INC(ifp, oerrors, 1);
		ieee80211_ratectl_tx_complete(vap, ni,
		    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
	} else {
		IFNET_STAT_INC(ifp, opackets, 1);
		ieee80211_ratectl_tx_complete(vap, ni,
		    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
	}

	/*
	 * Channels marked for "radar" require traffic to be received
	 * to unlock before we can transmit.  Until traffic is seen
	 * any attempt to transmit is returned immediately with status
	 * set to IWN_TX_FAIL_TX_LOCKED.  Unfortunately this can easily
	 * happen on first authenticate after scanning.  To workaround
	 * this we ignore a failure of this sort in AUTH state so the
	 * 802.11 layer will fall back to using a timeout to wait for
	 * the AUTH reply.  This allows the firmware time to see
	 * traffic so a subsequent retry of AUTH succeeds.  It's
	 * unclear why the firmware does not maintain state for
	 * channels recently visited as this would allow immediate
	 * use of the channel after a scan (where we see traffic).
	 */
	if (status == IWN_TX_FAIL_TX_LOCKED &&
	    ni->ni_vap->iv_state == IEEE80211_S_AUTH)
		ieee80211_tx_complete(ni, m, 0);
	else
		ieee80211_tx_complete(ni, m,
		    (status & IWN_TX_FAIL) != 0);

	/* Kick the transmit path again once the ring has room. */
	sc->sc_tx_timer = 0;
	if (--ring->queued < IWN_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
			ifq_clr_oactive(&ifp->if_snd);
			iwn_start_locked(ifp);
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

}

/*
 * Process a "command done" firmware notification.  This is where we wakeup
 * processes waiting for a synchronous command completion.
 */
static void
iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
{
	struct iwn_tx_ring *ring;
	struct iwn_tx_data *data;
	int cmd_queue_num;

	/* PAN-capable firmware uses a different command queue. */
	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
		cmd_queue_num = IWN_PAN_CMD_QUEUE;
	else
		cmd_queue_num = IWN_CMD_QUEUE_NUM;

	if ((desc->qid & IWN_RX_DESC_QID_MSK) != cmd_queue_num)
		return;	/* Not a command ack. */

	ring = &sc->txq[cmd_queue_num];
	data = &ring->data[desc->idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake any thread sleeping on this command slot (iwn_cmd). */
	wakeup(&ring->desc[desc->idx]);
}

/*
 * Process the TX status of a completed aggregate (A-MPDU): reclaim ring
 * slots up to the reported sequence number and record the per-frame ack
 * bitmap for the subsequent compressed BlockAck accounting.
 */
static void
iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes,
    void *stat)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct iwn_tx_ring *ring = &sc->txq[qid];
	struct iwn_tx_data *data;
	struct mbuf *m;
	struct iwn_node *wn;
	struct ieee80211_node *ni;
	struct ieee80211_tx_ampdu *tap;
	uint64_t bitmap;
	uint32_t *status = stat;
	uint16_t *aggstatus = stat;
	uint16_t ssn;
	uint8_t tid;
	int bit, i, lastidx, *res, seqno, shift, start;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	if (nframes == 1) {
		if ((*status & 0xff) != 1 && (*status & 0xff) != 2) {
#ifdef NOT_YET
			kprintf("ieee80211_send_bar()\n");
#endif
			/*
			 * If we completely fail a transmit, make sure a
			 * notification is pushed up to the rate control
			 * layer.
			 */
			tap = sc->qid2tap[qid];
			tid = tap->txa_ac;
			wn = (void *)tap->txa_ni;
			ni = tap->txa_ni;
			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
			    IEEE80211_RATECTL_TX_FAILURE, &nframes, NULL);
		}
	}

	/*
	 * Build a bitmap of the frames the firmware reported as sent
	 * (per-frame status pairs: [status, sequence]).  Frames whose
	 * status has the 0xc bits set were not transmitted.
	 */
	bitmap = 0;
	start = idx;
	for (i = 0; i < nframes; i++) {
		if (le16toh(aggstatus[i * 2]) & 0xc)
			continue;

		idx = le16toh(aggstatus[2*i + 1]) & 0xff;
		bit = idx - start;
		shift = 0;
		/* Handle wrap-around of the 256-entry sequence window. */
		if (bit >= 64) {
			shift = 0x100 - idx + start;
			bit = 0;
			start = idx;
		} else if (bit <= -64)
			bit = 0x100 - start + idx;
		else if (bit < 0) {
			shift = start - idx;
			start = idx;
			bit = 0;
		}
		bitmap = bitmap << shift;
		bitmap |= 1ULL << bit;
	}
	/* Stash the aggregate layout for iwn_rx_compressed_ba(). */
	tap = sc->qid2tap[qid];
	tid = tap->txa_ac;
	wn = (void *)tap->txa_ni;
	wn->agg[tid].bitmap = bitmap;
	wn->agg[tid].startidx = start;
	wn->agg[tid].nframes = nframes;

	/* Session being torn down: remember state for queue stop below. */
	res = NULL;
	ssn = 0;
	if (!IEEE80211_AMPDU_RUNNING(tap)) {
		res = tap->txa_private;
		ssn = tap->txa_start & 0xfff;
	}

	/* The sequence number after the status pairs bounds the reclaim. */
	seqno = le32toh(*(status + nframes)) & 0xfff;
	for (lastidx = (seqno & 0xff); ring->read != lastidx;) {
		data = &ring->data[ring->read];

		/* Unmap and free mbuf. */
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m = data->m, data->m = NULL;
		ni = data->ni, data->ni = NULL;

		KASSERT(ni != NULL, ("no node"));
		KASSERT(m != NULL, ("no mbuf"));

		ieee80211_tx_complete(ni, m, 1);

		ring->queued--;
		ring->read = (ring->read + 1) % IWN_TX_RING_COUNT;
	}

	/* Ring drained during teardown: stop the HW aggregation queue. */
	if (ring->queued == 0 && res != NULL) {
		iwn_nic_lock(sc);
		ops->ampdu_tx_stop(sc, qid, tid, ssn);
		iwn_nic_unlock(sc);
		sc->qid2tap[qid] = NULL;
		kfree(res, M_DEVBUF);
		return;
	}

	/* Kick the transmit path again once the ring has room. */
	sc->sc_tx_timer = 0;
	if (ring->queued < IWN_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
			ifq_clr_oactive(&ifp->if_snd);
			iwn_start_locked(ifp);
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

}

/*
 * Process an INT_FH_RX or INT_SW_RX interrupt.
 */
static void
iwn_notif_intr(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	uint16_t hw;

	/* Pick up the RX status area the firmware writes via DMA. */
	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* Process every descriptor the firmware has closed so far. */
	hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwn_rx_desc *desc;

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);
		desc = mtod(data->m, struct iwn_rx_desc *);

		DPRINTF(sc, IWN_DEBUG_RECV,
		    "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n",
		    __func__, sc->rxq.cur, desc->qid & 0xf, desc->idx, desc->flags,
		    desc->type, iwn_intr_str(desc->type),
		    le16toh(desc->len));

		if (!(desc->qid & IWN_UNSOLICITED_RX_NOTIF))	/* Reply to a command. */
			iwn_cmd_done(sc, desc);

		switch (desc->type) {
		case IWN_RX_PHY:
			iwn_rx_phy(sc, desc, data);
			break;

		case IWN_RX_DONE:		/* 4965AGN only. */
		case IWN_MPDU_RX_DONE:
			/* An 802.11 frame has been received. */
			iwn_rx_done(sc, desc, data);
			break;

		case IWN_RX_COMPRESSED_BA:
			/* A Compressed BlockAck has been received. */
			iwn_rx_compressed_ba(sc, desc, data);
			break;

		case IWN_TX_DONE:
			/* An 802.11 frame has been transmitted. */
			ops->tx_done(sc, desc, data);
			break;

		case IWN_RX_STATISTICS:
		case IWN_BEACON_STATISTICS:
			iwn_rx_statistics(sc, desc, data);
			break;

		case IWN_BEACON_MISSED:
		{
			struct iwn_beacon_missed *miss =
			    (struct iwn_beacon_missed *)(desc + 1);
			int misses;

			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			misses = le32toh(miss->consecutive);

			DPRINTF(sc, IWN_DEBUG_STATE,
			    "%s: beacons missed %d/%d\n", __func__,
			    misses, le32toh(miss->total));
			/*
			 * If more than 5 consecutive beacons are missed,
			 * reinitialize the sensitivity state machine.
			 */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (misses > 5)
					(void)iwn_init_sensitivity(sc);
				if (misses >= vap->iv_bmissthreshold) {
					ieee80211_beacon_miss(ic);
				}
			}
			break;
		}
		case IWN_UC_READY:
		{
			struct iwn_ucode_info *uc =
			    (struct iwn_ucode_info *)(desc + 1);

			/* The microcontroller is ready. */
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "microcode alive notification version=%d.%d "
			    "subtype=%x alive=%x\n", uc->major, uc->minor,
			    uc->subtype, le32toh(uc->valid));

			if (le32toh(uc->valid) != 1) {
				device_printf(sc->sc_dev,
				    "microcontroller initialization failed");
				break;
			}
			if (uc->subtype == IWN_UCODE_INIT) {
				/* Save microcontroller report. */
				memcpy(&sc->ucode_info, uc, sizeof (*uc));
			}
			/* Save the address of the error log in SRAM. */
			sc->errptr = le32toh(uc->errptr);
			break;
		}
		case IWN_STATE_CHANGED:
		{
			/*
			 * State change allows hardware switch change to be
			 * noted.  However, we handle this in iwn_intr as we
			 * get both the enable/disable intr.
			 */
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
#ifdef IWN_DEBUG
			uint32_t *status = (uint32_t *)(desc + 1);
			DPRINTF(sc, IWN_DEBUG_INTR | IWN_DEBUG_STATE,
			    "state changed to %x\n",
			    le32toh(*status));
#endif
			break;
		}
		case IWN_START_SCAN:
		{
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
#ifdef IWN_DEBUG
			struct iwn_start_scan *scan =
			    (struct iwn_start_scan *)(desc + 1);
			DPRINTF(sc, IWN_DEBUG_ANY,
			    "%s: scanning channel %d status %x\n",
			    __func__, scan->chan, le32toh(scan->status));
#endif
			break;
		}
		case IWN_STOP_SCAN:
		{
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
#ifdef IWN_DEBUG
			struct iwn_stop_scan *scan =
			    (struct iwn_stop_scan *)(desc + 1);
			DPRINTF(sc, IWN_DEBUG_STATE | IWN_DEBUG_SCAN,
			    "scan finished nchan=%d status=%d chan=%d\n",
			    scan->nchan, scan->status, scan->chan);
#endif
			sc->sc_is_scanning = 0;
			ieee80211_scan_next(vap);
			break;
		}
		case IWN5000_CALIBRATION_RESULT:
			iwn5000_rx_calib_results(sc, desc, data);
			break;

		case IWN5000_CALIBRATION_DONE:
			sc->sc_flags |= IWN_FLAG_CALIB_DONE;
			wakeup(sc);
			break;
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
	}

	/* Tell the firmware what we have processed. */
	hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
	IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
}

/*
 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
 * from power-down sleep mode.
 */
static void
iwn_wakeup_intr(struct iwn_softc *sc)
{
	int qid;

	DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
	    __func__);

	/* Wakeup RX and TX rings. */
	IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
	for (qid = 0; qid < sc->ntxqs; qid++) {
		struct iwn_tx_ring *ring = &sc->txq[qid];
		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
	}
}

/*
 * Handle an RF kill switch toggle: report the new radio state and
 * schedule the corresponding radio on/off task.
 */
static void
iwn_rftoggle_intr(struct iwn_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);

	device_printf(sc->sc_dev, "RF switch: radio %s\n",
	    (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
	if (tmp & IWN_GP_CNTRL_RFKILL)
		ieee80211_runtask(ic, &sc->sc_radioon_task);
	else
		ieee80211_runtask(ic, &sc->sc_radiooff_task);
}

/*
 * Dump the error log of the firmware when a firmware panic occurs.  Although
 * we can't debug the firmware because it is neither open source nor free, it
 * can help us to identify certain classes of problems.
 */
static void
iwn_fatal_intr(struct iwn_softc *sc)
{
	struct iwn_fw_dump dump;
	int i;

	/* Force a complete recalibration on next init. */
	sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;

	/* Check that the error log address is valid. */
	if (sc->errptr < IWN_FW_DATA_BASE ||
	    sc->errptr + sizeof (dump) >
	    IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
		kprintf("%s: bad firmware error log address 0x%08x\n", __func__,
		    sc->errptr);
		return;
	}
	if (iwn_nic_lock(sc) != 0) {
		kprintf("%s: could not read firmware error log\n", __func__);
		return;
	}
	/* Read firmware error log from SRAM. */
	iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
	    sizeof (dump) / sizeof (uint32_t));
	iwn_nic_unlock(sc);

	if (dump.valid == 0) {
		kprintf("%s: firmware error log is empty\n", __func__);
		return;
	}
	kprintf("firmware error log:\n");
	kprintf("  error type      = \"%s\" (0x%08X)\n",
	    (dump.id < nitems(iwn_fw_errmsg)) ?
	    iwn_fw_errmsg[dump.id] : "UNKNOWN",
	    dump.id);
	kprintf("  program counter = 0x%08X\n", dump.pc);
	kprintf("  source line     = 0x%08X\n", dump.src_line);
	kprintf("  error data      = 0x%08X%08X\n",
	    dump.error_data[0], dump.error_data[1]);
	kprintf("  branch link     = 0x%08X%08X\n",
	    dump.branch_link[0], dump.branch_link[1]);
	kprintf("  interrupt link  = 0x%08X%08X\n",
	    dump.interrupt_link[0], dump.interrupt_link[1]);
	kprintf("  time            = %u\n", dump.time[0]);

	/* Dump driver status (TX and RX rings) while we're here. */
	kprintf("driver status:\n");
	for (i = 0; i < sc->ntxqs; i++) {
		struct iwn_tx_ring *ring = &sc->txq[i];
		kprintf("  tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
		    i, ring->qid, ring->cur, ring->queued);
	}
	kprintf("  rx ring: cur=%d\n", sc->rxq.cur);
}

/*
 * Main interrupt handler: reads and acknowledges the pending interrupt
 * cause(s), then dispatches to the notification/wakeup/fatal handlers.
 */
static void
iwn_intr(void *arg)
{
	struct iwn_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t r1, r2, tmp;

	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);

	/* Read interrupts from ICT (fast) or from registers (slow). */
	if (sc->sc_flags & IWN_FLAG_USE_ICT) {
		/* Accumulate and acknowledge all pending ICT entries. */
		tmp = 0;
		while (sc->ict[sc->ict_cur] != 0) {
			tmp |= sc->ict[sc->ict_cur];
			sc->ict[sc->ict_cur] = 0;	/* Acknowledge. */
			sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
		}
		tmp = le32toh(tmp);
		if (tmp == 0xffffffff)	/* Shouldn't happen. */
			tmp = 0;
		else if (tmp & 0xc0000)	/* Workaround a HW bug. */
			tmp |= 0x8000;
		/* Spread the compacted ICT bits back into INT layout. */
		r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
		r2 = 0;	/* Unused. */
	} else {
		r1 = IWN_READ(sc, IWN_INT);
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			return;	/* Hardware gone! */
		r2 = IWN_READ(sc, IWN_FH_INT);
	}

	DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n"
    , r1, r2);

	if (r1 == 0 && r2 == 0)
		goto done;	/* Interrupt not for us. */

	/* Acknowledge interrupts. */
	IWN_WRITE(sc, IWN_INT, r1);
	if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
		IWN_WRITE(sc, IWN_FH_INT, r2);

	if (r1 & IWN_INT_RF_TOGGLED) {
		iwn_rftoggle_intr(sc);
		goto done;
	}
	if (r1 & IWN_INT_CT_REACHED) {
		device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
		    __func__);
	}
	if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
		device_printf(sc->sc_dev, "%s: fatal firmware error\n",
		    __func__);
#ifdef IWN_DEBUG
		iwn_debug_register(sc);
#endif
		/* Dump firmware error log and stop. */
		iwn_fatal_intr(sc);
		ifp->if_flags &= ~IFF_UP;
		iwn_stop_locked(sc);
		goto done;
	}
	if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
	    (r2 & IWN_FH_INT_RX)) {
		if (sc->sc_flags & IWN_FLAG_USE_ICT) {
			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
				IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
			/* Disable periodic RX while draining the ring. */
			IWN_WRITE_1(sc, IWN_INT_PERIODIC,
			    IWN_INT_PERIODIC_DIS);
			iwn_notif_intr(sc);
			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
				IWN_WRITE_1(sc, IWN_INT_PERIODIC,
				    IWN_INT_PERIODIC_ENA);
			}
		} else
			iwn_notif_intr(sc);
	}

	if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
		if (sc->sc_flags & IWN_FLAG_USE_ICT)
			IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
		wakeup(sc);	/* FH DMA transfer completed. */
	}

	if (r1 & IWN_INT_ALIVE)
		wakeup(sc);	/* Firmware is alive. */

	if (r1 & IWN_INT_WAKEUP)
		iwn_wakeup_intr(sc);

done:
	/* Re-enable interrupts. */
	if (ifp->if_flags & IFF_UP)
		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
}

/*
 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
 * 5000 adapters use a slightly different format).
3907 */ 3908 static void 3909 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 3910 uint16_t len) 3911 { 3912 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 3913 3914 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 3915 3916 *w = htole16(len + 8); 3917 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3918 BUS_DMASYNC_PREWRITE); 3919 if (idx < IWN_SCHED_WINSZ) { 3920 *(w + IWN_TX_RING_COUNT) = *w; 3921 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3922 BUS_DMASYNC_PREWRITE); 3923 } 3924 } 3925 3926 static void 3927 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 3928 uint16_t len) 3929 { 3930 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 3931 3932 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 3933 3934 *w = htole16(id << 12 | (len + 8)); 3935 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3936 BUS_DMASYNC_PREWRITE); 3937 if (idx < IWN_SCHED_WINSZ) { 3938 *(w + IWN_TX_RING_COUNT) = *w; 3939 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3940 BUS_DMASYNC_PREWRITE); 3941 } 3942 } 3943 3944 #ifdef notyet 3945 static void 3946 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 3947 { 3948 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 3949 3950 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 3951 3952 *w = (*w & htole16(0xf000)) | htole16(1); 3953 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3954 BUS_DMASYNC_PREWRITE); 3955 if (idx < IWN_SCHED_WINSZ) { 3956 *(w + IWN_TX_RING_COUNT) = *w; 3957 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3958 BUS_DMASYNC_PREWRITE); 3959 } 3960 } 3961 #endif 3962 3963 /* 3964 * Check whether OFDM 11g protection will be enabled for the given rate. 3965 * 3966 * The original driver code only enabled protection for OFDM rates. 3967 * It didn't check to see whether it was operating in 11a or 11bg mode. 
 */
static int
iwn_check_rate_needs_protection(struct iwn_softc *sc,
    struct ieee80211vap *vap, uint8_t rate)
{
	struct ieee80211com *ic = vap->iv_ic;

	/*
	 * Not in 2GHz mode? Then there's no need to enable OFDM
	 * 11bg protection.
	 */
	if (! IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
		return (0);
	}

	/*
	 * 11bg protection not enabled? Then don't use it.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) == 0)
		return (0);

	/*
	 * If it's an 11n rate, then for now we enable
	 * protection.
	 */
	if (rate & IEEE80211_RATE_MCS) {
		return (1);
	}

	/*
	 * Do a rate table lookup.  If the PHY is CCK,
	 * don't do protection.
	 */
	if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_CCK)
		return (0);

	/*
	 * Yup, enable protection.
	 */
	return (1);
}

/*
 * return a value between 0 and IWN_MAX_TX_RETRIES-1 as an index into
 * the link quality table that reflects this particular entry.
 */
static int
iwn_tx_rate_to_linkq_offset(struct iwn_softc *sc, struct ieee80211_node *ni,
    uint8_t rate)
{
	struct ieee80211_rateset *rs;
	int is_11n;
	int nr;
	int i;
	uint8_t cmp_rate;

	/*
	 * Figure out if we're using 11n or not here.
	 */
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0)
		is_11n = 1;
	else
		is_11n = 0;

	/*
	 * Use the correct rate table.
	 */
	if (is_11n) {
		rs = (struct ieee80211_rateset *) &ni->ni_htrates;
		nr = ni->ni_htrates.rs_nrates;
	} else {
		rs = &ni->ni_rates;
		nr = rs->rs_nrates;
	}

	/*
	 * Find the relevant link quality entry in the table.
	 */
	for (i = 0; i < nr && i < IWN_MAX_TX_RETRIES - 1 ; i++) {
		/*
		 * The link quality table index starts at 0 == highest
		 * rate, so we walk the rate table backwards.
		 */
		cmp_rate = rs->rs_rates[(nr - 1) - i];
		if (rate & IEEE80211_RATE_MCS)
			cmp_rate |= IEEE80211_RATE_MCS;

#if 0
		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: idx %d: nr=%d, rate=0x%02x, rateentry=0x%02x\n",
		    __func__,
		    i,
		    nr,
		    rate,
		    cmp_rate);
#endif

		if (cmp_rate == rate)
			return (i);
	}

	/* Failed? Start at the end */
	return (IWN_MAX_TX_RETRIES - 1);
}

/*
 * Build and enqueue the TX command and DMA mappings for an outgoing
 * 802.11 frame on the appropriate TX ring.
 */
static int
iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
	struct iwn_ops *ops = &sc->ops;
	const struct ieee80211_txparam *tp;
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwn_node *wn = (void *)ni;
	struct iwn_tx_ring *ring;
	struct iwn_tx_desc *desc;
	struct iwn_tx_data *data;
	struct iwn_tx_cmd *cmd;
	struct iwn_cmd_data *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	uint32_t flags;
	uint16_t qos;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
	uint8_t tid, type;
	int ac, i, totlen, error, pad, nsegs = 0, rate;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* Select EDCA Access Category and TX ring for this frame. */
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
		tid = qos & IEEE80211_QOS_TID;
	} else {
		qos = 0;
		tid = 0;
	}
	ac = M_WME_GETAC(m);
	if (m->m_flags & M_AMPDU_MPDU) {
		uint16_t seqno;
		struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];

		if (!IEEE80211_AMPDU_RUNNING(tap)) {
			m_freem(m);
			return EINVAL;
		}

		/*
		 * Queue this frame to the hardware ring that we've
		 * negotiated AMPDU TX on.
4122 * 4123 * Note that the sequence number must match the TX slot 4124 * being used! 4125 */ 4126 ac = *(int *)tap->txa_private; 4127 seqno = ni->ni_txseqs[tid]; 4128 *(uint16_t *)wh->i_seq = 4129 htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); 4130 ring = &sc->txq[ac]; 4131 if ((seqno % 256) != ring->cur) { 4132 device_printf(sc->sc_dev, 4133 "%s: m=%p: seqno (%d) (%d) != ring index (%d) !\n", 4134 __func__, 4135 m, 4136 seqno, 4137 seqno % 256, 4138 ring->cur); 4139 } 4140 ni->ni_txseqs[tid]++; 4141 } 4142 ring = &sc->txq[ac]; 4143 desc = &ring->desc[ring->cur]; 4144 data = &ring->data[ring->cur]; 4145 4146 /* Choose a TX rate index. */ 4147 tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; 4148 if (type == IEEE80211_FC0_TYPE_MGT) 4149 rate = tp->mgmtrate; 4150 else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) 4151 rate = tp->mcastrate; 4152 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 4153 rate = tp->ucastrate; 4154 else if (m->m_flags & M_EAPOL) 4155 rate = tp->mgmtrate; 4156 else { 4157 /* XXX pass pktlen */ 4158 (void) ieee80211_ratectl_rate(ni, NULL, 0); 4159 rate = ni->ni_txrate; 4160 } 4161 4162 /* Encrypt the frame if need be. */ 4163 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 4164 /* Retrieve key for TX. */ 4165 k = ieee80211_crypto_encap(ni, m); 4166 if (k == NULL) { 4167 m_freem(m); 4168 return ENOBUFS; 4169 } 4170 /* 802.11 header may have moved. */ 4171 wh = mtod(m, struct ieee80211_frame *); 4172 } 4173 totlen = m->m_pkthdr.len; 4174 4175 if (ieee80211_radiotap_active_vap(vap)) { 4176 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 4177 4178 tap->wt_flags = 0; 4179 tap->wt_rate = rate; 4180 if (k != NULL) 4181 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 4182 4183 ieee80211_radiotap_tx(vap, m); 4184 } 4185 4186 /* Prepare TX firmware command. 
*/ 4187 cmd = &ring->cmd[ring->cur]; 4188 cmd->code = IWN_CMD_TX_DATA; 4189 cmd->flags = 0; 4190 cmd->qid = ring->qid; 4191 cmd->idx = ring->cur; 4192 4193 tx = (struct iwn_cmd_data *)cmd->data; 4194 /* NB: No need to clear tx, all fields are reinitialized here. */ 4195 tx->scratch = 0; /* clear "scratch" area */ 4196 4197 flags = 0; 4198 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 4199 /* Unicast frame, check if an ACK is expected. */ 4200 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 4201 IEEE80211_QOS_ACKPOLICY_NOACK) 4202 flags |= IWN_TX_NEED_ACK; 4203 } 4204 if ((wh->i_fc[0] & 4205 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 4206 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR)) 4207 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */ 4208 4209 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 4210 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */ 4211 4212 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 4213 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 4214 /* NB: Group frames are sent using CCK in 802.11b/g. */ 4215 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 4216 flags |= IWN_TX_NEED_RTS; 4217 } else if (iwn_check_rate_needs_protection(sc, vap, rate)) { 4218 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 4219 flags |= IWN_TX_NEED_CTS; 4220 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 4221 flags |= IWN_TX_NEED_RTS; 4222 } 4223 4224 /* XXX HT protection? */ 4225 4226 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) { 4227 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4228 /* 5000 autoselects RTS/CTS or CTS-to-self. 
*/ 4229 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS); 4230 flags |= IWN_TX_NEED_PROTECTION; 4231 } else 4232 flags |= IWN_TX_FULL_TXOP; 4233 } 4234 } 4235 4236 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 4237 type != IEEE80211_FC0_TYPE_DATA) 4238 tx->id = sc->broadcast_id; 4239 else 4240 tx->id = wn->id; 4241 4242 if (type == IEEE80211_FC0_TYPE_MGT) { 4243 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 4244 4245 /* Tell HW to set timestamp in probe responses. */ 4246 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 4247 flags |= IWN_TX_INSERT_TSTAMP; 4248 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 4249 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 4250 tx->timeout = htole16(3); 4251 else 4252 tx->timeout = htole16(2); 4253 } else 4254 tx->timeout = htole16(0); 4255 4256 if (hdrlen & 3) { 4257 /* First segment length must be a multiple of 4. */ 4258 flags |= IWN_TX_NEED_PADDING; 4259 pad = 4 - (hdrlen & 3); 4260 } else 4261 pad = 0; 4262 4263 tx->len = htole16(totlen); 4264 tx->tid = tid; 4265 tx->rts_ntries = 60; 4266 tx->data_ntries = 15; 4267 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 4268 tx->rate = iwn_rate_to_plcp(sc, ni, rate); 4269 if (tx->id == sc->broadcast_id) { 4270 /* Group or management frame. */ 4271 tx->linkq = 0; 4272 } else { 4273 tx->linkq = iwn_tx_rate_to_linkq_offset(sc, ni, rate); 4274 flags |= IWN_TX_LINKQ; /* enable MRR */ 4275 } 4276 4277 /* Set physical address of "scratch area". */ 4278 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 4279 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 4280 4281 /* Copy 802.11 header in TX command. */ 4282 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 4283 4284 /* Trim 802.11 header. 
*/ 4285 m_adj(m, hdrlen); 4286 tx->security = 0; 4287 tx->flags = htole32(flags); 4288 4289 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, 4290 m, segs, IWN_MAX_SCATTER - 1, 4291 &nsegs, BUS_DMA_NOWAIT); 4292 if (error != 0) { 4293 if (error != EFBIG) { 4294 device_printf(sc->sc_dev, 4295 "%s: can't map mbuf (error %d)\n", __func__, error); 4296 m_freem(m); 4297 return error; 4298 } 4299 /* Too many DMA segments, linearize mbuf. */ 4300 m1 = m_defrag(m, MB_DONTWAIT); 4301 if (m1 == NULL) { 4302 device_printf(sc->sc_dev, 4303 "%s: could not defrag mbuf\n", __func__); 4304 m_freem(m); 4305 return ENOBUFS; 4306 } 4307 m = m1; 4308 4309 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, 4310 data->map, m, segs, 4311 IWN_MAX_SCATTER - 1, 4312 &nsegs, BUS_DMA_NOWAIT); 4313 if (error != 0) { 4314 device_printf(sc->sc_dev, 4315 "%s: can't map mbuf (error %d)\n", __func__, error); 4316 m_freem(m); 4317 return error; 4318 } 4319 } 4320 4321 data->m = m; 4322 data->ni = ni; 4323 4324 DPRINTF(sc, IWN_DEBUG_XMIT, 4325 "%s: qid %d idx %d len %d nsegs %d rate %04x plcp 0x%08x\n", 4326 __func__, 4327 ring->qid, 4328 ring->cur, 4329 m->m_pkthdr.len, 4330 nsegs, 4331 rate, 4332 tx->rate); 4333 4334 /* Fill TX descriptor. */ 4335 desc->nsegs = 1; 4336 if (m->m_len != 0) 4337 desc->nsegs += nsegs; 4338 /* First DMA segment is used by the TX command. */ 4339 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 4340 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 4341 (4 + sizeof (*tx) + hdrlen + pad) << 4); 4342 /* Other DMA segments are for data payload. 
*/ 4343 seg = &segs[0]; 4344 for (i = 1; i <= nsegs; i++) { 4345 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); 4346 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | 4347 seg->ds_len << 4); 4348 seg++; 4349 } 4350 4351 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 4352 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 4353 BUS_DMASYNC_PREWRITE); 4354 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 4355 BUS_DMASYNC_PREWRITE); 4356 4357 /* Update TX scheduler. */ 4358 if (ring->qid >= sc->firstaggqueue) 4359 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 4360 4361 /* Kick TX ring. */ 4362 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 4363 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 4364 4365 /* Mark TX ring as full if we reach a certain threshold. */ 4366 if (++ring->queued > IWN_TX_RING_HIMARK) 4367 sc->qfullmsk |= 1 << ring->qid; 4368 4369 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4370 4371 return 0; 4372 } 4373 4374 static int 4375 iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m, 4376 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 4377 { 4378 struct iwn_ops *ops = &sc->ops; 4379 // struct ifnet *ifp = sc->sc_ifp; 4380 struct ieee80211vap *vap = ni->ni_vap; 4381 // struct ieee80211com *ic = ifp->if_l2com; 4382 struct iwn_tx_cmd *cmd; 4383 struct iwn_cmd_data *tx; 4384 struct ieee80211_frame *wh; 4385 struct iwn_tx_ring *ring; 4386 struct iwn_tx_desc *desc; 4387 struct iwn_tx_data *data; 4388 struct mbuf *m1; 4389 bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER]; 4390 uint32_t flags; 4391 u_int hdrlen; 4392 int ac, totlen, error, pad, nsegs = 0, i, rate; 4393 uint8_t type; 4394 4395 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4396 4397 wh = mtod(m, struct ieee80211_frame *); 4398 hdrlen = ieee80211_anyhdrsize(wh); 4399 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 4400 4401 ac = params->ibp_pri & 3; 4402 4403 ring = &sc->txq[ac]; 4404 
desc = &ring->desc[ring->cur]; 4405 data = &ring->data[ring->cur]; 4406 4407 /* Choose a TX rate. */ 4408 rate = params->ibp_rate0; 4409 totlen = m->m_pkthdr.len; 4410 4411 /* Prepare TX firmware command. */ 4412 cmd = &ring->cmd[ring->cur]; 4413 cmd->code = IWN_CMD_TX_DATA; 4414 cmd->flags = 0; 4415 cmd->qid = ring->qid; 4416 cmd->idx = ring->cur; 4417 4418 tx = (struct iwn_cmd_data *)cmd->data; 4419 /* NB: No need to clear tx, all fields are reinitialized here. */ 4420 tx->scratch = 0; /* clear "scratch" area */ 4421 4422 flags = 0; 4423 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 4424 flags |= IWN_TX_NEED_ACK; 4425 if (params->ibp_flags & IEEE80211_BPF_RTS) { 4426 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4427 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 4428 flags &= ~IWN_TX_NEED_RTS; 4429 flags |= IWN_TX_NEED_PROTECTION; 4430 } else 4431 flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP; 4432 } 4433 if (params->ibp_flags & IEEE80211_BPF_CTS) { 4434 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4435 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 4436 flags &= ~IWN_TX_NEED_CTS; 4437 flags |= IWN_TX_NEED_PROTECTION; 4438 } else 4439 flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP; 4440 } 4441 if (type == IEEE80211_FC0_TYPE_MGT) { 4442 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 4443 4444 /* Tell HW to set timestamp in probe responses. */ 4445 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 4446 flags |= IWN_TX_INSERT_TSTAMP; 4447 4448 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 4449 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 4450 tx->timeout = htole16(3); 4451 else 4452 tx->timeout = htole16(2); 4453 } else 4454 tx->timeout = htole16(0); 4455 4456 if (hdrlen & 3) { 4457 /* First segment length must be a multiple of 4. 
*/ 4458 flags |= IWN_TX_NEED_PADDING; 4459 pad = 4 - (hdrlen & 3); 4460 } else 4461 pad = 0; 4462 4463 if (ieee80211_radiotap_active_vap(vap)) { 4464 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 4465 4466 tap->wt_flags = 0; 4467 tap->wt_rate = rate; 4468 4469 ieee80211_radiotap_tx(vap, m); 4470 } 4471 4472 tx->len = htole16(totlen); 4473 tx->tid = 0; 4474 tx->id = sc->broadcast_id; 4475 tx->rts_ntries = params->ibp_try1; 4476 tx->data_ntries = params->ibp_try0; 4477 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 4478 tx->rate = iwn_rate_to_plcp(sc, ni, rate); 4479 4480 /* Group or management frame. */ 4481 tx->linkq = 0; 4482 4483 /* Set physical address of "scratch area". */ 4484 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 4485 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 4486 4487 /* Copy 802.11 header in TX command. */ 4488 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 4489 4490 /* Trim 802.11 header. */ 4491 m_adj(m, hdrlen); 4492 tx->security = 0; 4493 tx->flags = htole32(flags); 4494 4495 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, 4496 m, segs, 4497 IWN_MAX_SCATTER - 1, 4498 &nsegs, BUS_DMA_NOWAIT); 4499 if (error != 0) { 4500 if (error != EFBIG) { 4501 device_printf(sc->sc_dev, 4502 "%s: can't map mbuf (error %d)\n", __func__, error); 4503 m_freem(m); 4504 return error; 4505 } 4506 /* Too many DMA segments, linearize mbuf. 
*/ 4507 m1 = m_defrag(m, M_NOWAIT); 4508 if (m1 == NULL) { 4509 device_printf(sc->sc_dev, 4510 "%s: could not defrag mbuf\n", __func__); 4511 m_freem(m); 4512 return ENOBUFS; 4513 } 4514 m = m1; 4515 4516 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, 4517 data->map, m, segs, 4518 IWN_MAX_SCATTER - 1, 4519 &nsegs, BUS_DMA_NOWAIT); 4520 if (error != 0) { 4521 device_printf(sc->sc_dev, 4522 "%s: can't map mbuf (error %d)\n", __func__, error); 4523 m_freem(m); 4524 return error; 4525 } 4526 } 4527 4528 data->m = m; 4529 data->ni = ni; 4530 4531 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 4532 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs); 4533 4534 /* Fill TX descriptor. */ 4535 desc->nsegs = 1; 4536 if (m->m_len != 0) 4537 desc->nsegs += nsegs; 4538 /* First DMA segment is used by the TX command. */ 4539 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 4540 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 4541 (4 + sizeof (*tx) + hdrlen + pad) << 4); 4542 /* Other DMA segments are for data payload. */ 4543 seg = &segs[0]; 4544 for (i = 1; i <= nsegs; i++) { 4545 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); 4546 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | 4547 seg->ds_len << 4); 4548 seg++; 4549 } 4550 4551 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 4552 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 4553 BUS_DMASYNC_PREWRITE); 4554 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 4555 BUS_DMASYNC_PREWRITE); 4556 4557 /* Update TX scheduler. */ 4558 if (ring->qid >= sc->firstaggqueue) 4559 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 4560 4561 /* Kick TX ring. */ 4562 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 4563 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 4564 4565 /* Mark TX ring as full if we reach a certain threshold. 
*/ 4566 if (++ring->queued > IWN_TX_RING_HIMARK) 4567 sc->qfullmsk |= 1 << ring->qid; 4568 4569 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4570 4571 return 0; 4572 } 4573 4574 static int 4575 iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 4576 const struct ieee80211_bpf_params *params) 4577 { 4578 struct ieee80211com *ic = ni->ni_ic; 4579 struct ifnet *ifp = ic->ic_ifp; 4580 struct iwn_softc *sc = ifp->if_softc; 4581 int error = 0; 4582 4583 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4584 4585 if ((ifp->if_flags & IFF_RUNNING) == 0) { 4586 ieee80211_free_node(ni); 4587 m_freem(m); 4588 return ENETDOWN; 4589 } 4590 4591 if (params == NULL) { 4592 /* 4593 * Legacy path; interpret frame contents to decide 4594 * precisely how to send the frame. 4595 */ 4596 error = iwn_tx_data(sc, m, ni); 4597 } else { 4598 /* 4599 * Caller supplied explicit parameters to use in 4600 * sending the frame. 4601 */ 4602 error = iwn_tx_data_raw(sc, m, ni, params); 4603 } 4604 if (error != 0) { 4605 /* NB: m is reclaimed on tx failure */ 4606 ieee80211_free_node(ni); 4607 IFNET_STAT_INC(ifp, oerrors, 1); 4608 } 4609 sc->sc_tx_timer = 5; 4610 4611 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4612 4613 return error; 4614 } 4615 4616 static void 4617 iwn_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 4618 { 4619 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 4620 iwn_start_locked(ifp); 4621 } 4622 4623 static void 4624 iwn_start_locked(struct ifnet *ifp) 4625 { 4626 struct iwn_softc *sc = ifp->if_softc; 4627 struct ieee80211_node *ni; 4628 struct mbuf *m; 4629 4630 wlan_assert_serialized(); 4631 4632 if ((ifp->if_flags & IFF_RUNNING) == 0 || 4633 ifq_is_oactive(&ifp->if_snd)) 4634 return; 4635 4636 for (;;) { 4637 if (sc->qfullmsk != 0) { 4638 ifq_set_oactive(&ifp->if_snd); 4639 break; 4640 } 4641 m = ifq_dequeue(&ifp->if_snd); 4642 if (m == NULL) 4643 break; 4644 KKASSERT(M_TRAILINGSPACE(m) >= 0); 4645 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 4646 
if (iwn_tx_data(sc, m, ni) != 0) { 4647 ieee80211_free_node(ni); 4648 IFNET_STAT_INC(ifp, oerrors, 1); 4649 continue; 4650 } 4651 sc->sc_tx_timer = 5; 4652 } 4653 } 4654 4655 static void 4656 iwn_watchdog_timeout(void *arg) 4657 { 4658 struct iwn_softc *sc = arg; 4659 struct ifnet *ifp = sc->sc_ifp; 4660 struct ieee80211com *ic = ifp->if_l2com; 4661 4662 wlan_serialize_enter(); 4663 4664 KASSERT(ifp->if_flags & IFF_RUNNING, ("not running")); 4665 4666 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4667 4668 if (sc->sc_tx_timer > 0) { 4669 if (--sc->sc_tx_timer == 0) { 4670 if_printf(ifp, "device timeout\n"); 4671 ieee80211_runtask(ic, &sc->sc_reinit_task); 4672 return; 4673 } 4674 } 4675 callout_reset(&sc->watchdog_to, hz, iwn_watchdog_timeout, sc); 4676 wlan_serialize_exit(); 4677 } 4678 4679 static int 4680 iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *ucred) 4681 { 4682 struct iwn_softc *sc = ifp->if_softc; 4683 struct ieee80211com *ic = ifp->if_l2com; 4684 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4685 struct ifreq *ifr = (struct ifreq *) data; 4686 int error = 0, startall = 0, stop = 0; 4687 4688 wlan_assert_serialized(); 4689 4690 switch (cmd) { 4691 case SIOCGIFADDR: 4692 error = ether_ioctl(ifp, cmd, data); 4693 break; 4694 case SIOCSIFFLAGS: 4695 if (ifp->if_flags & IFF_UP) { 4696 if (!(ifp->if_flags & IFF_RUNNING)) { 4697 iwn_init_locked(sc); 4698 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL) 4699 startall = 1; 4700 else 4701 stop = 1; 4702 } 4703 } else { 4704 if (ifp->if_flags & IFF_RUNNING) 4705 iwn_stop_locked(sc); 4706 } 4707 if (startall) 4708 ieee80211_start_all(ic); 4709 else if (vap != NULL && stop) 4710 ieee80211_stop(vap); 4711 break; 4712 case SIOCGIFMEDIA: 4713 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 4714 break; 4715 default: 4716 error = EINVAL; 4717 break; 4718 } 4719 return error; 4720 } 4721 4722 /* 4723 * Send a command to the firmware. 
 */
static int
iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
{
	struct iwn_tx_ring *ring;
	struct iwn_tx_desc *desc;
	struct iwn_tx_data *data;
	struct iwn_tx_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	int totlen, error;
	int cmd_queue_num;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* PAN-capable NICs use a different command queue. */
	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
		cmd_queue_num = IWN_PAN_CMD_QUEUE;
	else
		cmd_queue_num = IWN_CMD_QUEUE_NUM;

	ring = &sc->txq[cmd_queue_num];
	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];
	/* 4 bytes of command header (code/flags/qid/idx) + payload. */
	totlen = 4 + size;

	if (size > sizeof cmd->data) {
		/* Command is too large to fit in a descriptor. */
		if (totlen > MJUMPAGESIZE)
			return EINVAL;
		m = m_getjcl(MB_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
		if (m == NULL)
			return ENOMEM;
		cmd = mtod(m, struct iwn_tx_cmd *);
		error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
		    totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(m);
			return error;
		}
		/* Mbuf is freed when this slot is recycled. */
		data->m = m;
	} else {
		/* Small command: use the ring's pre-mapped command slot. */
		cmd = &ring->cmd[ring->cur];
		paddr = data->cmd_paddr;
	}

	cmd->code = code;
	cmd->flags = 0;
	cmd->qid = ring->qid;
	cmd->idx = ring->cur;
	memcpy(cmd->data, buf, size);

	/* Single DMA segment covering the whole command. */
	desc->nsegs = 1;
	desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
	desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4);

	DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
	    __func__, iwn_intr_str(cmd->code), cmd->code,
	    cmd->flags, cmd->qid, cmd->idx);

	/* Sync the command buffer before the descriptor (ordering matters). */
	if (size > sizeof cmd->data) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Kick command ring. */
	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	/* Sync commands sleep until the notification wakes us on `desc'. */
	return async ? 0 : zsleep(desc, &wlan_global_serializer, 0, "iwncmd", hz);
}

/* Add a station entry to the 4965 firmware (trimmed node layout). */
static int
iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
{
	struct iwn4965_node_info hnode;
	caddr_t src, dst;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/*
	 * We use the node structure for 5000 Series internally (it is
	 * a superset of the one for 4965AGN). We thus copy the common
	 * fields before sending the command.
	 */
	src = (caddr_t)node;
	dst = (caddr_t)&hnode;
	memcpy(dst, src, 48);
	/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
	memcpy(dst + 48, src + 72, 20);
	return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
}

/* Add a station entry to a 5000-series firmware (native layout). */
static int
iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
{

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Direct mapping. */
	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
}

/*
 * Program the firmware link-quality (rate retry) table for a node:
 * antenna masks, AMPDU parameters and a 16-entry descending rate ladder.
 */
static int
iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
{
#define	RV(v)	((v) & IEEE80211_RATE_VAL)
	struct iwn_node *wn = (void *)ni;
	struct ieee80211_rateset *rs;
	struct iwn_cmd_link_quality linkq;
	uint8_t txant;
	int i, rate, txrate;
	int is_11n;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);

	memset(&linkq, 0, sizeof linkq);
	linkq.id = wn->id;
	linkq.antmsk_1stream = txant;

	/*
	 * The '2 stream' setup is a bit .. odd.
	 *
	 * For NICs that support only 1 antenna, default to IWN_ANT_AB or
	 * the firmware panics (eg Intel 5100.)
	 *
	 * For NICs that support two antennas, we use ANT_AB.
	 *
	 * For NICs that support three antennas, we use the two that
	 * wasn't the default one.
	 *
	 * XXX TODO: if bluetooth (full concurrent) is enabled, restrict
	 * this to only one antenna.
	 */

	/* So - if there's no secondary antenna, assume IWN_ANT_AB */

	/* Default - transmit on the other antennas */
	linkq.antmsk_2stream = (sc->txchainmask & ~IWN_LSB(sc->txchainmask));

	/* Now, if it's zero, set it to IWN_ANT_AB, so to not panic firmware */
	if (linkq.antmsk_2stream == 0)
		linkq.antmsk_2stream = IWN_ANT_AB;

	/*
	 * If the NIC is a two-stream TX NIC, configure the TX mask to
	 * the default chainmask
	 */
	else if (sc->ntxchains == 2)
		linkq.antmsk_2stream = sc->txchainmask;

	linkq.ampdu_max = 32;		/* XXX negotiated? */
	linkq.ampdu_threshold = 3;
	linkq.ampdu_limit = htole16(4000);	/* 4ms */

	DPRINTF(sc, IWN_DEBUG_XMIT,
	    "%s: 1stream antenna=0x%02x, 2stream antenna=0x%02x, ntxstreams=%d\n",
	    __func__,
	    linkq.antmsk_1stream,
	    linkq.antmsk_2stream,
	    sc->ntxchains);

	/*
	 * Are we using 11n rates? Ensure the channel is
	 * 11n _and_ we have some 11n rates, or don't
	 * try.
	 */
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) {
		rs = (struct ieee80211_rateset *) &ni->ni_htrates;
		is_11n = 1;
	} else {
		rs = &ni->ni_rates;
		is_11n = 0;
	}

	/* Start at highest available bit-rate. */
	/*
	 * XXX this is all very dirty!
	 */
	if (is_11n)
		txrate = ni->ni_htrates.rs_nrates - 1;
	else
		txrate = rs->rs_nrates - 1;
	for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
		uint32_t plcp;

		if (is_11n)
			rate = IEEE80211_RATE_MCS | rs->rs_rates[txrate];
		else
			rate = RV(rs->rs_rates[txrate]);

		DPRINTF(sc, IWN_DEBUG_XMIT,
		    "%s: i=%d, txrate=%d, rate=0x%02x\n",
		    __func__,
		    i,
		    txrate,
		    rate);

		/* Do rate -> PLCP config mapping */
		plcp = iwn_rate_to_plcp(sc, ni, rate);
		linkq.retry[i] = plcp;

		/*
		 * The mimo field is an index into the table which
		 * indicates the first index where it and subsequent entries
		 * will not be using MIMO.
		 *
		 * Since we're filling linkq from 0..15 and we're filling
		 * from the higest MCS rates to the lowest rates, if we
		 * _are_ doing a dual-stream rate, set mimo to idx+1 (ie,
		 * the next entry.) That way if the next entry is a non-MIMO
		 * entry, we're already pointing at it.
		 */
		if ((le32toh(plcp) & IWN_RFLAG_MCS) &&
		    RV(le32toh(plcp)) > 7)
			linkq.mimo = i + 1;

		/* Next retry at immediate lower bit-rate. */
		if (txrate > 0)
			txrate--;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
#undef RV
}

/*
 * Broadcast node is used to send group-addressed and management frames.
 */
static int
iwn_add_broadcast_node(struct iwn_softc *sc, int async)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_node_info node;
	struct iwn_cmd_link_quality linkq;
	uint8_t txant;
	int i, error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];

	memset(&node, 0, sizeof node);
	IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
	node.id = sc->broadcast_id;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
	if ((error = ops->add_node(sc, &node, async)) != 0)
		return error;

	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);

	memset(&linkq, 0, sizeof linkq);
	linkq.id = sc->broadcast_id;
	linkq.antmsk_1stream = txant;
	linkq.antmsk_2stream = IWN_ANT_AB;
	linkq.ampdu_max = 64;
	linkq.ampdu_threshold = 3;
	linkq.ampdu_limit = htole16(4000);	/* 4ms */

	/* Use lowest mandatory bit-rate. */
	/* NOTE(review): 0xd / 10 look like firmware rate-table indices
	 * (plcp values), not Mb/s figures — confirm against if_iwnreg.h. */
	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
		linkq.retry[0] = htole32(0xd);
	else
		linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK);
	linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant));
	/* Use same bit-rate for all TX retries. */
	for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
		linkq.retry[i] = linkq.retry[0];
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
}

/* Push the current WME/EDCA parameters of all ACs to the firmware. */
static int
iwn_updateedca(struct ieee80211com *ic)
{
#define IWN_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct iwn_softc *sc = ic->ic_ifp->if_softc;
	struct iwn_edca_params cmd;
	int aci;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	cmd.flags = htole32(IWN_EDCA_UPDATE);
	for (aci = 0; aci < WME_NUM_AC; aci++) {
		const struct wmeParams *ac =
		    &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
		cmd.ac[aci].aifsn = ac->wmep_aifsn;
		cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin));
		cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax));
		cmd.ac[aci].txoplimit =
		    htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
	}
	/* Fire-and-forget; EDCA update failure is not propagated. */
	(void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
#undef IWN_EXP2
}

/* Multicast filter updates are not needed by this hardware. */
static void
iwn_update_mcast(struct ifnet *ifp)
{
	/* Ignore */
}

/* Program the LED blink pattern via a firmware command. */
static void
iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
{
	struct iwn_cmd_led led;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

#if 0
	/* XXX don't set LEDs during scan? */
	if (sc->sc_is_scanning)
		return;
#endif

	/* Clear microcode LED ownership. */
	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);

	led.which = which;
	led.unit = htole32(10000);	/* on/off in unit of 100ms */
	led.off = off;
	led.on = on;
	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
}

/*
 * Set the critical temperature at which the firmware will stop the radio
 * and notify us.
 */
static int
iwn_set_critical_temp(struct iwn_softc *sc)
{
	struct iwn_critical_temp crit;
	int32_t temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);

	/* Threshold is 110 degC, expressed in each chip family's units. */
	if (sc->hw_type == IWN_HW_REV_TYPE_5150)
		temp = (IWN_CTOK(110) - sc->temp_off) * -5;
	else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
		temp = IWN_CTOK(110);
	else
		temp = 110;
	memset(&crit, 0, sizeof crit);
	crit.tempR = htole32(temp);
	DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp);
	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
}

/* Send beacon timing (interval, listen interval, next-TBTT) to firmware. */
static int
iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
{
	struct iwn_cmd_timing cmd;
	uint64_t val, mod;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
	cmd.bintval = htole16(ni->ni_intval);
	cmd.lintval = htole16(10);

	/* Compute remaining time until next beacon.
	 */
	val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
	mod = le64toh(cmd.tstamp) % val;
	cmd.binitval = htole32((uint32_t)(val - mod));

	DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
	    ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));

	return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
}

/* Re-run 4965 TX power calibration when the temperature drifts. */
static void
iwn4965_power_calibration(struct iwn_softc *sc, int temp)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Adjust TX power if need be (delta >= 3 degC). */
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
	    __func__, sc->temp, temp);
	if (abs(temp - sc->temp) >= 3) {
		/* Record temperature of last calibration. */
		sc->temp = temp;
		(void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
	}
}

/*
 * Set TX power for current channel (each rate has its own power settings).
 * This function takes into account the regulatory information from EEPROM,
 * the current temperature and the current voltage.
 */
static int
iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
    int async)
{
/* Fixed-point arithmetic division using a n-bit fractional part. */
#define fdivround(a, b, n)	\
	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
/* Linear interpolation. */
#define interpolate(x, x1, y1, x2, y2, n)	\
	((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))

	static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
	struct iwn_ucode_info *uc = &sc->ucode_info;
	struct iwn4965_cmd_txpower cmd;
	struct iwn4965_eeprom_chan_samples *chans;
	const uint8_t *rf_gain, *dsp_gain;
	int32_t vdiff, tdiff;
	int i, c, grp, maxpwr;
	uint8_t chan;

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	/* Retrieve current channel from last RXON. */
	chan = sc->rxon->chan;
	DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
	    chan);

	memset(&cmd, 0, sizeof cmd);
	cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
	cmd.chan = chan;

	/* Select per-band max power and gain tables. */
	if (IEEE80211_IS_CHAN_5GHZ(ch)) {
		maxpwr = sc->maxpwr5GHz;
		rf_gain = iwn4965_rf_gain_5ghz;
		dsp_gain = iwn4965_dsp_gain_5ghz;
	} else {
		maxpwr = sc->maxpwr2GHz;
		rf_gain = iwn4965_rf_gain_2ghz;
		dsp_gain = iwn4965_dsp_gain_2ghz;
	}

	/* Compute voltage compensation. */
	vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
	if (vdiff > 0)
		vdiff *= 2;
	if (abs(vdiff) > 2)
		vdiff = 0;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
	    __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);

	/* Get channel attenuation group. */
	if (chan <= 20)		/* 1-20 */
		grp = 4;
	else if (chan <= 43)	/* 34-43 */
		grp = 0;
	else if (chan <= 70)	/* 44-70 */
		grp = 1;
	else if (chan <= 124)	/* 71-124 */
		grp = 2;
	else			/* 125-200 */
		grp = 3;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);

	/* Get channel sub-band. */
	for (i = 0; i < IWN_NBANDS; i++)
		if (sc->bands[i].lo != 0 &&
		    sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
			break;
	if (i == IWN_NBANDS)	/* Can't happen in real-life. */
		return EINVAL;
	chans = sc->bands[i].chans;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: chan %d sub-band=%d\n", __func__, chan, i);

	/* Two TX chains; interpolate EEPROM samples to this channel. */
	for (c = 0; c < 2; c++) {
		uint8_t power, gain, temp;
		int maxchpwr, pwr, ridx, idx;

		power = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].power,
		    chans[1].num, chans[1].samples[c][1].power, 1);
		gain = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].gain,
		    chans[1].num, chans[1].samples[c][1].gain, 1);
		temp = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].temp,
		    chans[1].num, chans[1].samples[c][1].temp, 1);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
		    "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
		    __func__, c, power, gain, temp);

		/* Compute temperature compensation. */
		tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
		    "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
		    __func__, tdiff, sc->temp, temp);

		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
			/* Convert dBm to half-dBm. */
			maxchpwr = sc->maxpwr[chan] * 2;
			if ((ridx / 8) & 1)
				maxchpwr -= 6;	/* MIMO 2T: -3dB */

			pwr = maxpwr;

			/* Adjust TX power based on rate. */
			if ((ridx % 8) == 5)
				pwr -= 15;	/* OFDM48: -7.5dB */
			else if ((ridx % 8) == 6)
				pwr -= 17;	/* OFDM54: -8.5dB */
			else if ((ridx % 8) == 7)
				pwr -= 20;	/* OFDM60: -10dB */
			else
				pwr -= 10;	/* Others: -5dB */

			/* Do not exceed channel max TX power. */
			if (pwr > maxchpwr)
				pwr = maxchpwr;

			idx = gain - (pwr - power) - tdiff - vdiff;
			if ((ridx / 8) & 1)	/* MIMO */
				idx += (int32_t)le32toh(uc->atten[grp][c]);

			if (cmd.band == 0)
				idx += 9;	/* 5GHz */
			if (ridx == IWN_RIDX_MAX)
				idx += 5;	/* CCK */

			/* Make sure idx stays in a valid range. */
			if (idx < 0)
				idx = 0;
			else if (idx > IWN4965_MAX_PWR_INDEX)
				idx = IWN4965_MAX_PWR_INDEX;

			DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
			    "%s: Tx chain %d, rate idx %d: power=%d\n",
			    __func__, c, ridx, idx);
			cmd.power[ridx].rf_gain[c] = rf_gain[idx];
			cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
		}
	}

	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: set tx power for chan %d\n", __func__, chan);
	return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);

#undef interpolate
#undef fdivround
}

static int
iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
    int async)
{
	struct iwn5000_cmd_txpower cmd;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/*
	 * TX power calibration is handled automatically by the firmware
	 * for 5000 Series.
	 */
	memset(&cmd, 0, sizeof cmd);
	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__);
	return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
}

/*
 * Retrieve the maximum RSSI (in dBm) among receivers.
5317 */ 5318 static int 5319 iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat) 5320 { 5321 struct iwn4965_rx_phystat *phy = (void *)stat->phybuf; 5322 uint8_t mask, agc; 5323 int rssi; 5324 5325 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5326 5327 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC; 5328 agc = (le16toh(phy->agc) >> 7) & 0x7f; 5329 5330 rssi = 0; 5331 if (mask & IWN_ANT_A) 5332 rssi = MAX(rssi, phy->rssi[0]); 5333 if (mask & IWN_ANT_B) 5334 rssi = MAX(rssi, phy->rssi[2]); 5335 if (mask & IWN_ANT_C) 5336 rssi = MAX(rssi, phy->rssi[4]); 5337 5338 DPRINTF(sc, IWN_DEBUG_RECV, 5339 "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc, 5340 mask, phy->rssi[0], phy->rssi[2], phy->rssi[4], 5341 rssi - agc - IWN_RSSI_TO_DBM); 5342 return rssi - agc - IWN_RSSI_TO_DBM; 5343 } 5344 5345 static int 5346 iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat) 5347 { 5348 struct iwn5000_rx_phystat *phy = (void *)stat->phybuf; 5349 uint8_t agc; 5350 int rssi; 5351 5352 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5353 5354 agc = (le32toh(phy->agc) >> 9) & 0x7f; 5355 5356 rssi = MAX(le16toh(phy->rssi[0]) & 0xff, 5357 le16toh(phy->rssi[1]) & 0xff); 5358 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi); 5359 5360 DPRINTF(sc, IWN_DEBUG_RECV, 5361 "%s: agc %d rssi %d %d %d result %d\n", __func__, agc, 5362 phy->rssi[0], phy->rssi[1], phy->rssi[2], 5363 rssi - agc - IWN_RSSI_TO_DBM); 5364 return rssi - agc - IWN_RSSI_TO_DBM; 5365 } 5366 5367 /* 5368 * Retrieve the average noise (in dBm) among receivers. 5369 */ 5370 static int 5371 iwn_get_noise(const struct iwn_rx_general_stats *stats) 5372 { 5373 int i, total, nbant, noise; 5374 5375 total = nbant = 0; 5376 for (i = 0; i < 3; i++) { 5377 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0) 5378 continue; 5379 total += noise; 5380 nbant++; 5381 } 5382 /* There should be at least one antenna but check anyway. */ 5383 return (nbant == 0) ? 
-127 : (total / nbant) - 107; 5384 } 5385 5386 /* 5387 * Compute temperature (in degC) from last received statistics. 5388 */ 5389 static int 5390 iwn4965_get_temperature(struct iwn_softc *sc) 5391 { 5392 struct iwn_ucode_info *uc = &sc->ucode_info; 5393 int32_t r1, r2, r3, r4, temp; 5394 5395 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5396 5397 r1 = le32toh(uc->temp[0].chan20MHz); 5398 r2 = le32toh(uc->temp[1].chan20MHz); 5399 r3 = le32toh(uc->temp[2].chan20MHz); 5400 r4 = le32toh(sc->rawtemp); 5401 5402 if (r1 == r3) /* Prevents division by 0 (should not happen). */ 5403 return 0; 5404 5405 /* Sign-extend 23-bit R4 value to 32-bit. */ 5406 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000; 5407 /* Compute temperature in Kelvin. */ 5408 temp = (259 * (r4 - r2)) / (r3 - r1); 5409 temp = (temp * 97) / 100 + 8; 5410 5411 DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp, 5412 IWN_KTOC(temp)); 5413 return IWN_KTOC(temp); 5414 } 5415 5416 static int 5417 iwn5000_get_temperature(struct iwn_softc *sc) 5418 { 5419 int32_t temp; 5420 5421 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5422 5423 /* 5424 * Temperature is not used by the driver for 5000 Series because 5425 * TX power calibration is handled by firmware. 5426 */ 5427 temp = le32toh(sc->rawtemp); 5428 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 5429 temp = (temp / -5) + sc->temp_off; 5430 temp = IWN_KTOC(temp); 5431 } 5432 return temp; 5433 } 5434 5435 /* 5436 * Initialize sensitivity calibration state machine. 5437 */ 5438 static int 5439 iwn_init_sensitivity(struct iwn_softc *sc) 5440 { 5441 struct iwn_ops *ops = &sc->ops; 5442 struct iwn_calib_state *calib = &sc->calib; 5443 uint32_t flags; 5444 int error; 5445 5446 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5447 5448 /* Reset calibration state machine. */ 5449 memset(calib, 0, sizeof (*calib)); 5450 calib->state = IWN_CALIB_STATE_INIT; 5451 calib->cck_state = IWN_CCK_STATE_HIFA; 5452 /* Set initial correlation values. 
*/ 5453 calib->ofdm_x1 = sc->limits->min_ofdm_x1; 5454 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1; 5455 calib->ofdm_x4 = sc->limits->min_ofdm_x4; 5456 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4; 5457 calib->cck_x4 = 125; 5458 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4; 5459 calib->energy_cck = sc->limits->energy_cck; 5460 5461 /* Write initial sensitivity. */ 5462 if ((error = iwn_send_sensitivity(sc)) != 0) 5463 return error; 5464 5465 /* Write initial gains. */ 5466 if ((error = ops->init_gains(sc)) != 0) 5467 return error; 5468 5469 /* Request statistics at each beacon interval. */ 5470 flags = 0; 5471 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n", 5472 __func__); 5473 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 5474 } 5475 5476 /* 5477 * Collect noise and RSSI statistics for the first 20 beacons received 5478 * after association and use them to determine connected antennas and 5479 * to set differential gains. 5480 */ 5481 static void 5482 iwn_collect_noise(struct iwn_softc *sc, 5483 const struct iwn_rx_general_stats *stats) 5484 { 5485 struct iwn_ops *ops = &sc->ops; 5486 struct iwn_calib_state *calib = &sc->calib; 5487 struct ifnet *ifp = sc->sc_ifp; 5488 struct ieee80211com *ic = ifp->if_l2com; 5489 uint32_t val; 5490 int i; 5491 5492 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 5493 5494 /* Accumulate RSSI and noise for all 3 antennas. */ 5495 for (i = 0; i < 3; i++) { 5496 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff; 5497 calib->noise[i] += le32toh(stats->noise[i]) & 0xff; 5498 } 5499 /* NB: We update differential gains only once after 20 beacons. */ 5500 if (++calib->nbeacons < 20) 5501 return; 5502 5503 /* Determine highest average RSSI. */ 5504 val = MAX(calib->rssi[0], calib->rssi[1]); 5505 val = MAX(calib->rssi[2], val); 5506 5507 /* Determine which antennas are connected. 
*/ 5508 sc->chainmask = sc->rxchainmask; 5509 for (i = 0; i < 3; i++) 5510 if (val - calib->rssi[i] > 15 * 20) 5511 sc->chainmask &= ~(1 << i); 5512 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5513 "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n", 5514 __func__, sc->rxchainmask, sc->chainmask); 5515 5516 /* If none of the TX antennas are connected, keep at least one. */ 5517 if ((sc->chainmask & sc->txchainmask) == 0) 5518 sc->chainmask |= IWN_LSB(sc->txchainmask); 5519 5520 (void)ops->set_gains(sc); 5521 calib->state = IWN_CALIB_STATE_RUN; 5522 5523 #ifdef notyet 5524 /* XXX Disable RX chains with no antennas connected. */ 5525 sc->rxon->rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask)); 5526 if (sc->sc_is_scanning) 5527 device_printf(sc->sc_dev, 5528 "%s: is_scanning set, before RXON\n", 5529 __func__); 5530 (void)iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1); 5531 #endif 5532 5533 /* Enable power-saving mode if requested by user. */ 5534 if (ic->ic_flags & IEEE80211_F_PMGTON) 5535 (void)iwn_set_pslevel(sc, 0, 3, 1); 5536 5537 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 5538 5539 } 5540 5541 static int 5542 iwn4965_init_gains(struct iwn_softc *sc) 5543 { 5544 struct iwn_phy_calib_gain cmd; 5545 5546 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5547 5548 memset(&cmd, 0, sizeof cmd); 5549 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 5550 /* Differential gains initially set to 0 for all 3 antennas. 
*/ 5551 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5552 "%s: setting initial differential gains\n", __func__); 5553 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 5554 } 5555 5556 static int 5557 iwn5000_init_gains(struct iwn_softc *sc) 5558 { 5559 struct iwn_phy_calib cmd; 5560 5561 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5562 5563 memset(&cmd, 0, sizeof cmd); 5564 cmd.code = sc->reset_noise_gain; 5565 cmd.ngroups = 1; 5566 cmd.isvalid = 1; 5567 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5568 "%s: setting initial differential gains\n", __func__); 5569 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 5570 } 5571 5572 static int 5573 iwn4965_set_gains(struct iwn_softc *sc) 5574 { 5575 struct iwn_calib_state *calib = &sc->calib; 5576 struct iwn_phy_calib_gain cmd; 5577 int i, delta, noise; 5578 5579 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5580 5581 /* Get minimal noise among connected antennas. */ 5582 noise = INT_MAX; /* NB: There's at least one antenna. */ 5583 for (i = 0; i < 3; i++) 5584 if (sc->chainmask & (1 << i)) 5585 noise = MIN(calib->noise[i], noise); 5586 5587 memset(&cmd, 0, sizeof cmd); 5588 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 5589 /* Set differential gains for connected antennas. */ 5590 for (i = 0; i < 3; i++) { 5591 if (sc->chainmask & (1 << i)) { 5592 /* Compute attenuation (in unit of 1.5dB). */ 5593 delta = (noise - (int32_t)calib->noise[i]) / 30; 5594 /* NB: delta <= 0 */ 5595 /* Limit to [-4.5dB,0]. 
*/ 5596 cmd.gain[i] = MIN(abs(delta), 3); 5597 if (delta < 0) 5598 cmd.gain[i] |= 1 << 2; /* sign bit */ 5599 } 5600 } 5601 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5602 "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n", 5603 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask); 5604 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 5605 } 5606 5607 static int 5608 iwn5000_set_gains(struct iwn_softc *sc) 5609 { 5610 struct iwn_calib_state *calib = &sc->calib; 5611 struct iwn_phy_calib_gain cmd; 5612 int i, ant, div, delta; 5613 5614 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5615 5616 /* We collected 20 beacons and !=6050 need a 1.5 factor. */ 5617 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30; 5618 5619 memset(&cmd, 0, sizeof cmd); 5620 cmd.code = sc->noise_gain; 5621 cmd.ngroups = 1; 5622 cmd.isvalid = 1; 5623 /* Get first available RX antenna as referential. */ 5624 ant = IWN_LSB(sc->rxchainmask); 5625 /* Set differential gains for other antennas. */ 5626 for (i = ant + 1; i < 3; i++) { 5627 if (sc->chainmask & (1 << i)) { 5628 /* The delta is relative to antenna "ant". */ 5629 delta = ((int32_t)calib->noise[ant] - 5630 (int32_t)calib->noise[i]) / div; 5631 /* Limit to [-4.5dB,+4.5dB]. */ 5632 cmd.gain[i - 1] = MIN(abs(delta), 3); 5633 if (delta < 0) 5634 cmd.gain[i - 1] |= 1 << 2; /* sign bit */ 5635 } 5636 } 5637 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5638 "setting differential gains Ant B/C: %x/%x (%x)\n", 5639 cmd.gain[0], cmd.gain[1], sc->chainmask); 5640 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 5641 } 5642 5643 /* 5644 * Tune RF RX sensitivity based on the number of false alarms detected 5645 * during the last beacon period. 
5646 */ 5647 static void 5648 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) 5649 { 5650 #define inc(val, inc, max) \ 5651 if ((val) < (max)) { \ 5652 if ((val) < (max) - (inc)) \ 5653 (val) += (inc); \ 5654 else \ 5655 (val) = (max); \ 5656 needs_update = 1; \ 5657 } 5658 #define dec(val, dec, min) \ 5659 if ((val) > (min)) { \ 5660 if ((val) > (min) + (dec)) \ 5661 (val) -= (dec); \ 5662 else \ 5663 (val) = (min); \ 5664 needs_update = 1; \ 5665 } 5666 5667 const struct iwn_sensitivity_limits *limits = sc->limits; 5668 struct iwn_calib_state *calib = &sc->calib; 5669 uint32_t val, rxena, fa; 5670 uint32_t energy[3], energy_min; 5671 uint8_t noise[3], noise_ref; 5672 int i, needs_update = 0; 5673 5674 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 5675 5676 /* Check that we've been enabled long enough. */ 5677 if ((rxena = le32toh(stats->general.load)) == 0){ 5678 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end not so long\n", __func__); 5679 return; 5680 } 5681 5682 /* Compute number of false alarms since last call for OFDM. */ 5683 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; 5684 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm; 5685 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ 5686 5687 /* Save counters values for next call. */ 5688 calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp); 5689 calib->fa_ofdm = le32toh(stats->ofdm.fa); 5690 5691 if (fa > 50 * rxena) { 5692 /* High false alarm count, decrease sensitivity. */ 5693 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5694 "%s: OFDM high false alarm count: %u\n", __func__, fa); 5695 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1); 5696 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1); 5697 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4); 5698 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4); 5699 5700 } else if (fa < 5 * rxena) { 5701 /* Low false alarm count, increase sensitivity. 
*/ 5702 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5703 "%s: OFDM low false alarm count: %u\n", __func__, fa); 5704 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1); 5705 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1); 5706 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4); 5707 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4); 5708 } 5709 5710 /* Compute maximum noise among 3 receivers. */ 5711 for (i = 0; i < 3; i++) 5712 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff; 5713 val = MAX(noise[0], noise[1]); 5714 val = MAX(noise[2], val); 5715 /* Insert it into our samples table. */ 5716 calib->noise_samples[calib->cur_noise_sample] = val; 5717 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; 5718 5719 /* Compute maximum noise among last 20 samples. */ 5720 noise_ref = calib->noise_samples[0]; 5721 for (i = 1; i < 20; i++) 5722 noise_ref = MAX(noise_ref, calib->noise_samples[i]); 5723 5724 /* Compute maximum energy among 3 receivers. */ 5725 for (i = 0; i < 3; i++) 5726 energy[i] = le32toh(stats->general.energy[i]); 5727 val = MIN(energy[0], energy[1]); 5728 val = MIN(energy[2], val); 5729 /* Insert it into our samples table. */ 5730 calib->energy_samples[calib->cur_energy_sample] = val; 5731 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; 5732 5733 /* Compute minimum energy among last 10 samples. */ 5734 energy_min = calib->energy_samples[0]; 5735 for (i = 1; i < 10; i++) 5736 energy_min = MAX(energy_min, calib->energy_samples[i]); 5737 energy_min += 6; 5738 5739 /* Compute number of false alarms since last call for CCK. */ 5740 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck; 5741 fa += le32toh(stats->cck.fa) - calib->fa_cck; 5742 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ 5743 5744 /* Save counters values for next call. */ 5745 calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp); 5746 calib->fa_cck = le32toh(stats->cck.fa); 5747 5748 if (fa > 50 * rxena) { 5749 /* High false alarm count, decrease sensitivity. 
*/ 5750 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5751 "%s: CCK high false alarm count: %u\n", __func__, fa); 5752 calib->cck_state = IWN_CCK_STATE_HIFA; 5753 calib->low_fa = 0; 5754 5755 if (calib->cck_x4 > 160) { 5756 calib->noise_ref = noise_ref; 5757 if (calib->energy_cck > 2) 5758 dec(calib->energy_cck, 2, energy_min); 5759 } 5760 if (calib->cck_x4 < 160) { 5761 calib->cck_x4 = 161; 5762 needs_update = 1; 5763 } else 5764 inc(calib->cck_x4, 3, limits->max_cck_x4); 5765 5766 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4); 5767 5768 } else if (fa < 5 * rxena) { 5769 /* Low false alarm count, increase sensitivity. */ 5770 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5771 "%s: CCK low false alarm count: %u\n", __func__, fa); 5772 calib->cck_state = IWN_CCK_STATE_LOFA; 5773 calib->low_fa++; 5774 5775 if (calib->cck_state != IWN_CCK_STATE_INIT && 5776 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 || 5777 calib->low_fa > 100)) { 5778 inc(calib->energy_cck, 2, limits->min_energy_cck); 5779 dec(calib->cck_x4, 3, limits->min_cck_x4); 5780 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4); 5781 } 5782 } else { 5783 /* Not worth to increase or decrease sensitivity. */ 5784 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5785 "%s: CCK normal false alarm count: %u\n", __func__, fa); 5786 calib->low_fa = 0; 5787 calib->noise_ref = noise_ref; 5788 5789 if (calib->cck_state == IWN_CCK_STATE_HIFA) { 5790 /* Previous interval had many false alarms. 
*/ 5791 dec(calib->energy_cck, 8, energy_min); 5792 } 5793 calib->cck_state = IWN_CCK_STATE_INIT; 5794 } 5795 5796 if (needs_update) 5797 (void)iwn_send_sensitivity(sc); 5798 5799 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 5800 5801 #undef dec 5802 #undef inc 5803 } 5804 5805 static int 5806 iwn_send_sensitivity(struct iwn_softc *sc) 5807 { 5808 struct iwn_calib_state *calib = &sc->calib; 5809 struct iwn_enhanced_sensitivity_cmd cmd; 5810 int len; 5811 5812 memset(&cmd, 0, sizeof cmd); 5813 len = sizeof (struct iwn_sensitivity_cmd); 5814 cmd.which = IWN_SENSITIVITY_WORKTBL; 5815 /* OFDM modulation. */ 5816 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1); 5817 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1); 5818 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4); 5819 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4); 5820 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm); 5821 cmd.energy_ofdm_th = htole16(62); 5822 /* CCK modulation. */ 5823 cmd.corr_cck_x4 = htole16(calib->cck_x4); 5824 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4); 5825 cmd.energy_cck = htole16(calib->energy_cck); 5826 /* Barker modulation: use default values. */ 5827 cmd.corr_barker = htole16(190); 5828 cmd.corr_barker_mrc = htole16(390); 5829 5830 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 5831 "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__, 5832 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4, 5833 calib->ofdm_mrc_x4, calib->cck_x4, 5834 calib->cck_mrc_x4, calib->energy_cck); 5835 5836 if (!(sc->sc_flags & IWN_FLAG_ENH_SENS)) 5837 goto send; 5838 /* Enhanced sensitivity settings. 
*/ 5839 len = sizeof (struct iwn_enhanced_sensitivity_cmd); 5840 cmd.ofdm_det_slope_mrc = htole16(668); 5841 cmd.ofdm_det_icept_mrc = htole16(4); 5842 cmd.ofdm_det_slope = htole16(486); 5843 cmd.ofdm_det_icept = htole16(37); 5844 cmd.cck_det_slope_mrc = htole16(853); 5845 cmd.cck_det_icept_mrc = htole16(4); 5846 cmd.cck_det_slope = htole16(476); 5847 cmd.cck_det_icept = htole16(99); 5848 send: 5849 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1); 5850 } 5851 5852 /* 5853 * Set STA mode power saving level (between 0 and 5). 5854 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 5855 */ 5856 static int 5857 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async) 5858 { 5859 struct iwn_pmgt_cmd cmd; 5860 const struct iwn_pmgt *pmgt; 5861 uint32_t max, skip_dtim; 5862 uint32_t reg; 5863 int i; 5864 5865 DPRINTF(sc, IWN_DEBUG_PWRSAVE, 5866 "%s: dtim=%d, level=%d, async=%d\n", 5867 __func__, 5868 dtim, 5869 level, 5870 async); 5871 5872 /* Select which PS parameters to use. */ 5873 if (dtim <= 2) 5874 pmgt = &iwn_pmgt[0][level]; 5875 else if (dtim <= 10) 5876 pmgt = &iwn_pmgt[1][level]; 5877 else 5878 pmgt = &iwn_pmgt[2][level]; 5879 5880 memset(&cmd, 0, sizeof cmd); 5881 if (level != 0) /* not CAM */ 5882 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP); 5883 if (level == 5) 5884 cmd.flags |= htole16(IWN_PS_FAST_PD); 5885 /* Retrieve PCIe Active State Power Management (ASPM). */ 5886 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 5887 if (!(reg & 0x1)) /* L0s Entry disabled. 
*/ 5888 cmd.flags |= htole16(IWN_PS_PCI_PMGT); 5889 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024); 5890 cmd.txtimeout = htole32(pmgt->txtimeout * 1024); 5891 5892 if (dtim == 0) { 5893 dtim = 1; 5894 skip_dtim = 0; 5895 } else 5896 skip_dtim = pmgt->skip_dtim; 5897 if (skip_dtim != 0) { 5898 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM); 5899 max = pmgt->intval[4]; 5900 if (max == (uint32_t)-1) 5901 max = dtim * (skip_dtim + 1); 5902 else if (max > dtim) 5903 max = (max / dtim) * dtim; 5904 } else 5905 max = dtim; 5906 for (i = 0; i < 5; i++) 5907 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 5908 5909 DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n", 5910 level); 5911 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 5912 } 5913 5914 static int 5915 iwn_send_btcoex(struct iwn_softc *sc) 5916 { 5917 struct iwn_bluetooth cmd; 5918 5919 memset(&cmd, 0, sizeof cmd); 5920 cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO; 5921 cmd.lead_time = IWN_BT_LEAD_TIME_DEF; 5922 cmd.max_kill = IWN_BT_MAX_KILL_DEF; 5923 DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", 5924 __func__); 5925 return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 5926 } 5927 5928 static int 5929 iwn_send_advanced_btcoex(struct iwn_softc *sc) 5930 { 5931 static const uint32_t btcoex_3wire[12] = { 5932 0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa, 5933 0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa, 5934 0xc0004000, 0x00004000, 0xf0005000, 0xf0005000, 5935 }; 5936 struct iwn6000_btcoex_config btconfig; 5937 struct iwn2000_btcoex_config btconfig2k; 5938 struct iwn_btcoex_priotable btprio; 5939 struct iwn_btcoex_prot btprot; 5940 int error, i; 5941 uint8_t flags; 5942 5943 memset(&btconfig, 0, sizeof btconfig); 5944 memset(&btconfig2k, 0, sizeof btconfig2k); 5945 5946 flags = IWN_BT_FLAG_COEX6000_MODE_3W << 5947 IWN_BT_FLAG_COEX6000_MODE_SHIFT; // Done as is in linux kernel 3.2 5948 5949 if (sc->base_params->bt_sco_disable) 5950 
flags &= ~IWN_BT_FLAG_SYNC_2_BT_DISABLE; 5951 else 5952 flags |= IWN_BT_FLAG_SYNC_2_BT_DISABLE; 5953 5954 flags |= IWN_BT_FLAG_COEX6000_CHAN_INHIBITION; 5955 5956 /* Default flags result is 145 as old value */ 5957 5958 /* 5959 * Flags value has to be review. Values must change if we 5960 * which to disable it 5961 */ 5962 if (sc->base_params->bt_session_2) { 5963 btconfig2k.flags = flags; 5964 btconfig2k.max_kill = 5; 5965 btconfig2k.bt3_t7_timer = 1; 5966 btconfig2k.kill_ack = htole32(0xffff0000); 5967 btconfig2k.kill_cts = htole32(0xffff0000); 5968 btconfig2k.sample_time = 2; 5969 btconfig2k.bt3_t2_timer = 0xc; 5970 5971 for (i = 0; i < 12; i++) 5972 btconfig2k.lookup_table[i] = htole32(btcoex_3wire[i]); 5973 btconfig2k.valid = htole16(0xff); 5974 btconfig2k.prio_boost = htole32(0xf0); 5975 DPRINTF(sc, IWN_DEBUG_RESET, 5976 "%s: configuring advanced bluetooth coexistence" 5977 " session 2, flags : 0x%x\n", 5978 __func__, 5979 flags); 5980 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig2k, 5981 sizeof(btconfig2k), 1); 5982 } else { 5983 btconfig.flags = flags; 5984 btconfig.max_kill = 5; 5985 btconfig.bt3_t7_timer = 1; 5986 btconfig.kill_ack = htole32(0xffff0000); 5987 btconfig.kill_cts = htole32(0xffff0000); 5988 btconfig.sample_time = 2; 5989 btconfig.bt3_t2_timer = 0xc; 5990 5991 for (i = 0; i < 12; i++) 5992 btconfig.lookup_table[i] = htole32(btcoex_3wire[i]); 5993 btconfig.valid = htole16(0xff); 5994 btconfig.prio_boost = 0xf0; 5995 DPRINTF(sc, IWN_DEBUG_RESET, 5996 "%s: configuring advanced bluetooth coexistence," 5997 " flags : 0x%x\n", 5998 __func__, 5999 flags); 6000 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, 6001 sizeof(btconfig), 1); 6002 } 6003 6004 6005 if (error != 0) 6006 return error; 6007 6008 memset(&btprio, 0, sizeof btprio); 6009 btprio.calib_init1 = 0x6; 6010 btprio.calib_init2 = 0x7; 6011 btprio.calib_periodic_low1 = 0x2; 6012 btprio.calib_periodic_low2 = 0x3; 6013 btprio.calib_periodic_high1 = 0x4; 6014 btprio.calib_periodic_high2 = 
0x5; 6015 btprio.dtim = 0x6; 6016 btprio.scan52 = 0x8; 6017 btprio.scan24 = 0xa; 6018 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio), 6019 1); 6020 if (error != 0) 6021 return error; 6022 6023 /* Force BT state machine change. */ 6024 memset(&btprot, 0, sizeof btprot); 6025 btprot.open = 1; 6026 btprot.type = 1; 6027 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1); 6028 if (error != 0) 6029 return error; 6030 btprot.open = 0; 6031 return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1); 6032 } 6033 6034 static int 6035 iwn5000_runtime_calib(struct iwn_softc *sc) 6036 { 6037 struct iwn5000_calib_config cmd; 6038 6039 memset(&cmd, 0, sizeof cmd); 6040 cmd.ucode.once.enable = 0xffffffff; 6041 cmd.ucode.once.start = IWN5000_CALIB_DC; 6042 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 6043 "%s: configuring runtime calibration\n", __func__); 6044 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0); 6045 } 6046 6047 static int 6048 iwn_config(struct iwn_softc *sc) 6049 { 6050 struct iwn_ops *ops = &sc->ops; 6051 struct ifnet *ifp = sc->sc_ifp; 6052 struct ieee80211com *ic = ifp->if_l2com; 6053 uint32_t txmask; 6054 uint16_t rxchain; 6055 int error; 6056 6057 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 6058 6059 if ((sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) 6060 && (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)) { 6061 device_printf(sc->sc_dev,"%s: temp_offset and temp_offsetv2 are" 6062 " exclusive each together. Review NIC config file. Conf" 6063 " : 0x%08x Flags : 0x%08x \n", __func__, 6064 sc->base_params->calib_need, 6065 (IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET | 6066 IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)); 6067 return (EINVAL); 6068 } 6069 6070 /* Compute temperature calib if needed. 
Will be send by send calib */ 6071 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) { 6072 error = iwn5000_temp_offset_calib(sc); 6073 if (error != 0) { 6074 device_printf(sc->sc_dev, 6075 "%s: could not set temperature offset\n", __func__); 6076 return (error); 6077 } 6078 } else if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) { 6079 error = iwn5000_temp_offset_calibv2(sc); 6080 if (error != 0) { 6081 device_printf(sc->sc_dev, 6082 "%s: could not compute temperature offset v2\n", 6083 __func__); 6084 return (error); 6085 } 6086 } 6087 6088 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 6089 /* Configure runtime DC calibration. */ 6090 error = iwn5000_runtime_calib(sc); 6091 if (error != 0) { 6092 device_printf(sc->sc_dev, 6093 "%s: could not configure runtime calibration\n", 6094 __func__); 6095 return error; 6096 } 6097 } 6098 6099 /* Configure valid TX chains for >=5000 Series. */ 6100 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 6101 txmask = htole32(sc->txchainmask); 6102 DPRINTF(sc, IWN_DEBUG_RESET, 6103 "%s: configuring valid TX chains 0x%x\n", __func__, txmask); 6104 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask, 6105 sizeof txmask, 0); 6106 if (error != 0) { 6107 device_printf(sc->sc_dev, 6108 "%s: could not configure valid TX chains, " 6109 "error %d\n", __func__, error); 6110 return error; 6111 } 6112 } 6113 6114 /* Configure bluetooth coexistence. */ 6115 error = 0; 6116 6117 /* Configure bluetooth coexistence if needed. */ 6118 if (sc->base_params->bt_mode == IWN_BT_ADVANCED) 6119 error = iwn_send_advanced_btcoex(sc); 6120 if (sc->base_params->bt_mode == IWN_BT_SIMPLE) 6121 error = iwn_send_btcoex(sc); 6122 6123 if (error != 0) { 6124 device_printf(sc->sc_dev, 6125 "%s: could not configure bluetooth coexistence, error %d\n", 6126 __func__, error); 6127 return error; 6128 } 6129 6130 /* Set mode, channel, RX filter and enable RX. 
*/ 6131 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 6132 memset(sc->rxon, 0, sizeof (struct iwn_rxon)); 6133 IEEE80211_ADDR_COPY(sc->rxon->myaddr, IF_LLADDR(ifp)); 6134 IEEE80211_ADDR_COPY(sc->rxon->wlap, IF_LLADDR(ifp)); 6135 sc->rxon->chan = ieee80211_chan2ieee(ic, ic->ic_curchan); 6136 sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 6137 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) 6138 sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 6139 switch (ic->ic_opmode) { 6140 case IEEE80211_M_STA: 6141 sc->rxon->mode = IWN_MODE_STA; 6142 sc->rxon->filter = htole32(IWN_FILTER_MULTICAST); 6143 break; 6144 case IEEE80211_M_MONITOR: 6145 sc->rxon->mode = IWN_MODE_MONITOR; 6146 sc->rxon->filter = htole32(IWN_FILTER_MULTICAST | 6147 IWN_FILTER_CTL | IWN_FILTER_PROMISC); 6148 break; 6149 default: 6150 /* Should not get there. */ 6151 break; 6152 } 6153 sc->rxon->cck_mask = 0x0f; /* not yet negotiated */ 6154 sc->rxon->ofdm_mask = 0xff; /* not yet negotiated */ 6155 sc->rxon->ht_single_mask = 0xff; 6156 sc->rxon->ht_dual_mask = 0xff; 6157 sc->rxon->ht_triple_mask = 0xff; 6158 rxchain = 6159 IWN_RXCHAIN_VALID(sc->rxchainmask) | 6160 IWN_RXCHAIN_MIMO_COUNT(2) | 6161 IWN_RXCHAIN_IDLE_COUNT(2); 6162 sc->rxon->rxchain = htole16(rxchain); 6163 DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__); 6164 if (sc->sc_is_scanning) 6165 device_printf(sc->sc_dev, 6166 "%s: is_scanning set, before RXON\n", 6167 __func__); 6168 error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 0); 6169 if (error != 0) { 6170 device_printf(sc->sc_dev, "%s: RXON command failed\n", 6171 __func__); 6172 return error; 6173 } 6174 6175 if ((error = iwn_add_broadcast_node(sc, 0)) != 0) { 6176 device_printf(sc->sc_dev, "%s: could not add broadcast node\n", 6177 __func__); 6178 return error; 6179 } 6180 6181 /* Configuration has changed, set TX power accordingly. 
*/ 6182 if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) { 6183 device_printf(sc->sc_dev, "%s: could not set TX power\n", 6184 __func__); 6185 return error; 6186 } 6187 6188 if ((error = iwn_set_critical_temp(sc)) != 0) { 6189 device_printf(sc->sc_dev, 6190 "%s: could not set critical temperature\n", __func__); 6191 return error; 6192 } 6193 6194 /* Set power saving level to CAM during initialization. */ 6195 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) { 6196 device_printf(sc->sc_dev, 6197 "%s: could not set power saving level\n", __func__); 6198 return error; 6199 } 6200 6201 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 6202 6203 return 0; 6204 } 6205 6206 /* 6207 * Add an ssid element to a frame. 6208 */ 6209 static uint8_t * 6210 ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len) 6211 { 6212 *frm++ = IEEE80211_ELEMID_SSID; 6213 *frm++ = len; 6214 memcpy(frm, ssid, len); 6215 return frm + len; 6216 } 6217 6218 static uint16_t 6219 iwn_get_active_dwell_time(struct iwn_softc *sc, 6220 struct ieee80211_channel *c, uint8_t n_probes) 6221 { 6222 /* No channel? Default to 2GHz settings */ 6223 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 6224 return (IWN_ACTIVE_DWELL_TIME_2GHZ + 6225 IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 6226 } 6227 6228 /* 5GHz dwell time */ 6229 return (IWN_ACTIVE_DWELL_TIME_5GHZ + 6230 IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 6231 } 6232 6233 /* 6234 * Limit the total dwell time to 85% of the beacon interval. 6235 * 6236 * Returns the dwell time in milliseconds. 6237 */ 6238 static uint16_t 6239 iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time) 6240 { 6241 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 6242 struct ieee80211vap *vap = NULL; 6243 int bintval = 0; 6244 6245 /* bintval is in TU (1.024mS) */ 6246 if (! 
TAILQ_EMPTY(&ic->ic_vaps)) {
		vap = TAILQ_FIRST(&ic->ic_vaps);
		bintval = vap->iv_bss->ni_intval;
	}

	/*
	 * If it's non-zero, we should calculate the minimum of
	 * it and the DWELL_BASE.
	 *
	 * XXX Yes, the math should take into account that bintval
	 * is 1.024mS, not 1mS..
	 */
	if (bintval > 0) {
		DPRINTF(sc, IWN_DEBUG_SCAN,
		    "%s: bintval=%d\n",
		    __func__,
		    bintval);
		return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100)));
	}

	/* No association context? Default */
	return (IWN_PASSIVE_DWELL_BASE);
}

/*
 * Return the passive-scan dwell time (milliseconds) for channel 'c',
 * clamped via iwn_limit_dwell().  A NULL channel is treated as 2GHz.
 */
static uint16_t
iwn_get_passive_dwell_time(struct iwn_softc *sc, struct ieee80211_channel *c)
{
	uint16_t passive;

	if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ;
	} else {
		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ;
	}

	/* Clamp to the beacon interval if we're associated */
	return (iwn_limit_dwell(sc, passive));
}

/*
 * Build and send a firmware scan command for the current channel
 * (ic->ic_curchan only — one channel per invocation).  The command
 * buffer laid out here is, in order: scan header, TX command, up to 20
 * ESSID slots, a probe request frame, and one channel entry.  Returns
 * EAGAIN if a scan is already pending, else the iwn_cmd() status.
 */
static int
iwn_scan(struct iwn_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_scan_state *ss = ic->ic_scan;	/*XXX*/
	struct ieee80211_node *ni = ss->ss_vap->iv_bss;
	struct iwn_scan_hdr *hdr;
	struct iwn_cmd_data *tx;
	struct iwn_scan_essid *essid;
	struct iwn_scan_chan *chan;
	struct ieee80211_frame *wh;
	struct ieee80211_rateset *rs;
	struct ieee80211_channel *c;
	uint8_t *buf, *frm;
	uint16_t rxchain;
	uint8_t txant;
	int buflen, error;
	int is_active;
	uint16_t dwell_active, dwell_passive;
	uint32_t extra, scan_service_time;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/*
	 * We are absolutely not allowed to send a scan command when another
	 * scan command is pending.
	 */
	if (sc->sc_is_scanning) {
		device_printf(sc->sc_dev, "%s: called whilst scanning!\n",
		    __func__);
		return (EAGAIN);
	}

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	/* M_INTWAIT | M_ZERO: allocation blocks rather than fail. */
	buf = kmalloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_INTWAIT | M_ZERO);
	hdr = (struct iwn_scan_hdr *)buf;
	/*
	 * Move to the next channel if no frames are received within 10ms
	 * after sending the probe request.
	 */
	hdr->quiet_time = htole16(10);		/* timeout in milliseconds */
	hdr->quiet_threshold = htole16(1);	/* min # of packets */
	/*
	 * Max needs to be greater than active and passive and quiet!
	 * It's also in microseconds!
	 */
	hdr->max_svc = htole32(250 * 1024);

	/*
	 * Reset scan: interval=100
	 * Normal scan: interval=beacon interval
	 * suspend_time: 100 (TU)
	 *
	 */
	extra = (100 /* suspend_time */ / 100 /* beacon interval */) << 22;
	//scan_service_time = extra | ((100 /* susp */ % 100 /* int */) * 1024);
	scan_service_time = (4 << 22) | (100 * 1024); /* Hardcode for now! */
	hdr->pause_svc = htole32(scan_service_time);

	/* Select antennas for scanning. */
	rxchain =
	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
	    IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
	    IWN_RXCHAIN_DRIVER_FORCE;
	if (IEEE80211_IS_CHAN_A(ic->ic_curchan) &&
	    sc->hw_type == IWN_HW_REV_TYPE_4965) {
		/* Ant A must be avoided in 5GHz because of an HW bug. */
		rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
	} else	/* Use all available RX antennas. */
		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
	hdr->rxchain = htole16(rxchain);
	hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);

	/* TX command template for the probe request follows the header. */
	tx = (struct iwn_cmd_data *)(hdr + 1);
	tx->flags = htole32(IWN_TX_AUTO_SEQ);
	tx->id = sc->broadcast_id;
	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);

	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) {
		/* Send probe requests at 6Mbps. */
		tx->rate = htole32(0xd);
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
	} else {
		hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
		if (sc->hw_type == IWN_HW_REV_TYPE_4965 &&
		    sc->rxon->associd && sc->rxon->chan > 14)
			tx->rate = htole32(0xd);
		else {
			/* Send probe requests at 1Mbps. */
			tx->rate = htole32(10 | IWN_RFLAG_CCK);
		}
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	}
	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);
	tx->rate |= htole32(IWN_RFLAG_ANT(txant));

	/*
	 * Only do active scanning if we're announcing a probe request
	 * for a given SSID (or more, if we ever add it to the driver.)
	 */
	is_active = 0;

	/*
	 * If we're scanning for a specific SSID, add it to the command.
	 */
	essid = (struct iwn_scan_essid *)(tx + 1);
	if (ss->ss_ssid[0].len != 0) {
		essid[0].id = IEEE80211_ELEMID_SSID;
		essid[0].len = ss->ss_ssid[0].len;
		memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
	}

	DPRINTF(sc, IWN_DEBUG_SCAN, "%s: ssid_len=%d, ssid=%*s\n",
	    __func__,
	    ss->ss_ssid[0].len,
	    ss->ss_ssid[0].len,
	    ss->ss_ssid[0].ssid);

	if (ss->ss_nssid > 0)
		is_active = 1;

	/*
	 * Build a probe request frame.  Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	/*
	 * The frame starts after the full ESSID array; 20 is presumably
	 * the firmware's max ESSID slot count — confirm against
	 * if_iwnreg.h (IWN_SCAN_MAX_ESSIDS or equivalent).
	 */
	wh = (struct ieee80211_frame *)(essid + 20);
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp));
	IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, NULL, 0);	/* wildcard SSID */
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	if (ic->ic_htcaps & IEEE80211_HTC_HT)
		frm = ieee80211_add_htcap(frm, ni);

	/* Set length of probe request. */
	tx->len = htole16(frm - (uint8_t *)wh);

	/*
	 * If active scanning is requested but a certain channel is
	 * marked passive, we can do active scanning if we detect
	 * transmissions.
	 *
	 * There is an issue with some firmware versions that triggers
	 * a sysassert on a "good CRC threshold" of zero (== disabled),
	 * on a radar channel even though this means that we should NOT
	 * send probes.
	 *
	 * The "good CRC threshold" is the number of frames that we
	 * need to receive during our dwell time on a channel before
	 * sending out probes -- setting this to a huge value will
	 * mean we never reach it, but at the same time work around
	 * the aforementioned issue.  Thus use IWL_GOOD_CRC_TH_NEVER
	 * here instead of IWL_GOOD_CRC_TH_DISABLED.
	 *
	 * This was fixed in later versions along with some other
	 * scan changes, and the threshold behaves as a flag in those
	 * versions.
	 */

	/*
	 * If we're doing active scanning, set the crc_threshold
	 * to a suitable value.  This is different to active versus
	 * passive scanning depending upon the channel flags; the
	 * firmware will obey that particular check for us.
	 */
	if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN)
		hdr->crc_threshold = is_active ?
		    IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED;
	else
		hdr->crc_threshold = is_active ?
		    IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER;

	/* Single channel entry: the current channel. */
	c = ic->ic_curchan;
	chan = (struct iwn_scan_chan *)frm;
	chan->chan = htole16(ieee80211_chan2ieee(ic, c));
	chan->flags = 0;
	if (ss->ss_nssid > 0)
		chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
	chan->dsp_gain = 0x6e;

	/*
	 * Set the passive/active flag depending upon the channel mode.
	 * XXX TODO: take the is_active flag into account as well?
	 */
	if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
		chan->flags |= htole32(IWN_CHAN_PASSIVE);
	else
		chan->flags |= htole32(IWN_CHAN_ACTIVE);

	/*
	 * Calculate the active/passive dwell times.
	 */

	dwell_active = iwn_get_active_dwell_time(sc, c, ss->ss_nssid);
	dwell_passive = iwn_get_passive_dwell_time(sc, c);

	/* Make sure they're valid */
	if (dwell_passive <= dwell_active)
		dwell_passive = dwell_active + 1;

	chan->active = htole16(dwell_active);
	chan->passive = htole16(dwell_passive);

	/*
	 * NOTE(review): all four branches below collapse to 0x3b for
	 * 5GHz and 0x28 otherwise; the active/passive split appears to
	 * be a placeholder for per-mode gains.
	 */
	if (IEEE80211_IS_CHAN_5GHZ(c) &&
	    !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
		chan->rf_gain = 0x3b;
	} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
		chan->rf_gain = 0x3b;
	} else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
		chan->rf_gain = 0x28;
	} else {
		chan->rf_gain = 0x28;
	}

	DPRINTF(sc, IWN_DEBUG_STATE,
	    "%s: chan %u flags 0x%x rf_gain 0x%x "
	    "dsp_gain 0x%x active %d passive %d scan_svc_time %d crc 0x%x "
	    "isactive=%d numssid=%d\n", __func__,
	    chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
	    dwell_active, dwell_passive, scan_service_time,
	    hdr->crc_threshold, is_active, ss->ss_nssid);

	hdr->nchan++;
	chan++;
	buflen = (uint8_t *)chan - buf;
	hdr->len = htole16(buflen);

	if (sc->sc_is_scanning) {
		device_printf(sc->sc_dev,
		    "%s: called with is_scanning set!\n",
		    __func__);
	}
	sc->sc_is_scanning = 1;

	DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
	    hdr->nchan);
	error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
	kfree(buf, M_DEVBUF);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return error;
}

/*
 * Transition helper for AUTH state: program the RXON configuration for
 * the target BSS, update TX power and re-add the broadcast node.
 */
static int
iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_node *ni = vap->iv_bss;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	/* Update adapter configuration.
 */
	IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
	sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
		sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
	/* Per-band basic rate masks (bitmaps of firmware rate indices). */
	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
		sc->rxon->cck_mask = 0;
		sc->rxon->ofdm_mask = 0x15;
	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
		sc->rxon->cck_mask = 0x03;
		sc->rxon->ofdm_mask = 0;
	} else {
		/* Assume 802.11b/g. */
		sc->rxon->cck_mask = 0x0f;
		sc->rxon->ofdm_mask = 0x15;
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
	    sc->rxon->chan, sc->rxon->flags, sc->rxon->cck_mask,
	    sc->rxon->ofdm_mask);
	if (sc->sc_is_scanning)
		device_printf(sc->sc_dev,
		    "%s: is_scanning set, before RXON\n",
		    __func__);
	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n",
		    __func__, error);
		return error;
	}

	/* Configuration has changed, set TX power accordingly. */
	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set TX power, error %d\n", __func__, error);
		return error;
	}
	/*
	 * Reconfiguring RXON clears the firmware nodes table so we must
	 * add the broadcast node again.
	 */
	if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not add broadcast node, error %d\n", __func__,
		    error);
		return error;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * Transition helper for RUN state: program the associated RXON (with
 * AID, HT flags and BSS filter), update TX power, add the BSS node,
 * set link quality and start the periodic calibration timer.
 */
static int
iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_node *ni = vap->iv_bss;
	struct iwn_node_info node;
	uint32_t htflags = 0;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* Link LED blinks while monitoring. */
		iwn_set_led(sc, IWN_LED_LINK, 5, 5);
		return 0;
	}
	if ((error = iwn_set_timing(sc, ni)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set timing, error %d\n", __func__, error);
		return error;
	}

	/* Update adapter configuration. */
	IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
	sc->rxon->associd = htole16(IEEE80211_AID(ni->ni_associd));
	sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
		sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
	/* Same per-band rate masks as in iwn_auth(). */
	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
		sc->rxon->cck_mask = 0;
		sc->rxon->ofdm_mask = 0x15;
	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
		sc->rxon->cck_mask = 0x03;
		sc->rxon->ofdm_mask = 0;
	} else {
		/* Assume 802.11b/g. */
		sc->rxon->cck_mask = 0x0f;
		sc->rxon->ofdm_mask = 0x15;
	}
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
		htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode);
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			switch (ic->ic_curhtprotmode) {
			case IEEE80211_HTINFO_OPMODE_HT20PR:
				htflags |= IWN_RXON_HT_MODEPURE40;
				break;
			default:
				htflags |= IWN_RXON_HT_MODEMIXED;
				break;
			}
		}
		if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan))
			htflags |= IWN_RXON_HT_HT40MINUS;
	}
	sc->rxon->flags |= htole32(htflags);
	sc->rxon->filter |= htole32(IWN_FILTER_BSS);
	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x\n",
	    sc->rxon->chan, sc->rxon->flags);
	if (sc->sc_is_scanning)
		device_printf(sc->sc_dev,
		    "%s: is_scanning set, before RXON\n",
		    __func__);
	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not update configuration, error %d\n", __func__,
		    error);
		return error;
	}

	/* Configuration has changed, set TX power accordingly. */
	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set TX power, error %d\n", __func__, error);
		return error;
	}

	/* Fake a join to initialize the TX rate. */
	((struct iwn_node *)ni)->id = IWN_ID_BSS;
	iwn_newassoc(ni, 1);

	/* Add BSS node. */
	memset(&node, 0, sizeof node);
	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
	node.id = IWN_ID_BSS;
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
		/* Mirror the peer's SM power-save mode in the node entry. */
		switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) {
		case IEEE80211_HTCAP_SMPS_ENA:
			node.htflags |= htole32(IWN_SMPS_MIMO_DIS);
			break;
		case IEEE80211_HTCAP_SMPS_DYNAMIC:
			node.htflags |= htole32(IWN_SMPS_MIMO_PROT);
			break;
		}
		node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) |
		    IWN_AMDPU_DENSITY(5));	/* 4us */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
			node.htflags |= htole32(IWN_NODE_HT40);
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__);
	error = ops->add_node(sc, &node, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not add BSS node, error %d\n", __func__, error);
		return error;
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n",
	    __func__, node.id);
	if ((error = iwn_set_link_quality(sc, ni)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not setup link quality for node %d, error %d\n",
		    __func__, node.id, error);
		return error;
	}

	if ((error = iwn_init_sensitivity(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set sensitivity, error %d\n", __func__,
		    error);
		return error;
	}
	/* Start periodic calibration timer. */
	sc->calib.state = IWN_CALIB_STATE_ASSOC;
	sc->calib_cnt = 0;
	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
	    sc);

	/* Link LED always on while associated. */
	iwn_set_led(sc, IWN_LED_LINK, 0, 1);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * This function is called by upper layer when an ADDBA request is received
 * from another STA and before the ADDBA response is sent.
6755 */ 6756 static int 6757 iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap, 6758 int baparamset, int batimeout, int baseqctl) 6759 { 6760 #define MS(_v, _f) (((_v) & _f) >> _f##_S) 6761 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6762 struct iwn_ops *ops = &sc->ops; 6763 struct iwn_node *wn = (void *)ni; 6764 struct iwn_node_info node; 6765 uint16_t ssn; 6766 uint8_t tid; 6767 int error; 6768 6769 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6770 6771 tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID); 6772 ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START); 6773 6774 memset(&node, 0, sizeof node); 6775 node.id = wn->id; 6776 node.control = IWN_NODE_UPDATE; 6777 node.flags = IWN_FLAG_SET_ADDBA; 6778 node.addba_tid = tid; 6779 node.addba_ssn = htole16(ssn); 6780 DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n", 6781 wn->id, tid, ssn); 6782 error = ops->add_node(sc, &node, 1); 6783 if (error != 0) 6784 return error; 6785 return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl); 6786 #undef MS 6787 } 6788 6789 /* 6790 * This function is called by upper layer on teardown of an HT-immediate 6791 * Block Ack agreement (eg. uppon receipt of a DELBA frame). 
6792 */ 6793 static void 6794 iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap) 6795 { 6796 struct ieee80211com *ic = ni->ni_ic; 6797 struct iwn_softc *sc = ic->ic_ifp->if_softc; 6798 struct iwn_ops *ops = &sc->ops; 6799 struct iwn_node *wn = (void *)ni; 6800 struct iwn_node_info node; 6801 uint8_t tid; 6802 6803 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6804 6805 /* XXX: tid as an argument */ 6806 for (tid = 0; tid < WME_NUM_TID; tid++) { 6807 if (&ni->ni_rx_ampdu[tid] == rap) 6808 break; 6809 } 6810 6811 memset(&node, 0, sizeof node); 6812 node.id = wn->id; 6813 node.control = IWN_NODE_UPDATE; 6814 node.flags = IWN_FLAG_SET_DELBA; 6815 node.delba_tid = tid; 6816 DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid); 6817 (void)ops->add_node(sc, &node, 1); 6818 sc->sc_ampdu_rx_stop(ni, rap); 6819 } 6820 6821 static int 6822 iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 6823 int dialogtoken, int baparamset, int batimeout) 6824 { 6825 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6826 int qid; 6827 6828 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6829 6830 for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) { 6831 if (sc->qid2tap[qid] == NULL) 6832 break; 6833 } 6834 if (qid == sc->ntxqs) { 6835 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n", 6836 __func__); 6837 return 0; 6838 } 6839 tap->txa_private = kmalloc(sizeof(int), M_DEVBUF, M_INTWAIT); 6840 sc->qid2tap[qid] = tap; 6841 *(int *)tap->txa_private = qid; 6842 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, 6843 batimeout); 6844 } 6845 6846 static int 6847 iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 6848 int code, int baparamset, int batimeout) 6849 { 6850 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6851 int qid = *(int *)tap->txa_private; 6852 uint8_t tid = tap->txa_ac; 6853 int ret; 6854 6855 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", 
__func__); 6856 6857 if (code == IEEE80211_STATUS_SUCCESS) { 6858 ni->ni_txseqs[tid] = tap->txa_start & 0xfff; 6859 ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid); 6860 if (ret != 1) 6861 return ret; 6862 } else { 6863 sc->qid2tap[qid] = NULL; 6864 kfree(tap->txa_private, M_DEVBUF); 6865 tap->txa_private = NULL; 6866 } 6867 return sc->sc_addba_response(ni, tap, code, baparamset, batimeout); 6868 } 6869 6870 /* 6871 * This function is called by upper layer when an ADDBA response is received 6872 * from another STA. 6873 */ 6874 static int 6875 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 6876 uint8_t tid) 6877 { 6878 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid]; 6879 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6880 struct iwn_ops *ops = &sc->ops; 6881 struct iwn_node *wn = (void *)ni; 6882 struct iwn_node_info node; 6883 int error, qid; 6884 6885 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6886 6887 /* Enable TX for the specified RA/TID. 
*/ 6888 wn->disable_tid &= ~(1 << tid); 6889 memset(&node, 0, sizeof node); 6890 node.id = wn->id; 6891 node.control = IWN_NODE_UPDATE; 6892 node.flags = IWN_FLAG_SET_DISABLE_TID; 6893 node.disable_tid = htole16(wn->disable_tid); 6894 error = ops->add_node(sc, &node, 1); 6895 if (error != 0) 6896 return 0; 6897 6898 if ((error = iwn_nic_lock(sc)) != 0) 6899 return 0; 6900 qid = *(int *)tap->txa_private; 6901 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n", 6902 __func__, wn->id, tid, tap->txa_start, qid); 6903 ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff); 6904 iwn_nic_unlock(sc); 6905 6906 iwn_set_link_quality(sc, ni); 6907 return 1; 6908 } 6909 6910 static void 6911 iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) 6912 { 6913 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6914 struct iwn_ops *ops = &sc->ops; 6915 uint8_t tid = tap->txa_ac; 6916 int qid; 6917 6918 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6919 6920 sc->sc_addba_stop(ni, tap); 6921 6922 if (tap->txa_private == NULL) 6923 return; 6924 6925 qid = *(int *)tap->txa_private; 6926 if (sc->txq[qid].queued != 0) 6927 return; 6928 if (iwn_nic_lock(sc) != 0) 6929 return; 6930 ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff); 6931 iwn_nic_unlock(sc); 6932 sc->qid2tap[qid] = NULL; 6933 kfree(tap->txa_private, M_DEVBUF); 6934 tap->txa_private = NULL; 6935 } 6936 6937 static void 6938 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 6939 int qid, uint8_t tid, uint16_t ssn) 6940 { 6941 struct iwn_node *wn = (void *)ni; 6942 6943 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6944 6945 /* Stop TX scheduler while we're changing its configuration. */ 6946 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 6947 IWN4965_TXQ_STATUS_CHGACT); 6948 6949 /* Assign RA/TID translation to the queue. 
*/ 6950 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid), 6951 wn->id << 4 | tid); 6952 6953 /* Enable chain-building mode for the queue. */ 6954 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid); 6955 6956 /* Set starting sequence number from the ADDBA request. */ 6957 sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff); 6958 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 6959 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 6960 6961 /* Set scheduler window size. */ 6962 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), 6963 IWN_SCHED_WINSZ); 6964 /* Set scheduler frame limit. */ 6965 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 6966 IWN_SCHED_LIMIT << 16); 6967 6968 /* Enable interrupts for the queue. */ 6969 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 6970 6971 /* Mark the queue as active. */ 6972 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 6973 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA | 6974 iwn_tid2fifo[tid] << 1); 6975 } 6976 6977 static void 6978 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn) 6979 { 6980 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6981 6982 /* Stop TX scheduler while we're changing its configuration. */ 6983 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 6984 IWN4965_TXQ_STATUS_CHGACT); 6985 6986 /* Set starting sequence number from the ADDBA request. */ 6987 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 6988 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 6989 6990 /* Disable interrupts for the queue. */ 6991 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 6992 6993 /* Mark the queue as inactive. 
*/ 6994 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 6995 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); 6996 } 6997 6998 static void 6999 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 7000 int qid, uint8_t tid, uint16_t ssn) 7001 { 7002 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7003 7004 struct iwn_node *wn = (void *)ni; 7005 7006 /* Stop TX scheduler while we're changing its configuration. */ 7007 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7008 IWN5000_TXQ_STATUS_CHGACT); 7009 7010 /* Assign RA/TID translation to the queue. */ 7011 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), 7012 wn->id << 4 | tid); 7013 7014 /* Enable chain-building mode for the queue. */ 7015 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); 7016 7017 /* Enable aggregation for the queue. */ 7018 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 7019 7020 /* Set starting sequence number from the ADDBA request. */ 7021 sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff); 7022 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 7023 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 7024 7025 /* Set scheduler window size and frame limit. */ 7026 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 7027 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 7028 7029 /* Enable interrupts for the queue. */ 7030 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 7031 7032 /* Mark the queue as active. */ 7033 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7034 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); 7035 } 7036 7037 static void 7038 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn) 7039 { 7040 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7041 7042 /* Stop TX scheduler while we're changing its configuration. 
 */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_CHGACT);

	/* Disable aggregation for the queue. */
	iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);

	/* Set starting sequence number from the ADDBA request. */
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Disable interrupts for the queue. */
	iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as inactive. */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
}

/*
 * Query calibration tables from the initialization firmware.  We do this
 * only once at first boot.  Called from a process context.
 */
static int
iwn5000_query_calibration(struct iwn_softc *sc)
{
	struct iwn5000_calib_config cmd;
	int error;

	/* 0xffffffff == request every calibration the firmware offers. */
	memset(&cmd, 0, sizeof cmd);
	cmd.ucode.once.enable = 0xffffffff;
	cmd.ucode.once.start = 0xffffffff;
	cmd.ucode.once.send = 0xffffffff;
	cmd.ucode.flags = 0xffffffff;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n",
	    __func__);
	error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
	if (error != 0)
		return error;

	/* Wait at most two seconds for calibration to complete. */
	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
		error = zsleep(sc, &wlan_global_serializer, 0, "iwncal", 2 * hz);
	return error;
}

/*
 * Send calibration results to the runtime firmware.  These results were
 * obtained on first boot from the initialization firmware.
 */
static int
iwn5000_send_calibration(struct iwn_softc *sc)
{
	int idx, error;

	for (idx = 0; idx < IWN5000_PHY_CALIB_MAX_RESULT; idx++) {
		/* Skip calibrations this chip variant does not need. */
		if (!(sc->base_params->calib_need & (1<<idx))) {
			DPRINTF(sc, IWN_DEBUG_CALIBRATE,
			    "No need of calib %d\n",
			    idx);
			continue; /* no need for this calib */
		}
		/* Required but never captured from the init firmware. */
		if (sc->calibcmd[idx].buf == NULL) {
			DPRINTF(sc, IWN_DEBUG_CALIBRATE,
			    "Need calib idx : %d but no available data\n",
			    idx);
			continue;
		}

		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "send calibration result idx=%d len=%d\n", idx,
		    sc->calibcmd[idx].len);
		error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
		    sc->calibcmd[idx].len, 0);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not send calibration result, error %d\n",
			    __func__, error);
			return error;
		}
	}
	return 0;
}

/*
 * Configure (currently: disable) WiMAX coexistence in the firmware.
 * The 6050 combo-adapter enable path is compiled out under 'notyet'.
 */
static int
iwn5000_send_wimax_coex(struct iwn_softc *sc)
{
	struct iwn5000_wimax_coex wimax;

#ifdef notyet
	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
		/* Enable WiMAX coexistence for combo adapters. */
		wimax.flags =
		    IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
		    IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
		    IWN_WIMAX_COEX_STA_TABLE_VALID |
		    IWN_WIMAX_COEX_ENABLE;
		memcpy(wimax.events, iwn6050_wimax_events,
		    sizeof iwn6050_wimax_events);
	} else
#endif
	{
		/*
		 * Disable WiMAX coexistence.
		 *
		 * NOTE(review): only 'flags' and 'events' are set here;
		 * any padding in 'wimax' reaches the firmware
		 * uninitialized — consider memset of the whole struct
		 * (confirm layout in if_iwnreg.h).
		 */
		wimax.flags = 0;
		memset(wimax.events, 0, sizeof wimax.events);
	}
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
	    __func__);
	return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
}

/*
 * Send the crystal calibration command, built from the capacitor pin
 * values stored in the EEPROM.
 */
static int
iwn5000_crystal_calib(struct iwn_softc *sc)
{
	struct iwn5000_phy_calib_crystal cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	/* cap_pin values live in the low byte of each 16-bit half. */
	cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
	cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n",
	    cmd.cap_pin[0], cmd.cap_pin[1]);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}

/*
 * Send the temperature-offset calibration command (v1 layout: a single
 * offset), falling back to IWN_DEFAULT_TEMP_OFFSET when the EEPROM
 * value is zero.
 */
static int
iwn5000_temp_offset_calib(struct iwn_softc *sc)
{
	struct iwn5000_phy_calib_temp_offset cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	if (sc->eeprom_temp != 0)
		cmd.offset = htole16(sc->eeprom_temp);
	else
		cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n",
	    le16toh(cmd.offset));
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}

/*
 * Send the temperature-offset calibration command (v2 layout: low/high
 * offsets plus a burnt voltage reference), falling back to
 * IWN_DEFAULT_TEMP_OFFSET when the EEPROM value is zero.
 */
static int
iwn5000_temp_offset_calibv2(struct iwn_softc *sc)
{
	struct iwn5000_phy_calib_temp_offsetv2 cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	if (sc->eeprom_temp != 0) {
		cmd.offset_low = htole16(sc->eeprom_temp);
		cmd.offset_high = htole16(sc->eeprom_temp_high);
	} else {
		cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET);
		cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET);
	}
	cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage);

	DPRINTF(sc,
	    IWN_DEBUG_CALIBRATE,
	    "setting radio sensor low offset to %d, high offset to %d, voltage to %d\n",
	    le16toh(cmd.offset_low),
	    le16toh(cmd.offset_high),
	    le16toh(cmd.burnt_voltage_ref));

	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}

/*
 * This function is called after the runtime firmware notifies us of its
 * readiness (called in a process context).
 */
static int
iwn4965_post_alive(struct iwn_softc *sc)
{
	int error, qid;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Clear TX scheduler state in SRAM. */
	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
	iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
	    IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);

	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);

	/* Disable chain mode for all our 16 queues. */
	iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);

	for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);

		/* Set scheduler window size. */
		iwn_mem_write(sc, sc->sched_base +
		    IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
		/* Set scheduler frame limit. */
		iwn_mem_write(sc, sc->sched_base +
		    IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
		    IWN_SCHED_LIMIT << 16);
	}

	/* Enable interrupts for all our 16 queues. */
	iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
	/* Identify TX FIFO rings (0-7). */
	iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);

	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active.
*/ 7260 for (qid = 0; qid < 7; qid++) { 7261 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; 7262 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 7263 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); 7264 } 7265 iwn_nic_unlock(sc); 7266 return 0; 7267 } 7268 7269 /* 7270 * This function is called after the initialization or runtime firmware 7271 * notifies us of its readiness (called in a process context). 7272 */ 7273 static int 7274 iwn5000_post_alive(struct iwn_softc *sc) 7275 { 7276 int error, qid; 7277 7278 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 7279 7280 /* Switch to using ICT interrupt mode. */ 7281 iwn5000_ict_reset(sc); 7282 7283 if ((error = iwn_nic_lock(sc)) != 0){ 7284 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); 7285 return error; 7286 } 7287 7288 /* Clear TX scheduler state in SRAM. */ 7289 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 7290 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, 7291 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t)); 7292 7293 /* Set physical address of TX scheduler rings (1KB aligned). */ 7294 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 7295 7296 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 7297 7298 /* Enable chain mode for all queues, except command queue. */ 7299 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) 7300 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffdf); 7301 else 7302 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef); 7303 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); 7304 7305 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { 7306 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); 7307 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 7308 7309 iwn_mem_write(sc, sc->sched_base + 7310 IWN5000_SCHED_QUEUE_OFFSET(qid), 0); 7311 /* Set scheduler window size and frame limit. 
*/ 7312 iwn_mem_write(sc, sc->sched_base + 7313 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 7314 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 7315 } 7316 7317 /* Enable interrupts for all our 20 queues. */ 7318 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); 7319 /* Identify TX FIFO rings (0-7). */ 7320 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); 7321 7322 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 7323 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) { 7324 /* Mark TX rings as active. */ 7325 for (qid = 0; qid < 11; qid++) { 7326 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 0, 4, 2, 5, 4, 7, 5 }; 7327 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7328 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 7329 } 7330 } else { 7331 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 7332 for (qid = 0; qid < 7; qid++) { 7333 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; 7334 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7335 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 7336 } 7337 } 7338 iwn_nic_unlock(sc); 7339 7340 /* Configure WiMAX coexistence for combo adapters. */ 7341 error = iwn5000_send_wimax_coex(sc); 7342 if (error != 0) { 7343 device_printf(sc->sc_dev, 7344 "%s: could not configure WiMAX coexistence, error %d\n", 7345 __func__, error); 7346 return error; 7347 } 7348 if (sc->hw_type != IWN_HW_REV_TYPE_5150) { 7349 /* Perform crystal calibration. */ 7350 error = iwn5000_crystal_calib(sc); 7351 if (error != 0) { 7352 device_printf(sc->sc_dev, 7353 "%s: crystal calibration failed, error %d\n", 7354 __func__, error); 7355 return error; 7356 } 7357 } 7358 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) { 7359 /* Query calibration from the initialization firmware. 
*/ 7360 if ((error = iwn5000_query_calibration(sc)) != 0) { 7361 device_printf(sc->sc_dev, 7362 "%s: could not query calibration, error %d\n", 7363 __func__, error); 7364 return error; 7365 } 7366 /* 7367 * We have the calibration results now, reboot with the 7368 * runtime firmware (call ourselves recursively!) 7369 */ 7370 iwn_hw_stop(sc); 7371 error = iwn_hw_init(sc); 7372 } else { 7373 /* Send calibration results to runtime firmware. */ 7374 error = iwn5000_send_calibration(sc); 7375 } 7376 7377 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 7378 7379 return error; 7380 } 7381 7382 /* 7383 * The firmware boot code is small and is intended to be copied directly into 7384 * the NIC internal memory (no DMA transfer). 7385 */ 7386 static int 7387 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size) 7388 { 7389 int error, ntries; 7390 7391 size /= sizeof (uint32_t); 7392 7393 if ((error = iwn_nic_lock(sc)) != 0) 7394 return error; 7395 7396 /* Copy microcode image into NIC memory. */ 7397 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE, 7398 (const uint32_t *)ucode, size); 7399 7400 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0); 7401 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE); 7402 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size); 7403 7404 /* Start boot load now. */ 7405 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START); 7406 7407 /* Wait for transfer to complete. */ 7408 for (ntries = 0; ntries < 1000; ntries++) { 7409 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) & 7410 IWN_BSM_WR_CTRL_START)) 7411 break; 7412 DELAY(10); 7413 } 7414 if (ntries == 1000) { 7415 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 7416 __func__); 7417 iwn_nic_unlock(sc); 7418 return ETIMEDOUT; 7419 } 7420 7421 /* Enable boot after power up. 
*/ 7422 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN); 7423 7424 iwn_nic_unlock(sc); 7425 return 0; 7426 } 7427 7428 static int 7429 iwn4965_load_firmware(struct iwn_softc *sc) 7430 { 7431 struct iwn_fw_info *fw = &sc->fw; 7432 struct iwn_dma_info *dma = &sc->fw_dma; 7433 int error; 7434 7435 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 7436 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 7437 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 7438 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 7439 fw->init.text, fw->init.textsz); 7440 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 7441 7442 /* Tell adapter where to find initialization sections. */ 7443 if ((error = iwn_nic_lock(sc)) != 0) 7444 return error; 7445 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 7446 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz); 7447 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 7448 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 7449 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 7450 iwn_nic_unlock(sc); 7451 7452 /* Load firmware boot code. */ 7453 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 7454 if (error != 0) { 7455 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 7456 __func__); 7457 return error; 7458 } 7459 /* Now press "execute". */ 7460 IWN_WRITE(sc, IWN_RESET, 0); 7461 7462 /* Wait at most one second for first alive notification. */ 7463 if ((error = zsleep(sc, &wlan_global_serializer, 0, "iwninit", hz)) != 0) { 7464 device_printf(sc->sc_dev, 7465 "%s: timeout waiting for adapter to initialize, error %d\n", 7466 __func__, error); 7467 return error; 7468 } 7469 7470 /* Retrieve current temperature for initial TX power calibration. */ 7471 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; 7472 sc->temp = iwn4965_get_temperature(sc); 7473 7474 /* Copy runtime sections into pre-allocated DMA-safe memory. 
*/ 7475 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 7476 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 7477 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 7478 fw->main.text, fw->main.textsz); 7479 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 7480 7481 /* Tell adapter where to find runtime sections. */ 7482 if ((error = iwn_nic_lock(sc)) != 0) 7483 return error; 7484 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 7485 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz); 7486 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 7487 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 7488 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, 7489 IWN_FW_UPDATED | fw->main.textsz); 7490 iwn_nic_unlock(sc); 7491 7492 return 0; 7493 } 7494 7495 static int 7496 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst, 7497 const uint8_t *section, int size) 7498 { 7499 struct iwn_dma_info *dma = &sc->fw_dma; 7500 int error; 7501 7502 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7503 7504 /* Copy firmware section into pre-allocated DMA-safe memory. */ 7505 memcpy(dma->vaddr, section, size); 7506 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 7507 7508 if ((error = iwn_nic_lock(sc)) != 0) 7509 return error; 7510 7511 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 7512 IWN_FH_TX_CONFIG_DMA_PAUSE); 7513 7514 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst); 7515 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL), 7516 IWN_LOADDR(dma->paddr)); 7517 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL), 7518 IWN_HIADDR(dma->paddr) << 28 | size); 7519 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL), 7520 IWN_FH_TXBUF_STATUS_TBNUM(1) | 7521 IWN_FH_TXBUF_STATUS_TBIDX(1) | 7522 IWN_FH_TXBUF_STATUS_TFBD_VALID); 7523 7524 /* Kick Flow Handler to start DMA transfer. 
*/ 7525 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 7526 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD); 7527 7528 iwn_nic_unlock(sc); 7529 7530 /* Wait at most five seconds for FH DMA transfer to complete. */ 7531 return zsleep(sc, &wlan_global_serializer, 0, "iwninit", 5 * hz); 7532 } 7533 7534 static int 7535 iwn5000_load_firmware(struct iwn_softc *sc) 7536 { 7537 struct iwn_fw_part *fw; 7538 int error; 7539 7540 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7541 7542 /* Load the initialization firmware on first boot only. */ 7543 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ? 7544 &sc->fw.main : &sc->fw.init; 7545 7546 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE, 7547 fw->text, fw->textsz); 7548 if (error != 0) { 7549 device_printf(sc->sc_dev, 7550 "%s: could not load firmware %s section, error %d\n", 7551 __func__, ".text", error); 7552 return error; 7553 } 7554 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE, 7555 fw->data, fw->datasz); 7556 if (error != 0) { 7557 device_printf(sc->sc_dev, 7558 "%s: could not load firmware %s section, error %d\n", 7559 __func__, ".data", error); 7560 return error; 7561 } 7562 7563 /* Now press "execute". */ 7564 IWN_WRITE(sc, IWN_RESET, 0); 7565 return 0; 7566 } 7567 7568 /* 7569 * Extract text and data sections from a legacy firmware image. 7570 */ 7571 static int 7572 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw) 7573 { 7574 const uint32_t *ptr; 7575 size_t hdrlen = 24; 7576 uint32_t rev; 7577 7578 ptr = (const uint32_t *)fw->data; 7579 rev = le32toh(*ptr++); 7580 7581 /* Check firmware API version. */ 7582 if (IWN_FW_API(rev) <= 1) { 7583 device_printf(sc->sc_dev, 7584 "%s: bad firmware, need API version >=2\n", __func__); 7585 return EINVAL; 7586 } 7587 if (IWN_FW_API(rev) >= 3) { 7588 /* Skip build number (version 2 header). 
*/ 7589 hdrlen += 4; 7590 ptr++; 7591 } 7592 if (fw->size < hdrlen) { 7593 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 7594 __func__, fw->size); 7595 return EINVAL; 7596 } 7597 fw->main.textsz = le32toh(*ptr++); 7598 fw->main.datasz = le32toh(*ptr++); 7599 fw->init.textsz = le32toh(*ptr++); 7600 fw->init.datasz = le32toh(*ptr++); 7601 fw->boot.textsz = le32toh(*ptr++); 7602 7603 /* Check that all firmware sections fit. */ 7604 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz + 7605 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 7606 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 7607 __func__, fw->size); 7608 return EINVAL; 7609 } 7610 7611 /* Get pointers to firmware sections. */ 7612 fw->main.text = (const uint8_t *)ptr; 7613 fw->main.data = fw->main.text + fw->main.textsz; 7614 fw->init.text = fw->main.data + fw->main.datasz; 7615 fw->init.data = fw->init.text + fw->init.textsz; 7616 fw->boot.text = fw->init.data + fw->init.datasz; 7617 return 0; 7618 } 7619 7620 /* 7621 * Extract text and data sections from a TLV firmware image. 
7622 */ 7623 static int 7624 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, 7625 uint16_t alt) 7626 { 7627 const struct iwn_fw_tlv_hdr *hdr; 7628 const struct iwn_fw_tlv *tlv; 7629 const uint8_t *ptr, *end; 7630 uint64_t altmask; 7631 uint32_t len, tmp; 7632 7633 if (fw->size < sizeof (*hdr)) { 7634 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 7635 __func__, fw->size); 7636 return EINVAL; 7637 } 7638 hdr = (const struct iwn_fw_tlv_hdr *)fw->data; 7639 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { 7640 device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n", 7641 __func__, le32toh(hdr->signature)); 7642 return EINVAL; 7643 } 7644 DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr, 7645 le32toh(hdr->build)); 7646 7647 /* 7648 * Select the closest supported alternative that is less than 7649 * or equal to the specified one. 7650 */ 7651 altmask = le64toh(hdr->altmask); 7652 while (alt > 0 && !(altmask & (1ULL << alt))) 7653 alt--; /* Downgrade. */ 7654 DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt); 7655 7656 ptr = (const uint8_t *)(hdr + 1); 7657 end = (const uint8_t *)(fw->data + fw->size); 7658 7659 /* Parse type-length-value fields. */ 7660 while (ptr + sizeof (*tlv) <= end) { 7661 tlv = (const struct iwn_fw_tlv *)ptr; 7662 len = le32toh(tlv->len); 7663 7664 ptr += sizeof (*tlv); 7665 if (ptr + len > end) { 7666 device_printf(sc->sc_dev, 7667 "%s: firmware too short: %zu bytes\n", __func__, 7668 fw->size); 7669 return EINVAL; 7670 } 7671 /* Skip other alternatives. 
*/ 7672 if (tlv->alt != 0 && tlv->alt != htole16(alt)) 7673 goto next; 7674 7675 switch (le16toh(tlv->type)) { 7676 case IWN_FW_TLV_MAIN_TEXT: 7677 fw->main.text = ptr; 7678 fw->main.textsz = len; 7679 break; 7680 case IWN_FW_TLV_MAIN_DATA: 7681 fw->main.data = ptr; 7682 fw->main.datasz = len; 7683 break; 7684 case IWN_FW_TLV_INIT_TEXT: 7685 fw->init.text = ptr; 7686 fw->init.textsz = len; 7687 break; 7688 case IWN_FW_TLV_INIT_DATA: 7689 fw->init.data = ptr; 7690 fw->init.datasz = len; 7691 break; 7692 case IWN_FW_TLV_BOOT_TEXT: 7693 fw->boot.text = ptr; 7694 fw->boot.textsz = len; 7695 break; 7696 case IWN_FW_TLV_ENH_SENS: 7697 if (!len) 7698 sc->sc_flags |= IWN_FLAG_ENH_SENS; 7699 break; 7700 case IWN_FW_TLV_PHY_CALIB: 7701 tmp = le32toh(*ptr); 7702 if (tmp < 253) { 7703 sc->reset_noise_gain = tmp; 7704 sc->noise_gain = tmp + 1; 7705 } 7706 break; 7707 case IWN_FW_TLV_PAN: 7708 sc->sc_flags |= IWN_FLAG_PAN_SUPPORT; 7709 DPRINTF(sc, IWN_DEBUG_RESET, 7710 "PAN Support found: %d\n", 1); 7711 break; 7712 case IWN_FW_TLV_FLAGS: 7713 if (len < sizeof(uint32_t)) 7714 break; 7715 if (len % sizeof(uint32_t)) 7716 break; 7717 sc->tlv_feature_flags = le32toh(*ptr); 7718 DPRINTF(sc, IWN_DEBUG_RESET, 7719 "%s: feature: 0x%08x\n", 7720 __func__, 7721 sc->tlv_feature_flags); 7722 break; 7723 case IWN_FW_TLV_PBREQ_MAXLEN: 7724 case IWN_FW_TLV_RUNT_EVTLOG_PTR: 7725 case IWN_FW_TLV_RUNT_EVTLOG_SIZE: 7726 case IWN_FW_TLV_RUNT_ERRLOG_PTR: 7727 case IWN_FW_TLV_INIT_EVTLOG_PTR: 7728 case IWN_FW_TLV_INIT_EVTLOG_SIZE: 7729 case IWN_FW_TLV_INIT_ERRLOG_PTR: 7730 case IWN_FW_TLV_WOWLAN_INST: 7731 case IWN_FW_TLV_WOWLAN_DATA: 7732 DPRINTF(sc, IWN_DEBUG_RESET, 7733 "TLV type %d reconized but not handled\n", 7734 le16toh(tlv->type)); 7735 break; 7736 default: 7737 DPRINTF(sc, IWN_DEBUG_RESET, 7738 "TLV type %d not handled\n", le16toh(tlv->type)); 7739 break; 7740 } 7741 next: /* TLV fields are 32-bit aligned. 
*/ 7742 ptr += (len + 3) & ~3; 7743 } 7744 return 0; 7745 } 7746 7747 static int 7748 iwn_read_firmware(struct iwn_softc *sc) 7749 { 7750 struct iwn_fw_info *fw = &sc->fw; 7751 int error; 7752 7753 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7754 7755 wlan_assert_serialized(); 7756 memset(fw, 0, sizeof (*fw)); 7757 7758 /* 7759 * Read firmware image from filesystem. The firmware can block 7760 * in a taskq and deadlock against our serializer so unlock 7761 * while we do tihs. 7762 */ 7763 wlan_serialize_exit(); 7764 sc->fw_fp = firmware_get(sc->fwname); 7765 wlan_serialize_enter(); 7766 if (sc->fw_fp == NULL) { 7767 device_printf(sc->sc_dev, "%s: could not read firmware %s\n", 7768 __func__, sc->fwname); 7769 return EINVAL; 7770 } 7771 7772 fw->size = sc->fw_fp->datasize; 7773 fw->data = (const uint8_t *)sc->fw_fp->data; 7774 if (fw->size < sizeof (uint32_t)) { 7775 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 7776 __func__, fw->size); 7777 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 7778 sc->fw_fp = NULL; 7779 return EINVAL; 7780 } 7781 7782 /* Retrieve text and data sections. */ 7783 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */ 7784 error = iwn_read_firmware_leg(sc, fw); 7785 else 7786 error = iwn_read_firmware_tlv(sc, fw, 1); 7787 if (error != 0) { 7788 device_printf(sc->sc_dev, 7789 "%s: could not read firmware sections, error %d\n", 7790 __func__, error); 7791 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 7792 sc->fw_fp = NULL; 7793 return error; 7794 } 7795 7796 /* Make sure text and data sections fit in hardware memory. 
*/ 7797 if (fw->main.textsz > sc->fw_text_maxsz || 7798 fw->main.datasz > sc->fw_data_maxsz || 7799 fw->init.textsz > sc->fw_text_maxsz || 7800 fw->init.datasz > sc->fw_data_maxsz || 7801 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ || 7802 (fw->boot.textsz & 3) != 0) { 7803 device_printf(sc->sc_dev, "%s: firmware sections too large\n", 7804 __func__); 7805 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 7806 sc->fw_fp = NULL; 7807 return EINVAL; 7808 } 7809 7810 /* We can proceed with loading the firmware. */ 7811 return 0; 7812 } 7813 7814 static int 7815 iwn_clock_wait(struct iwn_softc *sc) 7816 { 7817 int ntries; 7818 7819 /* Set "initialization complete" bit. */ 7820 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 7821 7822 /* Wait for clock stabilization. */ 7823 for (ntries = 0; ntries < 2500; ntries++) { 7824 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY) 7825 return 0; 7826 DELAY(10); 7827 } 7828 device_printf(sc->sc_dev, 7829 "%s: timeout waiting for clock stabilization\n", __func__); 7830 return ETIMEDOUT; 7831 } 7832 7833 static int 7834 iwn_apm_init(struct iwn_softc *sc) 7835 { 7836 uint32_t reg; 7837 int error; 7838 7839 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7840 7841 /* Disable L0s exit timer (NMI bug workaround). */ 7842 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); 7843 /* Don't wait for ICH L0s (ICH bug workaround). */ 7844 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); 7845 7846 /* Set FH wait threshold to max (HW bug under stress workaround). */ 7847 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000); 7848 7849 /* Enable HAP INTA to move adapter from L1a to L0s. */ 7850 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A); 7851 7852 /* Retrieve PCIe Active State Power Management (ASPM). */ 7853 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 7854 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 7855 if (reg & 0x02) /* L1 Entry enabled. 
*/ 7856 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 7857 else 7858 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 7859 7860 if (sc->base_params->pll_cfg_val) 7861 IWN_SETBITS(sc, IWN_ANA_PLL, sc->base_params->pll_cfg_val); 7862 7863 /* Wait for clock stabilization before accessing prph. */ 7864 if ((error = iwn_clock_wait(sc)) != 0) 7865 return error; 7866 7867 if ((error = iwn_nic_lock(sc)) != 0) 7868 return error; 7869 if (sc->hw_type == IWN_HW_REV_TYPE_4965) { 7870 /* Enable DMA and BSM (Bootstrap State Machine). */ 7871 iwn_prph_write(sc, IWN_APMG_CLK_EN, 7872 IWN_APMG_CLK_CTRL_DMA_CLK_RQT | 7873 IWN_APMG_CLK_CTRL_BSM_CLK_RQT); 7874 } else { 7875 /* Enable DMA. */ 7876 iwn_prph_write(sc, IWN_APMG_CLK_EN, 7877 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 7878 } 7879 DELAY(20); 7880 /* Disable L1-Active. */ 7881 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); 7882 iwn_nic_unlock(sc); 7883 7884 return 0; 7885 } 7886 7887 static void 7888 iwn_apm_stop_master(struct iwn_softc *sc) 7889 { 7890 int ntries; 7891 7892 /* Stop busmaster DMA activity. */ 7893 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER); 7894 for (ntries = 0; ntries < 100; ntries++) { 7895 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED) 7896 return; 7897 DELAY(10); 7898 } 7899 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__); 7900 } 7901 7902 static void 7903 iwn_apm_stop(struct iwn_softc *sc) 7904 { 7905 iwn_apm_stop_master(sc); 7906 7907 /* Reset the entire device. */ 7908 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW); 7909 DELAY(10); 7910 /* Clear "initialization complete" bit. */ 7911 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 7912 } 7913 7914 static int 7915 iwn4965_nic_config(struct iwn_softc *sc) 7916 { 7917 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7918 7919 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) { 7920 /* 7921 * I don't believe this to be correct but this is what the 7922 * vendor driver is doing. 
Probably the bits should not be 7923 * shifted in IWN_RFCFG_*. 7924 */ 7925 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 7926 IWN_RFCFG_TYPE(sc->rfcfg) | 7927 IWN_RFCFG_STEP(sc->rfcfg) | 7928 IWN_RFCFG_DASH(sc->rfcfg)); 7929 } 7930 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 7931 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 7932 return 0; 7933 } 7934 7935 static int 7936 iwn5000_nic_config(struct iwn_softc *sc) 7937 { 7938 uint32_t tmp; 7939 int error; 7940 7941 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7942 7943 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) { 7944 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 7945 IWN_RFCFG_TYPE(sc->rfcfg) | 7946 IWN_RFCFG_STEP(sc->rfcfg) | 7947 IWN_RFCFG_DASH(sc->rfcfg)); 7948 } 7949 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 7950 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 7951 7952 if ((error = iwn_nic_lock(sc)) != 0) 7953 return error; 7954 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS); 7955 7956 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 7957 /* 7958 * Select first Switching Voltage Regulator (1.32V) to 7959 * solve a stability issue related to noisy DC2DC line 7960 * in the silicon of 1000 Series. 7961 */ 7962 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR); 7963 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK; 7964 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32; 7965 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp); 7966 } 7967 iwn_nic_unlock(sc); 7968 7969 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) { 7970 /* Use internal power amplifier only. */ 7971 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA); 7972 } 7973 if (sc->base_params->additional_nic_config && sc->calib_ver >= 6) { 7974 /* Indicate that ROM calibration version is >=6. 
*/ 7975 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6); 7976 } 7977 if (sc->base_params->additional_gp_drv_bit) 7978 IWN_SETBITS(sc, IWN_GP_DRIVER, 7979 sc->base_params->additional_gp_drv_bit); 7980 return 0; 7981 } 7982 7983 /* 7984 * Take NIC ownership over Intel Active Management Technology (AMT). 7985 */ 7986 static int 7987 iwn_hw_prepare(struct iwn_softc *sc) 7988 { 7989 int ntries; 7990 7991 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7992 7993 /* Check if hardware is ready. */ 7994 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 7995 for (ntries = 0; ntries < 5; ntries++) { 7996 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 7997 IWN_HW_IF_CONFIG_NIC_READY) 7998 return 0; 7999 DELAY(10); 8000 } 8001 8002 /* Hardware not ready, force into ready state. */ 8003 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE); 8004 for (ntries = 0; ntries < 15000; ntries++) { 8005 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) & 8006 IWN_HW_IF_CONFIG_PREPARE_DONE)) 8007 break; 8008 DELAY(10); 8009 } 8010 if (ntries == 15000) 8011 return ETIMEDOUT; 8012 8013 /* Hardware should be ready now. */ 8014 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 8015 for (ntries = 0; ntries < 5; ntries++) { 8016 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 8017 IWN_HW_IF_CONFIG_NIC_READY) 8018 return 0; 8019 DELAY(10); 8020 } 8021 return ETIMEDOUT; 8022 } 8023 8024 static int 8025 iwn_hw_init(struct iwn_softc *sc) 8026 { 8027 struct iwn_ops *ops = &sc->ops; 8028 int error, chnl, qid; 8029 8030 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 8031 8032 /* Clear pending interrupts. */ 8033 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8034 8035 if ((error = iwn_apm_init(sc)) != 0) { 8036 device_printf(sc->sc_dev, 8037 "%s: could not power ON adapter, error %d\n", __func__, 8038 error); 8039 return error; 8040 } 8041 8042 /* Select VMAIN power source. 
*/ 8043 if ((error = iwn_nic_lock(sc)) != 0) 8044 return error; 8045 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK); 8046 iwn_nic_unlock(sc); 8047 8048 /* Perform adapter-specific initialization. */ 8049 if ((error = ops->nic_config(sc)) != 0) 8050 return error; 8051 8052 /* Initialize RX ring. */ 8053 if ((error = iwn_nic_lock(sc)) != 0) 8054 return error; 8055 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 8056 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0); 8057 /* Set physical address of RX ring (256-byte aligned). */ 8058 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8); 8059 /* Set physical address of RX status (16-byte aligned). */ 8060 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4); 8061 /* Enable RX. */ 8062 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 8063 IWN_FH_RX_CONFIG_ENA | 8064 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */ 8065 IWN_FH_RX_CONFIG_IRQ_DST_HOST | 8066 IWN_FH_RX_CONFIG_SINGLE_FRAME | 8067 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) | 8068 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG)); 8069 iwn_nic_unlock(sc); 8070 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7); 8071 8072 if ((error = iwn_nic_lock(sc)) != 0) 8073 return error; 8074 8075 /* Initialize TX scheduler. */ 8076 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 8077 8078 /* Set physical address of "keep warm" page (16-byte aligned). */ 8079 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4); 8080 8081 /* Initialize TX rings. */ 8082 for (qid = 0; qid < sc->ntxqs; qid++) { 8083 struct iwn_tx_ring *txq = &sc->txq[qid]; 8084 8085 /* Set physical address of TX ring (256-byte aligned). */ 8086 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid), 8087 txq->desc_dma.paddr >> 8); 8088 } 8089 iwn_nic_unlock(sc); 8090 8091 /* Enable DMA channels. */ 8092 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 8093 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 8094 IWN_FH_TX_CONFIG_DMA_ENA | 8095 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA); 8096 } 8097 8098 /* Clear "radio off" and "commands blocked" bits. 
*/ 8099 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 8100 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED); 8101 8102 /* Clear pending interrupts. */ 8103 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8104 /* Enable interrupt coalescing. */ 8105 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8); 8106 /* Enable interrupts. */ 8107 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 8108 8109 /* _Really_ make sure "radio off" bit is cleared! */ 8110 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 8111 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 8112 8113 /* Enable shadow registers. */ 8114 if (sc->base_params->shadow_reg_enable) 8115 IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff); 8116 8117 if ((error = ops->load_firmware(sc)) != 0) { 8118 device_printf(sc->sc_dev, 8119 "%s: could not load firmware, error %d\n", __func__, 8120 error); 8121 return error; 8122 } 8123 /* Wait at most one second for firmware alive notification. */ 8124 if ((error = zsleep(sc, &wlan_global_serializer, 0, "iwninit", hz)) != 0) { 8125 device_printf(sc->sc_dev, 8126 "%s: timeout waiting for adapter to initialize, error %d\n", 8127 __func__, error); 8128 return error; 8129 } 8130 /* Do post-firmware initialization. */ 8131 8132 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 8133 8134 return ops->post_alive(sc); 8135 } 8136 8137 static void 8138 iwn_hw_stop(struct iwn_softc *sc) 8139 { 8140 int chnl, qid, ntries; 8141 8142 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8143 8144 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO); 8145 8146 /* Disable interrupts. */ 8147 IWN_WRITE(sc, IWN_INT_MASK, 0); 8148 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8149 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff); 8150 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 8151 8152 /* Make sure we no longer hold the NIC lock. */ 8153 iwn_nic_unlock(sc); 8154 8155 /* Stop TX scheduler. */ 8156 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 8157 8158 /* Stop all DMA channels. 
*/ 8159 if (iwn_nic_lock(sc) == 0) { 8160 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 8161 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0); 8162 for (ntries = 0; ntries < 200; ntries++) { 8163 if (IWN_READ(sc, IWN_FH_TX_STATUS) & 8164 IWN_FH_TX_STATUS_IDLE(chnl)) 8165 break; 8166 DELAY(10); 8167 } 8168 } 8169 iwn_nic_unlock(sc); 8170 } 8171 8172 /* Stop RX ring. */ 8173 iwn_reset_rx_ring(sc, &sc->rxq); 8174 8175 /* Reset all TX rings. */ 8176 for (qid = 0; qid < sc->ntxqs; qid++) 8177 iwn_reset_tx_ring(sc, &sc->txq[qid]); 8178 8179 if (iwn_nic_lock(sc) == 0) { 8180 iwn_prph_write(sc, IWN_APMG_CLK_DIS, 8181 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 8182 iwn_nic_unlock(sc); 8183 } 8184 DELAY(5); 8185 /* Power OFF adapter. */ 8186 iwn_apm_stop(sc); 8187 } 8188 8189 static void 8190 iwn_radio_on_task(void *arg0, int pending) 8191 { 8192 struct iwn_softc *sc = arg0; 8193 struct ifnet *ifp; 8194 struct ieee80211com *ic; 8195 struct ieee80211vap *vap; 8196 8197 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8198 8199 wlan_serialize_enter(); 8200 ifp = sc->sc_ifp; 8201 ic = ifp->if_l2com; 8202 vap = TAILQ_FIRST(&ic->ic_vaps); 8203 if (vap != NULL) { 8204 iwn_init_locked(sc); 8205 ieee80211_init(vap); 8206 } 8207 wlan_serialize_exit(); 8208 } 8209 8210 static void 8211 iwn_radio_off_task(void *arg0, int pending) 8212 { 8213 struct iwn_softc *sc = arg0; 8214 struct ifnet *ifp; 8215 struct ieee80211com *ic; 8216 struct ieee80211vap *vap; 8217 8218 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8219 8220 wlan_serialize_enter(); 8221 ifp = sc->sc_ifp; 8222 ic = ifp->if_l2com; 8223 vap = TAILQ_FIRST(&ic->ic_vaps); 8224 iwn_stop_locked(sc); 8225 if (vap != NULL) 8226 ieee80211_stop(vap); 8227 8228 /* Enable interrupts to get RF toggle notification. 
*/ 8229 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8230 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 8231 wlan_serialize_exit(); 8232 } 8233 8234 static void 8235 iwn_init_locked(struct iwn_softc *sc) 8236 { 8237 struct ifnet *ifp = sc->sc_ifp; 8238 int error; 8239 8240 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 8241 8242 /* 8243 * Make sure we hold the serializer or we will have timing issues 8244 * with the wlan subsystem. 8245 */ 8246 wlan_assert_serialized(); 8247 if ((error = iwn_hw_prepare(sc)) != 0) { 8248 device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n", 8249 __func__, error); 8250 goto fail; 8251 } 8252 8253 /* Initialize interrupt mask to default value. */ 8254 sc->int_mask = IWN_INT_MASK_DEF; 8255 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 8256 8257 /* Check that the radio is not disabled by hardware switch. */ 8258 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) { 8259 device_printf(sc->sc_dev, 8260 "radio is disabled by hardware switch\n"); 8261 /* Enable interrupts to get RF toggle notifications. */ 8262 IWN_WRITE(sc, IWN_INT, 0xffffffff); 8263 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 8264 return; 8265 } 8266 8267 /* Read firmware images from the filesystem. */ 8268 if ((error = iwn_read_firmware(sc)) != 0) { 8269 device_printf(sc->sc_dev, 8270 "%s: could not read firmware, error %d\n", __func__, 8271 error); 8272 goto fail; 8273 } 8274 8275 /* Initialize hardware and upload firmware. */ 8276 error = iwn_hw_init(sc); 8277 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 8278 sc->fw_fp = NULL; 8279 if (error != 0) { 8280 device_printf(sc->sc_dev, 8281 "%s: could not initialize hardware, error %d\n", __func__, 8282 error); 8283 goto fail; 8284 } 8285 8286 /* Configure adapter now that it is ready. 
*/ 8287 if ((error = iwn_config(sc)) != 0) { 8288 device_printf(sc->sc_dev, 8289 "%s: could not configure device, error %d\n", __func__, 8290 error); 8291 goto fail; 8292 } 8293 8294 ifq_clr_oactive(&ifp->if_snd); 8295 ifp->if_flags |= IFF_RUNNING; 8296 8297 callout_reset(&sc->watchdog_to, hz, iwn_watchdog_timeout, sc); 8298 8299 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 8300 8301 return; 8302 8303 fail: iwn_stop_locked(sc); 8304 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 8305 } 8306 8307 static void 8308 iwn_init(void *arg) 8309 { 8310 struct iwn_softc *sc = arg; 8311 struct ifnet *ifp = sc->sc_ifp; 8312 struct ieee80211com *ic = ifp->if_l2com; 8313 8314 wlan_assert_serialized(); 8315 iwn_init_locked(sc); 8316 8317 if (ifp->if_flags & IFF_RUNNING) 8318 ieee80211_start_all(ic); 8319 } 8320 8321 static void 8322 iwn_stop_locked(struct iwn_softc *sc) 8323 { 8324 struct ifnet *ifp = sc->sc_ifp; 8325 8326 sc->sc_is_scanning = 0; 8327 sc->sc_tx_timer = 0; 8328 callout_stop(&sc->watchdog_to); 8329 callout_stop(&sc->calib_to); 8330 ifp->if_flags &= ~IFF_RUNNING; 8331 ifq_clr_oactive(&ifp->if_snd); 8332 8333 /* Power OFF hardware. */ 8334 iwn_hw_stop(sc); 8335 } 8336 8337 static void 8338 iwn_stop(struct iwn_softc *sc) 8339 { 8340 wlan_serialize_enter(); 8341 iwn_stop_locked(sc); 8342 wlan_serialize_exit(); 8343 } 8344 8345 /* 8346 * Callback from net80211 to start a scan. 8347 */ 8348 static void 8349 iwn_scan_start(struct ieee80211com *ic) 8350 { 8351 struct ifnet *ifp = ic->ic_ifp; 8352 struct iwn_softc *sc = ifp->if_softc; 8353 8354 /* make the link LED blink while we're scanning */ 8355 iwn_set_led(sc, IWN_LED_LINK, 20, 2); 8356 } 8357 8358 /* 8359 * Callback from net80211 to terminate a scan. 
8360 */ 8361 static void 8362 iwn_scan_end(struct ieee80211com *ic) 8363 { 8364 struct ifnet *ifp = ic->ic_ifp; 8365 struct iwn_softc *sc = ifp->if_softc; 8366 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 8367 8368 if (vap->iv_state == IEEE80211_S_RUN) { 8369 /* Set link LED to ON status if we are associated */ 8370 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 8371 } 8372 } 8373 8374 /* 8375 * Callback from net80211 to force a channel change. 8376 */ 8377 static void 8378 iwn_set_channel(struct ieee80211com *ic) 8379 { 8380 const struct ieee80211_channel *c = ic->ic_curchan; 8381 struct ifnet *ifp = ic->ic_ifp; 8382 struct iwn_softc *sc = ifp->if_softc; 8383 int error; 8384 8385 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8386 8387 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 8388 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 8389 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 8390 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 8391 8392 /* 8393 * Only need to set the channel in Monitor mode. AP scanning and auth 8394 * are already taken care of by their respective firmware commands. 8395 */ 8396 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 8397 error = iwn_config(sc); 8398 if (error != 0) 8399 device_printf(sc->sc_dev, 8400 "%s: error %d settting channel\n", __func__, error); 8401 } 8402 } 8403 8404 /* 8405 * Callback from net80211 to start scanning of the current channel. 8406 */ 8407 static void 8408 iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 8409 { 8410 struct ieee80211vap *vap = ss->ss_vap; 8411 struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc; 8412 int error; 8413 8414 error = iwn_scan(sc); 8415 if (error != 0) 8416 ieee80211_cancel_scan(vap); 8417 } 8418 8419 /* 8420 * Callback from net80211 to handle the minimum dwell time being met. 8421 * The intent is to terminate the scan but we just let the firmware 8422 * notify us when it's finished as we have no safe way to abort it. 
8423 */ 8424 static void 8425 iwn_scan_mindwell(struct ieee80211_scan_state *ss) 8426 { 8427 /* NB: don't try to abort scan; wait for firmware to finish */ 8428 } 8429 8430 static void 8431 iwn_hw_reset_task(void *arg0, int pending) 8432 { 8433 struct iwn_softc *sc = arg0; 8434 struct ifnet *ifp; 8435 struct ieee80211com *ic; 8436 8437 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8438 8439 wlan_serialize_enter(); 8440 ifp = sc->sc_ifp; 8441 ic = ifp->if_l2com; 8442 iwn_stop_locked(sc); 8443 iwn_init_locked(sc); 8444 ieee80211_notify_radio(ic, 1); 8445 wlan_serialize_exit(); 8446 } 8447 #ifdef IWN_DEBUG 8448 #define IWN_DESC(x) case x: return #x 8449 #define COUNTOF(array) (sizeof(array) / sizeof(array[0])) 8450 8451 /* 8452 * Translate CSR code to string 8453 */ 8454 static char *iwn_get_csr_string(int csr) 8455 { 8456 switch (csr) { 8457 IWN_DESC(IWN_HW_IF_CONFIG); 8458 IWN_DESC(IWN_INT_COALESCING); 8459 IWN_DESC(IWN_INT); 8460 IWN_DESC(IWN_INT_MASK); 8461 IWN_DESC(IWN_FH_INT); 8462 IWN_DESC(IWN_GPIO_IN); 8463 IWN_DESC(IWN_RESET); 8464 IWN_DESC(IWN_GP_CNTRL); 8465 IWN_DESC(IWN_HW_REV); 8466 IWN_DESC(IWN_EEPROM); 8467 IWN_DESC(IWN_EEPROM_GP); 8468 IWN_DESC(IWN_OTP_GP); 8469 IWN_DESC(IWN_GIO); 8470 IWN_DESC(IWN_GP_UCODE); 8471 IWN_DESC(IWN_GP_DRIVER); 8472 IWN_DESC(IWN_UCODE_GP1); 8473 IWN_DESC(IWN_UCODE_GP2); 8474 IWN_DESC(IWN_LED); 8475 IWN_DESC(IWN_DRAM_INT_TBL); 8476 IWN_DESC(IWN_GIO_CHICKEN); 8477 IWN_DESC(IWN_ANA_PLL); 8478 IWN_DESC(IWN_HW_REV_WA); 8479 IWN_DESC(IWN_DBG_HPET_MEM); 8480 default: 8481 return "UNKNOWN CSR"; 8482 } 8483 } 8484 8485 /* 8486 * This function print firmware register 8487 */ 8488 static void 8489 iwn_debug_register(struct iwn_softc *sc) 8490 { 8491 int i; 8492 static const uint32_t csr_tbl[] = { 8493 IWN_HW_IF_CONFIG, 8494 IWN_INT_COALESCING, 8495 IWN_INT, 8496 IWN_INT_MASK, 8497 IWN_FH_INT, 8498 IWN_GPIO_IN, 8499 IWN_RESET, 8500 IWN_GP_CNTRL, 8501 IWN_HW_REV, 8502 IWN_EEPROM, 8503 IWN_EEPROM_GP, 8504 IWN_OTP_GP, 8505 
IWN_GIO, 8506 IWN_GP_UCODE, 8507 IWN_GP_DRIVER, 8508 IWN_UCODE_GP1, 8509 IWN_UCODE_GP2, 8510 IWN_LED, 8511 IWN_DRAM_INT_TBL, 8512 IWN_GIO_CHICKEN, 8513 IWN_ANA_PLL, 8514 IWN_HW_REV_WA, 8515 IWN_DBG_HPET_MEM, 8516 }; 8517 DPRINTF(sc, IWN_DEBUG_REGISTER, 8518 "CSR values: (2nd byte of IWN_INT_COALESCING is IWN_INT_PERIODIC)%s", 8519 "\n"); 8520 for (i = 0; i < COUNTOF(csr_tbl); i++){ 8521 DPRINTF(sc, IWN_DEBUG_REGISTER," %10s: 0x%08x ", 8522 iwn_get_csr_string(csr_tbl[i]), IWN_READ(sc, csr_tbl[i])); 8523 if ((i+1) % 3 == 0) 8524 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); 8525 } 8526 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); 8527 } 8528 #endif 8529