/*-
 * Copyright (c) 2007-2009 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2008 Benjamin Close <benjsc@FreeBSD.org>
 * Copyright (c) 2008 Sam Leffler, Errno Consulting
 * Copyright (c) 2011 Intel Corporation
 * Copyright (c) 2013 Cedric GROSS <c.gross@kreiz-it.fr>
 * Copyright (c) 2013 Adrian Chadd <adrian@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $FreeBSD: head/sys/dev/iwn/if_iwn.c 258118 2013-11-14 07:27:00Z adrian $
 */

/*
 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
 * adapters.
27 */ 28 29 #include "opt_wlan.h" 30 #include "opt_iwn.h" 31 32 #include <sys/param.h> 33 #include <sys/sockio.h> 34 #include <sys/sysctl.h> 35 #include <sys/mbuf.h> 36 #include <sys/kernel.h> 37 #include <sys/socket.h> 38 #include <sys/systm.h> 39 #include <sys/malloc.h> 40 #include <sys/stdbool.h> 41 #include <sys/bus.h> 42 #include <sys/rman.h> 43 #include <sys/endian.h> 44 #include <sys/firmware.h> 45 #include <sys/limits.h> 46 #include <sys/module.h> 47 #include <sys/queue.h> 48 #include <sys/taskqueue.h> 49 #include <sys/libkern.h> 50 51 #include <sys/resource.h> 52 #include <machine/clock.h> 53 54 #include <bus/pci/pcireg.h> 55 #include <bus/pci/pcivar.h> 56 57 #include <net/bpf.h> 58 #include <net/if.h> 59 #include <net/if_var.h> 60 #include <net/if_arp.h> 61 #include <net/ifq_var.h> 62 #include <net/ethernet.h> 63 #include <net/if_dl.h> 64 #include <net/if_media.h> 65 #include <net/if_types.h> 66 67 #include <netinet/in.h> 68 #include <netinet/in_systm.h> 69 #include <netinet/in_var.h> 70 #include <netinet/if_ether.h> 71 #include <netinet/ip.h> 72 73 #include <netproto/802_11/ieee80211_var.h> 74 #include <netproto/802_11/ieee80211_radiotap.h> 75 #include <netproto/802_11/ieee80211_regdomain.h> 76 #include <netproto/802_11/ieee80211_ratectl.h> 77 78 #include "if_iwnreg.h" 79 #include "if_iwnvar.h" 80 #include "if_iwn_devid.h" 81 #include "if_iwn_chip_cfg.h" 82 #include "if_iwn_debug.h" 83 84 #define nitems(ary) (sizeof(ary) / sizeof((ary)[0])) 85 86 struct iwn_ident { 87 uint16_t vendor; 88 uint16_t device; 89 const char *name; 90 }; 91 92 static const struct iwn_ident iwn_ident_table[] = { 93 { 0x8086, IWN_DID_6x05_1, "Intel Centrino Advanced-N 6205" }, 94 { 0x8086, IWN_DID_1000_1, "Intel Centrino Wireless-N 1000" }, 95 { 0x8086, IWN_DID_1000_2, "Intel Centrino Wireless-N 1000" }, 96 { 0x8086, IWN_DID_6x05_2, "Intel Centrino Advanced-N 6205" }, 97 { 0x8086, IWN_DID_6050_1, "Intel Centrino Advanced-N + WiMAX 6250" }, 98 { 0x8086, IWN_DID_6050_2, "Intel 
Centrino Advanced-N + WiMAX 6250" }, 99 { 0x8086, IWN_DID_x030_1, "Intel Centrino Wireless-N 1030" }, 100 { 0x8086, IWN_DID_x030_2, "Intel Centrino Wireless-N 1030" }, 101 { 0x8086, IWN_DID_x030_3, "Intel Centrino Advanced-N 6230" }, 102 { 0x8086, IWN_DID_x030_4, "Intel Centrino Advanced-N 6230" }, 103 { 0x8086, IWN_DID_6150_1, "Intel Centrino Wireless-N + WiMAX 6150" }, 104 { 0x8086, IWN_DID_6150_2, "Intel Centrino Wireless-N + WiMAX 6150" }, 105 { 0x8086, IWN_DID_2x00_1, "Intel(R) Centrino(R) Wireless-N 2200 BGN" }, 106 { 0x8086, IWN_DID_2x00_2, "Intel(R) Centrino(R) Wireless-N 2200 BGN" }, 107 /* XXX 2200D is IWN_SDID_2x00_4; there's no way to express this here! */ 108 { 0x8086, IWN_DID_2x30_1, "Intel Centrino Wireless-N 2230" }, 109 { 0x8086, IWN_DID_2x30_2, "Intel Centrino Wireless-N 2230" }, 110 { 0x8086, IWN_DID_130_1, "Intel Centrino Wireless-N 130" }, 111 { 0x8086, IWN_DID_130_2, "Intel Centrino Wireless-N 130" }, 112 { 0x8086, IWN_DID_100_1, "Intel Centrino Wireless-N 100" }, 113 { 0x8086, IWN_DID_100_2, "Intel Centrino Wireless-N 100" }, 114 { 0x8086, IWN_DID_4965_1, "Intel Wireless WiFi Link 4965" }, 115 { 0x8086, IWN_DID_6x00_1, "Intel Centrino Ultimate-N 6300" }, 116 { 0x8086, IWN_DID_6x00_2, "Intel Centrino Advanced-N 6200" }, 117 { 0x8086, IWN_DID_4965_2, "Intel Wireless WiFi Link 4965" }, 118 { 0x8086, IWN_DID_4965_3, "Intel Wireless WiFi Link 4965" }, 119 { 0x8086, IWN_DID_5x00_1, "Intel WiFi Link 5100" }, 120 { 0x8086, IWN_DID_4965_4, "Intel Wireless WiFi Link 4965" }, 121 { 0x8086, IWN_DID_5x00_3, "Intel Ultimate N WiFi Link 5300" }, 122 { 0x8086, IWN_DID_5x00_4, "Intel Ultimate N WiFi Link 5300" }, 123 { 0x8086, IWN_DID_5x00_2, "Intel WiFi Link 5100" }, 124 { 0x8086, IWN_DID_6x00_3, "Intel Centrino Ultimate-N 6300" }, 125 { 0x8086, IWN_DID_6x00_4, "Intel Centrino Advanced-N 6200" }, 126 { 0x8086, IWN_DID_5x50_1, "Intel WiMAX/WiFi Link 5350" }, 127 { 0x8086, IWN_DID_5x50_2, "Intel WiMAX/WiFi Link 5350" }, 128 { 0x8086, IWN_DID_5x50_3, "Intel 
WiMAX/WiFi Link 5150" }, 129 { 0x8086, IWN_DID_5x50_4, "Intel WiMAX/WiFi Link 5150" }, 130 /* 131 * These currently don't function; the firmware crashes during 132 * the startup calibration request. 133 */ 134 #if 0 135 { 0x8086, IWN_DID_6035_1, "Intel Centrino Advanced 6235" }, 136 /* XXX TODO: figure out which ID this one is? */ 137 { 0x8086, IWN_DID_6035_2, "Intel Centrino Advanced 6235" }, 138 #endif 139 { 0, 0, NULL } 140 }; 141 142 static int iwn_pci_probe(device_t); 143 static int iwn_pci_attach(device_t); 144 static int iwn4965_attach(struct iwn_softc *, uint16_t); 145 static int iwn5000_attach(struct iwn_softc *, uint16_t); 146 static int iwn_config_specific(struct iwn_softc *, uint16_t); 147 static void iwn_radiotap_attach(struct iwn_softc *); 148 static void iwn_sysctlattach(struct iwn_softc *); 149 static struct ieee80211vap *iwn_vap_create(struct ieee80211com *, 150 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 151 const uint8_t [IEEE80211_ADDR_LEN], 152 const uint8_t [IEEE80211_ADDR_LEN]); 153 static void iwn_vap_delete(struct ieee80211vap *); 154 static int iwn_pci_detach(device_t); 155 static int iwn_pci_shutdown(device_t); 156 static int iwn_pci_suspend(device_t); 157 static int iwn_pci_resume(device_t); 158 static int iwn_nic_lock(struct iwn_softc *); 159 static int iwn_eeprom_lock(struct iwn_softc *); 160 static int iwn_init_otprom(struct iwn_softc *); 161 static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int); 162 static void iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int); 163 static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *, 164 void **, bus_size_t, bus_size_t); 165 static void iwn_dma_contig_free(struct iwn_dma_info *); 166 static int iwn_alloc_sched(struct iwn_softc *); 167 static void iwn_free_sched(struct iwn_softc *); 168 static int iwn_alloc_kw(struct iwn_softc *); 169 static void iwn_free_kw(struct iwn_softc *); 170 static int iwn_alloc_ict(struct iwn_softc *); 171 static void 
iwn_free_ict(struct iwn_softc *); 172 static int iwn_alloc_fwmem(struct iwn_softc *); 173 static void iwn_free_fwmem(struct iwn_softc *); 174 static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 175 static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 176 static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 177 static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *, 178 int); 179 static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 180 static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 181 static void iwn5000_ict_reset(struct iwn_softc *); 182 static int iwn_read_eeprom(struct iwn_softc *, 183 uint8_t macaddr[IEEE80211_ADDR_LEN]); 184 static void iwn4965_read_eeprom(struct iwn_softc *); 185 #ifdef IWN_DEBUG 186 static void iwn4965_print_power_group(struct iwn_softc *, int); 187 #endif 188 static void iwn5000_read_eeprom(struct iwn_softc *); 189 static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *); 190 static void iwn_read_eeprom_band(struct iwn_softc *, int); 191 static void iwn_read_eeprom_ht40(struct iwn_softc *, int); 192 static void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t); 193 static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *, 194 struct ieee80211_channel *); 195 static int iwn_setregdomain(struct ieee80211com *, 196 struct ieee80211_regdomain *, int, 197 struct ieee80211_channel[]); 198 static void iwn_read_eeprom_enhinfo(struct iwn_softc *); 199 static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *, 200 const uint8_t mac[IEEE80211_ADDR_LEN]); 201 static void iwn_newassoc(struct ieee80211_node *, int); 202 static int iwn_media_change(struct ifnet *); 203 static int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); 204 static void iwn_calib_timeout(void *); 205 static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *, 206 struct iwn_rx_data *); 207 static 
void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *, 208 struct iwn_rx_data *); 209 static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *, 210 struct iwn_rx_data *); 211 static void iwn5000_rx_calib_results(struct iwn_softc *, 212 struct iwn_rx_desc *, struct iwn_rx_data *); 213 static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *, 214 struct iwn_rx_data *); 215 static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 216 struct iwn_rx_data *); 217 static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 218 struct iwn_rx_data *); 219 static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int, 220 uint8_t); 221 static void iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, void *); 222 static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *); 223 static void iwn_notif_intr(struct iwn_softc *); 224 static void iwn_wakeup_intr(struct iwn_softc *); 225 static void iwn_rftoggle_intr(struct iwn_softc *); 226 static void iwn_fatal_intr(struct iwn_softc *); 227 static void iwn_intr(void *); 228 static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t, 229 uint16_t); 230 static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t, 231 uint16_t); 232 #ifdef notyet 233 static void iwn5000_reset_sched(struct iwn_softc *, int, int); 234 #endif 235 static int iwn_tx_data(struct iwn_softc *, struct mbuf *, 236 struct ieee80211_node *); 237 static int iwn_tx_data_raw(struct iwn_softc *, struct mbuf *, 238 struct ieee80211_node *, 239 const struct ieee80211_bpf_params *params); 240 static int iwn_raw_xmit(struct ieee80211_node *, struct mbuf *, 241 const struct ieee80211_bpf_params *); 242 static void iwn_start(struct ifnet *, struct ifaltq_subque *); 243 static void iwn_start_locked(struct ifnet *); 244 static void iwn_watchdog_timeout(void *); 245 static int iwn_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 246 static int iwn_cmd(struct iwn_softc *, 
int, const void *, int, int); 247 static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *, 248 int); 249 static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *, 250 int); 251 static int iwn_set_link_quality(struct iwn_softc *, 252 struct ieee80211_node *); 253 static int iwn_add_broadcast_node(struct iwn_softc *, int); 254 static int iwn_updateedca(struct ieee80211com *); 255 static void iwn_update_mcast(struct ifnet *); 256 static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t); 257 static int iwn_set_critical_temp(struct iwn_softc *); 258 static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *); 259 static void iwn4965_power_calibration(struct iwn_softc *, int); 260 static int iwn4965_set_txpower(struct iwn_softc *, 261 struct ieee80211_channel *, int); 262 static int iwn5000_set_txpower(struct iwn_softc *, 263 struct ieee80211_channel *, int); 264 static int iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 265 static int iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 266 static int iwn_get_noise(const struct iwn_rx_general_stats *); 267 static int iwn4965_get_temperature(struct iwn_softc *); 268 static int iwn5000_get_temperature(struct iwn_softc *); 269 static int iwn_init_sensitivity(struct iwn_softc *); 270 static void iwn_collect_noise(struct iwn_softc *, 271 const struct iwn_rx_general_stats *); 272 static int iwn4965_init_gains(struct iwn_softc *); 273 static int iwn5000_init_gains(struct iwn_softc *); 274 static int iwn4965_set_gains(struct iwn_softc *); 275 static int iwn5000_set_gains(struct iwn_softc *); 276 static void iwn_tune_sensitivity(struct iwn_softc *, 277 const struct iwn_rx_stats *); 278 static int iwn_send_sensitivity(struct iwn_softc *); 279 static int iwn_set_pslevel(struct iwn_softc *, int, int, int); 280 static int iwn_send_btcoex(struct iwn_softc *); 281 static int iwn_send_advanced_btcoex(struct iwn_softc *); 282 static int iwn5000_runtime_calib(struct 
iwn_softc *); 283 static int iwn_config(struct iwn_softc *); 284 static uint8_t *ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int); 285 static int iwn_scan(struct iwn_softc *); 286 static int iwn_auth(struct iwn_softc *, struct ieee80211vap *vap); 287 static int iwn_run(struct iwn_softc *, struct ieee80211vap *vap); 288 static int iwn_ampdu_rx_start(struct ieee80211_node *, 289 struct ieee80211_rx_ampdu *, int, int, int); 290 static void iwn_ampdu_rx_stop(struct ieee80211_node *, 291 struct ieee80211_rx_ampdu *); 292 static int iwn_addba_request(struct ieee80211_node *, 293 struct ieee80211_tx_ampdu *, int, int, int); 294 static int iwn_addba_response(struct ieee80211_node *, 295 struct ieee80211_tx_ampdu *, int, int, int); 296 static int iwn_ampdu_tx_start(struct ieee80211com *, 297 struct ieee80211_node *, uint8_t); 298 static void iwn_ampdu_tx_stop(struct ieee80211_node *, 299 struct ieee80211_tx_ampdu *); 300 static void iwn4965_ampdu_tx_start(struct iwn_softc *, 301 struct ieee80211_node *, int, uint8_t, uint16_t); 302 static void iwn4965_ampdu_tx_stop(struct iwn_softc *, int, 303 uint8_t, uint16_t); 304 static void iwn5000_ampdu_tx_start(struct iwn_softc *, 305 struct ieee80211_node *, int, uint8_t, uint16_t); 306 static void iwn5000_ampdu_tx_stop(struct iwn_softc *, int, 307 uint8_t, uint16_t); 308 static int iwn5000_query_calibration(struct iwn_softc *); 309 static int iwn5000_send_calibration(struct iwn_softc *); 310 static int iwn5000_send_wimax_coex(struct iwn_softc *); 311 static int iwn5000_crystal_calib(struct iwn_softc *); 312 static int iwn5000_temp_offset_calib(struct iwn_softc *); 313 static int iwn5000_temp_offset_calibv2(struct iwn_softc *); 314 static int iwn4965_post_alive(struct iwn_softc *); 315 static int iwn5000_post_alive(struct iwn_softc *); 316 static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *, 317 int); 318 static int iwn4965_load_firmware(struct iwn_softc *); 319 static int iwn5000_load_firmware_section(struct 
iwn_softc *, uint32_t, 320 const uint8_t *, int); 321 static int iwn5000_load_firmware(struct iwn_softc *); 322 static int iwn_read_firmware_leg(struct iwn_softc *, 323 struct iwn_fw_info *); 324 static int iwn_read_firmware_tlv(struct iwn_softc *, 325 struct iwn_fw_info *, uint16_t); 326 static int iwn_read_firmware(struct iwn_softc *); 327 static int iwn_clock_wait(struct iwn_softc *); 328 static int iwn_apm_init(struct iwn_softc *); 329 static void iwn_apm_stop_master(struct iwn_softc *); 330 static void iwn_apm_stop(struct iwn_softc *); 331 static int iwn4965_nic_config(struct iwn_softc *); 332 static int iwn5000_nic_config(struct iwn_softc *); 333 static int iwn_hw_prepare(struct iwn_softc *); 334 static int iwn_hw_init(struct iwn_softc *); 335 static void iwn_hw_stop(struct iwn_softc *); 336 static void iwn_radio_on_task(void *, int); 337 static void iwn_radio_off_task(void *, int); 338 static void iwn_init_locked(struct iwn_softc *); 339 static void iwn_init(void *); 340 static void iwn_stop_locked(struct iwn_softc *); 341 static void iwn_scan_start(struct ieee80211com *); 342 static void iwn_scan_end(struct ieee80211com *); 343 static void iwn_set_channel(struct ieee80211com *); 344 static void iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long); 345 static void iwn_scan_mindwell(struct ieee80211_scan_state *); 346 static void iwn_hw_reset_task(void *, int); 347 #ifdef IWN_DEBUG 348 static char *iwn_get_csr_string(int); 349 static void iwn_debug_register(struct iwn_softc *); 350 #endif 351 352 static device_method_t iwn_methods[] = { 353 /* Device interface */ 354 DEVMETHOD(device_probe, iwn_pci_probe), 355 DEVMETHOD(device_attach, iwn_pci_attach), 356 DEVMETHOD(device_detach, iwn_pci_detach), 357 DEVMETHOD(device_shutdown, iwn_pci_shutdown), 358 DEVMETHOD(device_suspend, iwn_pci_suspend), 359 DEVMETHOD(device_resume, iwn_pci_resume), 360 { 0, 0 } 361 }; 362 363 static driver_t iwn_driver = { 364 "iwn", 365 iwn_methods, 366 sizeof(struct 
iwn_softc) 367 }; 368 static devclass_t iwn_devclass; 369 370 DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, NULL, NULL); 371 372 MODULE_VERSION(iwn, 1); 373 374 MODULE_DEPEND(iwn, firmware, 1, 1, 1); 375 MODULE_DEPEND(iwn, pci, 1, 1, 1); 376 MODULE_DEPEND(iwn, wlan, 1, 1, 1); 377 MODULE_DEPEND(iwn, wlan_amrr, 1, 1, 1); 378 379 static int 380 iwn_pci_probe(device_t dev) 381 { 382 const struct iwn_ident *ident; 383 384 /* no wlan serializer needed */ 385 for (ident = iwn_ident_table; ident->name != NULL; ident++) { 386 if (pci_get_vendor(dev) == ident->vendor && 387 pci_get_device(dev) == ident->device) { 388 device_set_desc(dev, ident->name); 389 return 0; 390 } 391 } 392 return ENXIO; 393 } 394 395 static int 396 iwn_pci_attach(device_t dev) 397 { 398 struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev); 399 struct ieee80211com *ic; 400 struct ifnet *ifp; 401 uint32_t reg; 402 int i, error; 403 #ifdef OLD_MSI 404 int result; 405 #endif 406 uint8_t macaddr[IEEE80211_ADDR_LEN]; 407 char ethstr[ETHER_ADDRSTRLEN + 1]; 408 409 wlan_serialize_enter(); 410 411 sc->sc_dev = dev; 412 sc->sc_dmat = NULL; 413 414 if (bus_dma_tag_create(sc->sc_dmat, 415 1, 0, 416 BUS_SPACE_MAXADDR_32BIT, 417 BUS_SPACE_MAXADDR, 418 NULL, NULL, 419 BUS_SPACE_MAXSIZE, 420 IWN_MAX_SCATTER, 421 BUS_SPACE_MAXSIZE, 422 BUS_DMA_ALLOCNOW, 423 &sc->sc_dmat)) { 424 device_printf(dev, "cannot allocate DMA tag\n"); 425 error = ENOMEM; 426 goto fail; 427 } 428 429 /* prepare sysctl tree for use in sub modules */ 430 sysctl_ctx_init(&sc->sc_sysctl_ctx); 431 sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx, 432 SYSCTL_STATIC_CHILDREN(_hw), 433 OID_AUTO, 434 device_get_nameunit(sc->sc_dev), 435 CTLFLAG_RD, 0, ""); 436 437 #ifdef IWN_DEBUG 438 error = resource_int_value(device_get_name(sc->sc_dev), 439 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 440 if (error != 0) 441 sc->sc_debug = 0; 442 #else 443 sc->sc_debug = 0; 444 #endif 445 446 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: 
begin\n",__func__); 447 448 /* 449 * Get the offset of the PCI Express Capability Structure in PCI 450 * Configuration Space. 451 */ 452 error = pci_find_extcap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 453 if (error != 0) { 454 device_printf(dev, "PCIe capability structure not found!\n"); 455 goto fail2; 456 } 457 458 /* Clear device-specific "PCI retry timeout" register (41h). */ 459 pci_write_config(dev, 0x41, 0, 1); 460 461 /* Hardware bug workaround. */ 462 reg = pci_read_config(dev, PCIR_COMMAND, 2); 463 if (reg & PCIM_CMD_INTxDIS) { 464 DPRINTF(sc, IWN_DEBUG_RESET, "%s: PCIe INTx Disable set\n", 465 __func__); 466 reg &= ~PCIM_CMD_INTxDIS; 467 pci_write_config(dev, PCIR_COMMAND, reg, 2); 468 } 469 470 /* Enable bus-mastering. */ 471 pci_enable_busmaster(dev); 472 473 sc->mem_rid = PCIR_BAR(0); 474 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, 475 RF_ACTIVE); 476 if (sc->mem == NULL) { 477 device_printf(dev, "can't map mem space\n"); 478 error = ENOMEM; 479 goto fail2; 480 } 481 sc->sc_st = rman_get_bustag(sc->mem); 482 sc->sc_sh = rman_get_bushandle(sc->mem); 483 484 sc->irq_rid = 0; 485 #ifdef OLD_MSI 486 if ((result = pci_msi_count(dev)) == 1 && 487 pci_alloc_msi(dev, &result) == 0) 488 sc->irq_rid = 1; 489 #endif 490 /* Install interrupt handler. */ 491 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, 492 RF_ACTIVE | RF_SHAREABLE); 493 if (sc->irq == NULL) { 494 device_printf(dev, "can't map interrupt\n"); 495 error = ENOMEM; 496 goto fail; 497 } 498 499 /* Read hardware revision and attach. */ 500 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> IWN_HW_REV_TYPE_SHIFT) 501 & IWN_HW_REV_TYPE_MASK; 502 sc->subdevice_id = pci_get_subdevice(dev); 503 504 /* 505 * 4965 versus 5000 and later have different methods. 506 * Let's set those up first. 
507 */ 508 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 509 error = iwn4965_attach(sc, pci_get_device(dev)); 510 else 511 error = iwn5000_attach(sc, pci_get_device(dev)); 512 if (error != 0) { 513 device_printf(dev, "could not attach device, error %d\n", 514 error); 515 goto fail; 516 } 517 518 /* 519 * Next, let's setup the various parameters of each NIC. 520 */ 521 error = iwn_config_specific(sc, pci_get_device(dev)); 522 if (error != 0) { 523 device_printf(dev, "could not attach device, error %d\n", 524 error); 525 goto fail; 526 } 527 528 if ((error = iwn_hw_prepare(sc)) != 0) { 529 device_printf(dev, "hardware not ready, error %d\n", error); 530 goto fail; 531 } 532 533 /* Allocate DMA memory for firmware transfers. */ 534 if ((error = iwn_alloc_fwmem(sc)) != 0) { 535 device_printf(dev, 536 "could not allocate memory for firmware, error %d\n", 537 error); 538 goto fail; 539 } 540 541 /* Allocate "Keep Warm" page. */ 542 if ((error = iwn_alloc_kw(sc)) != 0) { 543 device_printf(dev, 544 "could not allocate keep warm page, error %d\n", error); 545 goto fail; 546 } 547 548 /* Allocate ICT table for 5000 Series. */ 549 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 550 (error = iwn_alloc_ict(sc)) != 0) { 551 device_printf(dev, "could not allocate ICT table, error %d\n", 552 error); 553 goto fail; 554 } 555 556 /* Allocate TX scheduler "rings". */ 557 if ((error = iwn_alloc_sched(sc)) != 0) { 558 device_printf(dev, 559 "could not allocate TX scheduler rings, error %d\n", error); 560 goto fail; 561 } 562 563 /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */ 564 for (i = 0; i < sc->ntxqs; i++) { 565 if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 566 device_printf(dev, 567 "could not allocate TX ring %d, error %d\n", i, 568 error); 569 goto fail; 570 } 571 } 572 573 /* Allocate RX ring. 
*/ 574 if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) { 575 device_printf(dev, "could not allocate RX ring, error %d\n", 576 error); 577 goto fail; 578 } 579 580 /* Clear pending interrupts. */ 581 IWN_WRITE(sc, IWN_INT, 0xffffffff); 582 583 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 584 if (ifp == NULL) { 585 device_printf(dev, "can not allocate ifnet structure\n"); 586 goto fail; 587 } 588 589 ic = ifp->if_l2com; 590 ic->ic_ifp = ifp; 591 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 592 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 593 594 /* Set device capabilities. */ 595 ic->ic_caps = 596 IEEE80211_C_STA /* station mode supported */ 597 | IEEE80211_C_MONITOR /* monitor mode supported */ 598 | IEEE80211_C_BGSCAN /* background scanning */ 599 | IEEE80211_C_TXPMGT /* tx power management */ 600 | IEEE80211_C_SHSLOT /* short slot time supported */ 601 | IEEE80211_C_WPA 602 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 603 #if 0 604 | IEEE80211_C_IBSS /* ibss/adhoc mode */ 605 #endif 606 | IEEE80211_C_WME /* WME */ 607 | IEEE80211_C_PMGT /* Station-side power mgmt */ 608 ; 609 610 /* Read MAC address, channels, etc from EEPROM. */ 611 if ((error = iwn_read_eeprom(sc, macaddr)) != 0) { 612 device_printf(dev, "could not read EEPROM, error %d\n", 613 error); 614 goto fail; 615 } 616 617 /* Count the number of available chains. 
*/ 618 sc->ntxchains = 619 ((sc->txchainmask >> 2) & 1) + 620 ((sc->txchainmask >> 1) & 1) + 621 ((sc->txchainmask >> 0) & 1); 622 sc->nrxchains = 623 ((sc->rxchainmask >> 2) & 1) + 624 ((sc->rxchainmask >> 1) & 1) + 625 ((sc->rxchainmask >> 0) & 1); 626 if (bootverbose) { 627 device_printf(dev, "MIMO %dT%dR, %.4s, address %s\n", 628 sc->ntxchains, sc->nrxchains, sc->eeprom_domain, 629 kether_ntoa(macaddr, ethstr)); 630 } 631 632 if (sc->sc_flags & IWN_FLAG_HAS_11N) { 633 #if notyet 634 ic->ic_rxstream = sc->nrxchains; 635 ic->ic_txstream = sc->ntxchains; 636 #endif 637 638 /* 639 * The NICs we currently support cap out at 2x2 support 640 * separate from the chains being used. 641 * 642 * This is a total hack to work around that until some 643 * per-device method is implemented to return the 644 * actual stream support. 645 * 646 * XXX Note: the 5350 is a 3x3 device; so we shouldn't 647 * cap this! But, anything that touches rates in the 648 * driver needs to be audited first before 3x3 is enabled. 
649 */ 650 #if notyet 651 if (ic->ic_rxstream > 2) 652 ic->ic_rxstream = 2; 653 if (ic->ic_txstream > 2) 654 ic->ic_txstream = 2; 655 #endif 656 657 ic->ic_htcaps = 658 IEEE80211_HTCAP_SMPS_OFF /* SMPS mode disabled */ 659 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ 660 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/ 661 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ 662 #ifdef notyet 663 | IEEE80211_HTCAP_GREENFIELD 664 #if IWN_RBUF_SIZE == 8192 665 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */ 666 #else 667 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ 668 #endif 669 #endif 670 /* s/w capabilities */ 671 | IEEE80211_HTC_HT /* HT operation */ 672 | IEEE80211_HTC_AMPDU /* tx A-MPDU */ 673 #ifdef notyet 674 | IEEE80211_HTC_AMSDU /* tx A-MSDU */ 675 #endif 676 ; 677 } 678 679 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 680 ifp->if_softc = sc; 681 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 682 ifp->if_init = iwn_init; 683 ifp->if_ioctl = iwn_ioctl; 684 ifp->if_start = iwn_start; 685 ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN); 686 #ifdef notyet 687 ifq_set_ready(&ifp->if_snd); 688 #endif 689 690 ieee80211_ifattach(ic, macaddr); 691 ic->ic_vap_create = iwn_vap_create; 692 ic->ic_vap_delete = iwn_vap_delete; 693 ic->ic_raw_xmit = iwn_raw_xmit; 694 ic->ic_node_alloc = iwn_node_alloc; 695 sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start; 696 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start; 697 sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop; 698 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop; 699 sc->sc_addba_request = ic->ic_addba_request; 700 ic->ic_addba_request = iwn_addba_request; 701 sc->sc_addba_response = ic->ic_addba_response; 702 ic->ic_addba_response = iwn_addba_response; 703 sc->sc_addba_stop = ic->ic_addba_stop; 704 ic->ic_addba_stop = iwn_ampdu_tx_stop; 705 ic->ic_newassoc = iwn_newassoc; 706 ic->ic_wme.wme_update = iwn_updateedca; 707 ic->ic_update_mcast = iwn_update_mcast; 708 ic->ic_scan_start = 
iwn_scan_start; 709 ic->ic_scan_end = iwn_scan_end; 710 ic->ic_set_channel = iwn_set_channel; 711 ic->ic_scan_curchan = iwn_scan_curchan; 712 ic->ic_scan_mindwell = iwn_scan_mindwell; 713 ic->ic_setregdomain = iwn_setregdomain; 714 715 iwn_radiotap_attach(sc); 716 717 callout_init(&sc->calib_to); 718 callout_init(&sc->watchdog_to); 719 TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset_task, sc); 720 TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on_task, sc); 721 TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off_task, sc); 722 723 iwn_sysctlattach(sc); 724 725 /* 726 * Hook our interrupt after all initialization is complete. 727 */ 728 error = bus_setup_intr(dev, sc->irq, INTR_MPSAFE, 729 iwn_intr, sc, &sc->sc_ih, 730 &wlan_global_serializer); 731 if (error != 0) { 732 device_printf(dev, "can't establish interrupt, error %d\n", 733 error); 734 goto fail; 735 } 736 737 if (bootverbose) 738 ieee80211_announce(ic); 739 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 740 wlan_serialize_exit(); 741 return 0; 742 fail: 743 wlan_serialize_exit(); 744 iwn_pci_detach(dev); 745 wlan_serialize_enter(); 746 fail2: 747 wlan_serialize_exit(); 748 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 749 return error; 750 } 751 752 /* 753 * Define specific configuration based on device id and subdevice id 754 * pid : PCI device id 755 */ 756 static int 757 iwn_config_specific(struct iwn_softc *sc, uint16_t pid) 758 { 759 760 switch (pid) { 761 /* 4965 series */ 762 case IWN_DID_4965_1: 763 case IWN_DID_4965_2: 764 case IWN_DID_4965_3: 765 case IWN_DID_4965_4: 766 sc->base_params = &iwn4965_base_params; 767 sc->limits = &iwn4965_sensitivity_limits; 768 sc->fwname = "iwn4965fw"; 769 /* Override chains masks, ROM is known to be broken. 
*/ 770 sc->txchainmask = IWN_ANT_AB; 771 sc->rxchainmask = IWN_ANT_ABC; 772 /* Enable normal btcoex */ 773 sc->sc_flags |= IWN_FLAG_BTCOEX; 774 break; 775 /* 1000 Series */ 776 case IWN_DID_1000_1: 777 case IWN_DID_1000_2: 778 switch(sc->subdevice_id) { 779 case IWN_SDID_1000_1: 780 case IWN_SDID_1000_2: 781 case IWN_SDID_1000_3: 782 case IWN_SDID_1000_4: 783 case IWN_SDID_1000_5: 784 case IWN_SDID_1000_6: 785 case IWN_SDID_1000_7: 786 case IWN_SDID_1000_8: 787 case IWN_SDID_1000_9: 788 case IWN_SDID_1000_10: 789 case IWN_SDID_1000_11: 790 case IWN_SDID_1000_12: 791 sc->limits = &iwn1000_sensitivity_limits; 792 sc->base_params = &iwn1000_base_params; 793 sc->fwname = "iwn1000fw"; 794 break; 795 default: 796 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 797 "0x%04x rev %d not supported (subdevice)\n", pid, 798 sc->subdevice_id,sc->hw_type); 799 return ENOTSUP; 800 } 801 break; 802 /* 6x00 Series */ 803 case IWN_DID_6x00_2: 804 case IWN_DID_6x00_4: 805 case IWN_DID_6x00_1: 806 case IWN_DID_6x00_3: 807 sc->fwname = "iwn6000fw"; 808 sc->limits = &iwn6000_sensitivity_limits; 809 switch(sc->subdevice_id) { 810 case IWN_SDID_6x00_1: 811 case IWN_SDID_6x00_2: 812 case IWN_SDID_6x00_8: 813 //iwl6000_3agn_cfg 814 sc->base_params = &iwn_6000_base_params; 815 break; 816 case IWN_SDID_6x00_3: 817 case IWN_SDID_6x00_6: 818 case IWN_SDID_6x00_9: 819 ////iwl6000i_2agn 820 case IWN_SDID_6x00_4: 821 case IWN_SDID_6x00_7: 822 case IWN_SDID_6x00_10: 823 //iwl6000i_2abg_cfg 824 case IWN_SDID_6x00_5: 825 //iwl6000i_2bg_cfg 826 sc->base_params = &iwn_6000i_base_params; 827 sc->sc_flags |= IWN_FLAG_INTERNAL_PA; 828 sc->txchainmask = IWN_ANT_BC; 829 sc->rxchainmask = IWN_ANT_BC; 830 break; 831 default: 832 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 833 "0x%04x rev %d not supported (subdevice)\n", pid, 834 sc->subdevice_id,sc->hw_type); 835 return ENOTSUP; 836 } 837 break; 838 /* 6x05 Series */ 839 case IWN_DID_6x05_1: 840 case IWN_DID_6x05_2: 841 
switch(sc->subdevice_id) { 842 case IWN_SDID_6x05_1: 843 case IWN_SDID_6x05_4: 844 case IWN_SDID_6x05_6: 845 //iwl6005_2agn_cfg 846 case IWN_SDID_6x05_2: 847 case IWN_SDID_6x05_5: 848 case IWN_SDID_6x05_7: 849 //iwl6005_2abg_cfg 850 case IWN_SDID_6x05_3: 851 //iwl6005_2bg_cfg 852 case IWN_SDID_6x05_8: 853 case IWN_SDID_6x05_9: 854 //iwl6005_2agn_sff_cfg 855 case IWN_SDID_6x05_10: 856 //iwl6005_2agn_d_cfg 857 case IWN_SDID_6x05_11: 858 //iwl6005_2agn_mow1_cfg 859 case IWN_SDID_6x05_12: 860 //iwl6005_2agn_mow2_cfg 861 sc->fwname = "iwn6000g2afw"; 862 sc->limits = &iwn6000_sensitivity_limits; 863 sc->base_params = &iwn_6000g2_base_params; 864 break; 865 default: 866 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 867 "0x%04x rev %d not supported (subdevice)\n", pid, 868 sc->subdevice_id,sc->hw_type); 869 return ENOTSUP; 870 } 871 break; 872 /* 6x35 Series */ 873 case IWN_DID_6035_1: 874 case IWN_DID_6035_2: 875 switch(sc->subdevice_id) { 876 case IWN_SDID_6035_1: 877 case IWN_SDID_6035_2: 878 case IWN_SDID_6035_3: 879 case IWN_SDID_6035_4: 880 sc->fwname = "iwn6000g2bfw"; 881 sc->limits = &iwn6000_sensitivity_limits; 882 sc->base_params = &iwn_6000g2b_base_params; 883 break; 884 default: 885 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 886 "0x%04x rev %d not supported (subdevice)\n", pid, 887 sc->subdevice_id,sc->hw_type); 888 return ENOTSUP; 889 } 890 break; 891 /* 6x50 WiFi/WiMax Series */ 892 case IWN_DID_6050_1: 893 case IWN_DID_6050_2: 894 switch(sc->subdevice_id) { 895 case IWN_SDID_6050_1: 896 case IWN_SDID_6050_3: 897 case IWN_SDID_6050_5: 898 //iwl6050_2agn_cfg 899 case IWN_SDID_6050_2: 900 case IWN_SDID_6050_4: 901 case IWN_SDID_6050_6: 902 //iwl6050_2abg_cfg 903 sc->fwname = "iwn6050fw"; 904 sc->txchainmask = IWN_ANT_AB; 905 sc->rxchainmask = IWN_ANT_AB; 906 sc->limits = &iwn6000_sensitivity_limits; 907 sc->base_params = &iwn_6050_base_params; 908 break; 909 default: 910 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub 
id :" 911 "0x%04x rev %d not supported (subdevice)\n", pid, 912 sc->subdevice_id,sc->hw_type); 913 return ENOTSUP; 914 } 915 break; 916 /* 6150 WiFi/WiMax Series */ 917 case IWN_DID_6150_1: 918 case IWN_DID_6150_2: 919 switch(sc->subdevice_id) { 920 case IWN_SDID_6150_1: 921 case IWN_SDID_6150_3: 922 case IWN_SDID_6150_5: 923 // iwl6150_bgn_cfg 924 case IWN_SDID_6150_2: 925 case IWN_SDID_6150_4: 926 case IWN_SDID_6150_6: 927 //iwl6150_bg_cfg 928 sc->fwname = "iwn6050fw"; 929 sc->limits = &iwn6000_sensitivity_limits; 930 sc->base_params = &iwn_6150_base_params; 931 break; 932 default: 933 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 934 "0x%04x rev %d not supported (subdevice)\n", pid, 935 sc->subdevice_id,sc->hw_type); 936 return ENOTSUP; 937 } 938 break; 939 /* 6030 Series and 1030 Series */ 940 case IWN_DID_x030_1: 941 case IWN_DID_x030_2: 942 case IWN_DID_x030_3: 943 case IWN_DID_x030_4: 944 switch(sc->subdevice_id) { 945 case IWN_SDID_x030_1: 946 case IWN_SDID_x030_3: 947 case IWN_SDID_x030_5: 948 // iwl1030_bgn_cfg 949 case IWN_SDID_x030_2: 950 case IWN_SDID_x030_4: 951 case IWN_SDID_x030_6: 952 //iwl1030_bg_cfg 953 case IWN_SDID_x030_7: 954 case IWN_SDID_x030_10: 955 case IWN_SDID_x030_14: 956 //iwl6030_2agn_cfg 957 case IWN_SDID_x030_8: 958 case IWN_SDID_x030_11: 959 case IWN_SDID_x030_15: 960 // iwl6030_2bgn_cfg 961 case IWN_SDID_x030_9: 962 case IWN_SDID_x030_12: 963 case IWN_SDID_x030_16: 964 // iwl6030_2abg_cfg 965 case IWN_SDID_x030_13: 966 //iwl6030_2bg_cfg 967 sc->fwname = "iwn6000g2bfw"; 968 sc->limits = &iwn6000_sensitivity_limits; 969 sc->base_params = &iwn_6000g2b_base_params; 970 break; 971 default: 972 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 973 "0x%04x rev %d not supported (subdevice)\n", pid, 974 sc->subdevice_id,sc->hw_type); 975 return ENOTSUP; 976 } 977 break; 978 /* 130 Series WiFi */ 979 /* XXX: This series will need adjustment for rate. 
980 * see rx_with_siso_diversity in linux kernel 981 */ 982 case IWN_DID_130_1: 983 case IWN_DID_130_2: 984 switch(sc->subdevice_id) { 985 case IWN_SDID_130_1: 986 case IWN_SDID_130_3: 987 case IWN_SDID_130_5: 988 //iwl130_bgn_cfg 989 case IWN_SDID_130_2: 990 case IWN_SDID_130_4: 991 case IWN_SDID_130_6: 992 //iwl130_bg_cfg 993 sc->fwname = "iwn6000g2bfw"; 994 sc->limits = &iwn6000_sensitivity_limits; 995 sc->base_params = &iwn_6000g2b_base_params; 996 break; 997 default: 998 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 999 "0x%04x rev %d not supported (subdevice)\n", pid, 1000 sc->subdevice_id,sc->hw_type); 1001 return ENOTSUP; 1002 } 1003 break; 1004 /* 100 Series WiFi */ 1005 case IWN_DID_100_1: 1006 case IWN_DID_100_2: 1007 switch(sc->subdevice_id) { 1008 case IWN_SDID_100_1: 1009 case IWN_SDID_100_2: 1010 case IWN_SDID_100_3: 1011 case IWN_SDID_100_4: 1012 case IWN_SDID_100_5: 1013 case IWN_SDID_100_6: 1014 sc->limits = &iwn1000_sensitivity_limits; 1015 sc->base_params = &iwn1000_base_params; 1016 sc->fwname = "iwn100fw"; 1017 break; 1018 default: 1019 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1020 "0x%04x rev %d not supported (subdevice)\n", pid, 1021 sc->subdevice_id,sc->hw_type); 1022 return ENOTSUP; 1023 } 1024 break; 1025 1026 /* 2x00 Series */ 1027 case IWN_DID_2x00_1: 1028 case IWN_DID_2x00_2: 1029 switch(sc->subdevice_id) { 1030 case IWN_SDID_2x00_1: 1031 case IWN_SDID_2x00_2: 1032 case IWN_SDID_2x00_3: 1033 //iwl2000_2bgn_cfg 1034 case IWN_SDID_2x00_4: 1035 //iwl2000_2bgn_d_cfg 1036 sc->limits = &iwn2030_sensitivity_limits; 1037 sc->base_params = &iwn2000_base_params; 1038 sc->fwname = "iwn2000fw"; 1039 break; 1040 default: 1041 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1042 "0x%04x rev %d not supported (subdevice) \n", 1043 pid, sc->subdevice_id, sc->hw_type); 1044 return ENOTSUP; 1045 } 1046 break; 1047 /* 2x30 Series */ 1048 case IWN_DID_2x30_1: 1049 case IWN_DID_2x30_2: 1050 
switch(sc->subdevice_id) { 1051 case IWN_SDID_2x30_1: 1052 case IWN_SDID_2x30_3: 1053 case IWN_SDID_2x30_5: 1054 //iwl100_bgn_cfg 1055 case IWN_SDID_2x30_2: 1056 case IWN_SDID_2x30_4: 1057 case IWN_SDID_2x30_6: 1058 //iwl100_bg_cfg 1059 sc->limits = &iwn2030_sensitivity_limits; 1060 sc->base_params = &iwn2030_base_params; 1061 sc->fwname = "iwn2030fw"; 1062 break; 1063 default: 1064 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1065 "0x%04x rev %d not supported (subdevice)\n", pid, 1066 sc->subdevice_id,sc->hw_type); 1067 return ENOTSUP; 1068 } 1069 break; 1070 /* 5x00 Series */ 1071 case IWN_DID_5x00_1: 1072 case IWN_DID_5x00_2: 1073 case IWN_DID_5x00_3: 1074 case IWN_DID_5x00_4: 1075 sc->limits = &iwn5000_sensitivity_limits; 1076 sc->base_params = &iwn5000_base_params; 1077 sc->fwname = "iwn5000fw"; 1078 switch(sc->subdevice_id) { 1079 case IWN_SDID_5x00_1: 1080 case IWN_SDID_5x00_2: 1081 case IWN_SDID_5x00_3: 1082 case IWN_SDID_5x00_4: 1083 case IWN_SDID_5x00_9: 1084 case IWN_SDID_5x00_10: 1085 case IWN_SDID_5x00_11: 1086 case IWN_SDID_5x00_12: 1087 case IWN_SDID_5x00_17: 1088 case IWN_SDID_5x00_18: 1089 case IWN_SDID_5x00_19: 1090 case IWN_SDID_5x00_20: 1091 //iwl5100_agn_cfg 1092 sc->txchainmask = IWN_ANT_B; 1093 sc->rxchainmask = IWN_ANT_AB; 1094 break; 1095 case IWN_SDID_5x00_5: 1096 case IWN_SDID_5x00_6: 1097 case IWN_SDID_5x00_13: 1098 case IWN_SDID_5x00_14: 1099 case IWN_SDID_5x00_21: 1100 case IWN_SDID_5x00_22: 1101 //iwl5100_bgn_cfg 1102 sc->txchainmask = IWN_ANT_B; 1103 sc->rxchainmask = IWN_ANT_AB; 1104 break; 1105 case IWN_SDID_5x00_7: 1106 case IWN_SDID_5x00_8: 1107 case IWN_SDID_5x00_15: 1108 case IWN_SDID_5x00_16: 1109 case IWN_SDID_5x00_23: 1110 case IWN_SDID_5x00_24: 1111 //iwl5100_abg_cfg 1112 sc->txchainmask = IWN_ANT_B; 1113 sc->rxchainmask = IWN_ANT_AB; 1114 break; 1115 case IWN_SDID_5x00_25: 1116 case IWN_SDID_5x00_26: 1117 case IWN_SDID_5x00_27: 1118 case IWN_SDID_5x00_28: 1119 case IWN_SDID_5x00_29: 1120 case 
IWN_SDID_5x00_30: 1121 case IWN_SDID_5x00_31: 1122 case IWN_SDID_5x00_32: 1123 case IWN_SDID_5x00_33: 1124 case IWN_SDID_5x00_34: 1125 case IWN_SDID_5x00_35: 1126 case IWN_SDID_5x00_36: 1127 //iwl5300_agn_cfg 1128 sc->txchainmask = IWN_ANT_ABC; 1129 sc->rxchainmask = IWN_ANT_ABC; 1130 break; 1131 default: 1132 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1133 "0x%04x rev %d not supported (subdevice)\n", pid, 1134 sc->subdevice_id,sc->hw_type); 1135 return ENOTSUP; 1136 } 1137 break; 1138 /* 5x50 Series */ 1139 case IWN_DID_5x50_1: 1140 case IWN_DID_5x50_2: 1141 case IWN_DID_5x50_3: 1142 case IWN_DID_5x50_4: 1143 sc->limits = &iwn5000_sensitivity_limits; 1144 sc->base_params = &iwn5000_base_params; 1145 sc->fwname = "iwn5000fw"; 1146 switch(sc->subdevice_id) { 1147 case IWN_SDID_5x50_1: 1148 case IWN_SDID_5x50_2: 1149 case IWN_SDID_5x50_3: 1150 //iwl5350_agn_cfg 1151 sc->limits = &iwn5000_sensitivity_limits; 1152 sc->base_params = &iwn5000_base_params; 1153 sc->fwname = "iwn5000fw"; 1154 break; 1155 case IWN_SDID_5x50_4: 1156 case IWN_SDID_5x50_5: 1157 case IWN_SDID_5x50_8: 1158 case IWN_SDID_5x50_9: 1159 case IWN_SDID_5x50_10: 1160 case IWN_SDID_5x50_11: 1161 //iwl5150_agn_cfg 1162 case IWN_SDID_5x50_6: 1163 case IWN_SDID_5x50_7: 1164 case IWN_SDID_5x50_12: 1165 case IWN_SDID_5x50_13: 1166 //iwl5150_abg_cfg 1167 sc->limits = &iwn5000_sensitivity_limits; 1168 sc->fwname = "iwn5150fw"; 1169 sc->base_params = &iwn_5x50_base_params; 1170 break; 1171 default: 1172 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1173 "0x%04x rev %d not supported (subdevice)\n", pid, 1174 sc->subdevice_id,sc->hw_type); 1175 return ENOTSUP; 1176 } 1177 break; 1178 default: 1179 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id : 0x%04x" 1180 "rev 0x%08x not supported (device)\n", pid, sc->subdevice_id, 1181 sc->hw_type); 1182 return ENOTSUP; 1183 } 1184 return 0; 1185 } 1186 1187 static int 1188 iwn4965_attach(struct iwn_softc *sc, uint16_t pid) 1189 
{
	struct iwn_ops *ops = &sc->ops;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
	/* Install the 4965-specific hardware operation callbacks. */
	ops->load_firmware = iwn4965_load_firmware;
	ops->read_eeprom = iwn4965_read_eeprom;
	ops->post_alive = iwn4965_post_alive;
	ops->nic_config = iwn4965_nic_config;
	ops->update_sched = iwn4965_update_sched;
	ops->get_temperature = iwn4965_get_temperature;
	ops->get_rssi = iwn4965_get_rssi;
	ops->set_txpower = iwn4965_set_txpower;
	ops->init_gains = iwn4965_init_gains;
	ops->set_gains = iwn4965_set_gains;
	ops->add_node = iwn4965_add_node;
	ops->tx_done = iwn4965_tx_done;
	ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
	ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
	/* 4965-specific queue counts, DMA geometry and firmware limits. */
	sc->ntxqs = IWN4965_NTXQUEUES;
	sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE;
	sc->ndmachnls = IWN4965_NDMACHNLS;
	sc->broadcast_id = IWN4965_ID_BROADCAST;
	sc->rxonsz = IWN4965_RXONSZ;
	sc->schedsz = IWN4965_SCHEDSZ;
	sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
	sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
	sc->fwsz = IWN4965_FWSZ;
	sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
	sc->limits = &iwn4965_sensitivity_limits;
	sc->fwname = "iwn4965fw";
	/* Override chains masks, ROM is known to be broken.
	 */
	sc->txchainmask = IWN_ANT_AB;
	sc->rxchainmask = IWN_ANT_ABC;
	/* Enable normal btcoex */
	sc->sc_flags |= IWN_FLAG_BTCOEX;

	DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__);

	return 0;
}

/*
 * Install the 5000-series operation callbacks and chip geometry on the
 * softc.  Device-specific firmware name/limits are set by the caller
 * (per-subdevice config switch), not here.
 */
static int
iwn5000_attach(struct iwn_softc *sc, uint16_t pid)
{
	struct iwn_ops *ops = &sc->ops;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	ops->load_firmware = iwn5000_load_firmware;
	ops->read_eeprom = iwn5000_read_eeprom;
	ops->post_alive = iwn5000_post_alive;
	ops->nic_config = iwn5000_nic_config;
	ops->update_sched = iwn5000_update_sched;
	ops->get_temperature = iwn5000_get_temperature;
	ops->get_rssi = iwn5000_get_rssi;
	ops->set_txpower = iwn5000_set_txpower;
	ops->init_gains = iwn5000_init_gains;
	ops->set_gains = iwn5000_set_gains;
	ops->add_node = iwn5000_add_node;
	ops->tx_done = iwn5000_tx_done;
	ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
	ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
	sc->ntxqs = IWN5000_NTXQUEUES;
	sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE;
	sc->ndmachnls = IWN5000_NDMACHNLS;
	sc->broadcast_id = IWN5000_ID_BROADCAST;
	sc->rxonsz = IWN5000_RXONSZ;
	sc->schedsz = IWN5000_SCHEDSZ;
	sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
	sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
	sc->fwsz = IWN5000_FWSZ;
	sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
	sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
	sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;

	return 0;
}

/*
 * Attach the interface to 802.11 radiotap.
1269 */ 1270 static void 1271 iwn_radiotap_attach(struct iwn_softc *sc) 1272 { 1273 struct ifnet *ifp = sc->sc_ifp; 1274 struct ieee80211com *ic = ifp->if_l2com; 1275 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1276 ieee80211_radiotap_attach(ic, 1277 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), 1278 IWN_TX_RADIOTAP_PRESENT, 1279 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), 1280 IWN_RX_RADIOTAP_PRESENT); 1281 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1282 } 1283 1284 static void 1285 iwn_sysctlattach(struct iwn_softc *sc) 1286 { 1287 #ifdef IWN_DEBUG 1288 struct sysctl_ctx_list *ctx; 1289 struct sysctl_oid *tree; 1290 1291 ctx = &sc->sc_sysctl_ctx; 1292 tree = sc->sc_sysctl_tree; 1293 1294 if (tree) { 1295 device_printf(sc->sc_dev, "can't add sysctl node\n"); 1296 return; 1297 } 1298 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1299 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 1300 "control debugging printfs"); 1301 #endif 1302 } 1303 1304 static struct ieee80211vap * 1305 iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 1306 enum ieee80211_opmode opmode, int flags, 1307 const uint8_t bssid[IEEE80211_ADDR_LEN], 1308 const uint8_t mac[IEEE80211_ADDR_LEN]) 1309 { 1310 struct iwn_vap *ivp; 1311 struct ieee80211vap *vap; 1312 uint8_t mac1[IEEE80211_ADDR_LEN]; 1313 struct iwn_softc *sc = ic->ic_ifp->if_softc; 1314 1315 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 1316 return NULL; 1317 1318 IEEE80211_ADDR_COPY(mac1, mac); 1319 1320 ivp = kmalloc(sizeof(struct iwn_vap), M_80211_VAP, M_INTWAIT | M_ZERO); 1321 vap = &ivp->iv_vap; 1322 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac1); 1323 ivp->ctx = IWN_RXON_BSS_CTX; 1324 IEEE80211_ADDR_COPY(ivp->macaddr, mac1); 1325 vap->iv_bmissthreshold = 10; /* override default */ 1326 /* Override with driver methods. 
*/ 1327 ivp->iv_newstate = vap->iv_newstate; 1328 vap->iv_newstate = iwn_newstate; 1329 sc->ivap[IWN_RXON_BSS_CTX] = vap; 1330 1331 ieee80211_ratectl_init(vap); 1332 /* Complete setup. */ 1333 ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status); 1334 ic->ic_opmode = opmode; 1335 return vap; 1336 } 1337 1338 static void 1339 iwn_vap_delete(struct ieee80211vap *vap) 1340 { 1341 struct iwn_vap *ivp = IWN_VAP(vap); 1342 1343 ieee80211_ratectl_deinit(vap); 1344 ieee80211_vap_detach(vap); 1345 kfree(ivp, M_80211_VAP); 1346 } 1347 1348 static int 1349 iwn_pci_detach(device_t dev) 1350 { 1351 struct iwn_softc *sc = device_get_softc(dev); 1352 struct ifnet *ifp = sc->sc_ifp; 1353 struct ieee80211com *ic; 1354 int qid; 1355 1356 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1357 1358 wlan_serialize_enter(); 1359 1360 if (ifp != NULL) { 1361 ic = ifp->if_l2com; 1362 1363 ieee80211_draintask(ic, &sc->sc_reinit_task); 1364 ieee80211_draintask(ic, &sc->sc_radioon_task); 1365 ieee80211_draintask(ic, &sc->sc_radiooff_task); 1366 1367 iwn_stop_locked(sc); 1368 callout_stop(&sc->watchdog_to); 1369 callout_stop(&sc->calib_to); 1370 ieee80211_ifdetach(ic); 1371 } 1372 1373 /* cleanup sysctl nodes */ 1374 sysctl_ctx_free(&sc->sc_sysctl_ctx); 1375 1376 /* Uninstall interrupt handler. */ 1377 if (sc->irq != NULL) { 1378 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 1379 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); 1380 if (sc->irq_rid == 1) 1381 pci_release_msi(dev); 1382 sc->irq = NULL; 1383 } 1384 1385 /* Free DMA resources. 
 */
	iwn_free_rx_ring(sc, &sc->rxq);
	for (qid = 0; qid < sc->ntxqs; qid++)
		iwn_free_tx_ring(sc, &sc->txq[qid]);
	iwn_free_sched(sc);
	iwn_free_kw(sc);
	if (sc->ict != NULL) {
		iwn_free_ict(sc);
		sc->ict = NULL;
	}
	iwn_free_fwmem(sc);

	if (sc->mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
		sc->mem = NULL;
	}

	if (ifp != NULL) {
		if_free(ifp);
		sc->sc_ifp = NULL;
	}

	bus_dma_tag_destroy(sc->sc_dmat);

	wlan_serialize_exit();
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__);
	return 0;
}

/* Stop the hardware on system shutdown. */
static int
iwn_pci_shutdown(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);

	wlan_serialize_enter();
	iwn_stop_locked(sc);
	wlan_serialize_exit();

	return 0;
}

/* Suspend: hand off to net80211, which stops all vaps. */
static int
iwn_pci_suspend(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;

	ieee80211_suspend_all(ic);
	return 0;
}

static int
iwn_pci_resume(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	pci_write_config(dev, 0x41, 0, 1);

	ieee80211_resume_all(ic);
	return 0;
}

/*
 * Request exclusive (MAC) access to the NIC and wait for it to wake.
 * Returns 0 on success or ETIMEDOUT after ~10ms of polling.
 */
static int
iwn_nic_lock(struct iwn_softc *sc)
{
	int ntries;

	/* Request exclusive access to NIC. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);

	/* Spin until we actually get the lock.
	 */
	for (ntries = 0; ntries < 1000; ntries++) {
		if ((IWN_READ(sc, IWN_GP_CNTRL) &
		    (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}

static __inline void
iwn_nic_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
}

/* Read a peripheral (PRPH) register through the indirect access window. */
static __inline uint32_t
iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_PRPH_RDATA);
}

/* Write a peripheral (PRPH) register through the indirect access window. */
static __inline void
iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
}

static __inline void
iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
}

static __inline void
iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
}

/* Write `count' 32-bit words to consecutive PRPH registers. */
static __inline void
iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
    const uint32_t *data, int count)
{
	for (; count > 0; count--, data++, addr += 4)
		iwn_prph_write(sc, addr, *data);
}

/* Read a 32-bit word of NIC internal memory (indirect access). */
static __inline uint32_t
iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_MEM_RDATA);
}

/* Write a 32-bit word of NIC internal memory (indirect access). */
static __inline void
iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_MEM_WDATA, data);
}

static __inline void
iwn_mem_write_2(struct
iwn_softc *sc, uint32_t addr, uint16_t data)
{
	uint32_t tmp;

	/* 16-bit store into NIC memory: read-modify-write of the
	 * containing 32-bit word, placing `data' in the half selected
	 * by the low address bits. */
	tmp = iwn_mem_read(sc, addr & ~3);
	if (addr & 3)
		tmp = (tmp & 0x0000ffff) | data << 16;
	else
		tmp = (tmp & 0xffff0000) | data;
	iwn_mem_write(sc, addr & ~3, tmp);
}

/* Read `count' 32-bit words of NIC memory into `data'. */
static __inline void
iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
    int count)
{
	for (; count > 0; count--, addr += 4)
		*data++ = iwn_mem_read(sc, addr);
}

/* Fill `count' 32-bit words of NIC memory with `val'. */
static __inline void
iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
    int count)
{
	for (; count > 0; count--, addr += 4)
		iwn_mem_write(sc, addr, val);
}

/*
 * Acquire exclusive access to the EEPROM.
 * Returns 0 on success or ETIMEDOUT after 100 request attempts.
 */
static int
iwn_eeprom_lock(struct iwn_softc *sc)
{
	int i, ntries;

	for (i = 0; i < 100; i++) {
		/* Request exclusive access to EEPROM. */
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);

		/* Spin until we actually get the lock. */
		for (ntries = 0; ntries < 100; ntries++) {
			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
				return 0;
			DELAY(10);
		}
	}
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__);
	return ETIMEDOUT;
}

static __inline void
iwn_eeprom_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
}

/*
 * Initialize access by host to One Time Programmable ROM.
 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
 */
static int
iwn_init_otprom(struct iwn_softc *sc)
{
	uint16_t prev, base, next;
	int count, error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Wait for clock stabilization before accessing prph.
	 */
	if ((error = iwn_clock_wait(sc)) != 0)
		return error;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	/* Pulse the power-management reset request. */
	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	DELAY(5);
	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	iwn_nic_unlock(sc);

	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
	if (sc->base_params->shadow_ram_support) {
		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
		    IWN_RESET_LINK_PWR_MGMT_DIS);
	}
	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
	/* Clear ECC status. */
	IWN_SETBITS(sc, IWN_OTP_GP,
	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);

	/*
	 * Find the block before last block (contains the EEPROM image)
	 * for HW without OTP shadow RAM.
	 */
	if (! sc->base_params->shadow_ram_support) {
		/* Switch to absolute addressing mode. */
		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
		base = prev = 0;
		/* Walk the linked list of OTP blocks; each entry's first
		 * word is the (little-endian) address of the next block. */
		for (count = 0; count < sc->base_params->max_ll_items;
		    count++) {
			error = iwn_read_prom_data(sc, base, &next, 2);
			if (error != 0)
				return error;
			if (next == 0)	/* End of linked-list. */
				break;
			prev = base;
			base = le16toh(next);
		}
		if (count == 0 || count == sc->base_params->max_ll_items)
			return EIO;
		/* Skip "next" word.
 */
		sc->prom_base = prev + 1;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;
}

/*
 * Read `count' bytes from the EEPROM/OTPROM starting at word offset
 * `addr' (relative to sc->prom_base).  Returns 0, ETIMEDOUT on a read
 * timeout, or EIO on an uncorrectable OTPROM ECC error.
 */
static int
iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
{
	uint8_t *out = data;
	uint32_t val, tmp;
	int ntries;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	addr += sc->prom_base;
	for (; count > 0; count -= 2, addr++) {
		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
		for (ntries = 0; ntries < 10; ntries++) {
			val = IWN_READ(sc, IWN_EEPROM);
			if (val & IWN_EEPROM_READ_VALID)
				break;
			DELAY(5);
		}
		if (ntries == 10) {
			device_printf(sc->sc_dev,
			    "timeout reading ROM at 0x%x\n", addr);
			return ETIMEDOUT;
		}
		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
			/* OTPROM, check for ECC errors. */
			tmp = IWN_READ(sc, IWN_OTP_GP);
			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
				device_printf(sc->sc_dev,
				    "OTPROM ECC error at 0x%x\n", addr);
				return EIO;
			}
			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
				/* Correctable ECC error, clear bit.
				 */
				IWN_SETBITS(sc, IWN_OTP_GP,
				    IWN_OTP_GP_ECC_CORR_STTS);
			}
		}
		/* Data is in the upper 16 bits of the register. */
		*out++ = val >> 16;
		if (count > 1)
			*out++ = val >> 24;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;
}

/* bus_dmamap_load callback: store the single segment's bus address. */
static void
iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

/*
 * Allocate, zero and map a physically contiguous DMA area of `size'
 * bytes with the given alignment.  On success the kernel virtual
 * address is optionally returned through `kvap'.  On failure all
 * partially acquired resources are released.
 */
static int
iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
    void **kvap, bus_size_t size, bus_size_t alignment)
{
	int error;

	dma->tag = NULL;
	dma->size = size;

	error = bus_dma_tag_create(sc->sc_dmat, alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, BUS_DMA_NOWAIT, &dma->tag);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	if (kvap != NULL)
		*kvap = dma->vaddr;

	return 0;

fail:	iwn_dma_contig_free(dma);
	return error;
}

/* Release everything acquired by iwn_dma_contig_alloc(); idempotent. */
static void
iwn_dma_contig_free(struct iwn_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}

static int
iwn_alloc_sched(struct iwn_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched,
	    sc->schedsz, 1024);
}

static void
iwn_free_sched(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->sched_dma);
}

static int
iwn_alloc_kw(struct iwn_softc *sc)
{
	/* "Keep Warm" page must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096);
}

static void
iwn_free_kw(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->kw_dma);
}

static int
iwn_alloc_ict(struct iwn_softc *sc)
{
	/* ICT table must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict,
	    IWN_ICT_SIZE, 4096);
}

static void
iwn_free_ict(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->ict_dma);
}

static int
iwn_alloc_fwmem(struct iwn_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16);
}

static void
iwn_free_fwmem(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->fw_dma);
}

/*
 * Allocate the RX ring: descriptor array, status area, a DMA tag for
 * the receive buffers, and the buffers themselves.
 */
static int
iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Allocate RX descriptors (256-byte aligned).
	 */
	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
	    size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate RX ring DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate RX status area (16-byte aligned). */
	error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat,
	    sizeof (struct iwn_rx_status), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate RX status DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
1855 */ 1856 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1857 struct iwn_rx_data *data = &ring->data[i]; 1858 bus_addr_t paddr; 1859 1860 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1861 if (error != 0) { 1862 device_printf(sc->sc_dev, 1863 "%s: could not create RX buf DMA map, error %d\n", 1864 __func__, error); 1865 goto fail; 1866 } 1867 1868 data->m = m_getjcl(MB_DONTWAIT, MT_DATA, 1869 M_PKTHDR, IWN_RBUF_SIZE); 1870 if (data->m == NULL) { 1871 device_printf(sc->sc_dev, 1872 "%s: could not allocate RX mbuf\n", __func__); 1873 error = ENOBUFS; 1874 goto fail; 1875 } 1876 1877 error = bus_dmamap_load(ring->data_dmat, data->map, 1878 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, 1879 &paddr, BUS_DMA_NOWAIT); 1880 if (error != 0 && error != EFBIG) { 1881 device_printf(sc->sc_dev, 1882 "%s: can't not map mbuf, error %d\n", __func__, 1883 error); 1884 goto fail; 1885 } 1886 1887 /* Set physical address of RX buffer (256-byte aligned). */ 1888 ring->desc[i] = htole32(paddr >> 8); 1889 } 1890 1891 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1892 BUS_DMASYNC_PREWRITE); 1893 1894 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 1895 1896 return 0; 1897 1898 fail: iwn_free_rx_ring(sc, ring); 1899 1900 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 1901 1902 return error; 1903 } 1904 1905 static void 1906 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1907 { 1908 int ntries; 1909 1910 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 1911 1912 if (iwn_nic_lock(sc) == 0) { 1913 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 1914 for (ntries = 0; ntries < 1000; ntries++) { 1915 if (IWN_READ(sc, IWN_FH_RX_STATUS) & 1916 IWN_FH_RX_STATUS_IDLE) 1917 break; 1918 DELAY(10); 1919 } 1920 iwn_nic_unlock(sc); 1921 } 1922 ring->cur = 0; 1923 sc->last_rx_valid = 0; 1924 } 1925 1926 static void 1927 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1928 { 1929 int i; 1930 1931 DPRINTF(sc, 
IWN_DEBUG_TRACE, "->Doing %s \n", __func__); 1932 1933 iwn_dma_contig_free(&ring->desc_dma); 1934 iwn_dma_contig_free(&ring->stat_dma); 1935 1936 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1937 struct iwn_rx_data *data = &ring->data[i]; 1938 1939 if (data->m != NULL) { 1940 bus_dmamap_sync(ring->data_dmat, data->map, 1941 BUS_DMASYNC_POSTREAD); 1942 bus_dmamap_unload(ring->data_dmat, data->map); 1943 m_freem(data->m); 1944 data->m = NULL; 1945 } 1946 if (data->map != NULL) 1947 bus_dmamap_destroy(ring->data_dmat, data->map); 1948 } 1949 if (ring->data_dmat != NULL) { 1950 bus_dma_tag_destroy(ring->data_dmat); 1951 ring->data_dmat = NULL; 1952 } 1953 } 1954 1955 static int 1956 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid) 1957 { 1958 bus_addr_t paddr; 1959 bus_size_t size; 1960 int i, error; 1961 1962 ring->qid = qid; 1963 ring->queued = 0; 1964 ring->cur = 0; 1965 1966 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1967 1968 /* Allocate TX descriptors (256-byte aligned). 
 */
	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
	    size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate TX ring DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate TX command blocks (4-byte aligned). */
	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
	error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
	    size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate TX cmd DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create TX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Precompute the bus address of each slot's command/scratch area. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + 12;
		paddr += sizeof (struct iwn_tx_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create TX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;

fail:	iwn_free_tx_ring(sc, ring);
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
	return error;
}

/* Release queued mbufs, clear descriptors and reset the ring indices. */
static void
iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__);

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}

/* Free all TX ring resources: descriptors, commands, mbufs and maps. */
static void
iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(ring->data_dmat, data->map);
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/* Point the hardware at a cleared ICT table and switch the driver to
 * ICT interrupt mode (5000 series). */
static void
iwn5000_ict_reset(struct iwn_softc *sc)
{
	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);

	/* Reset ICT table. */
	memset(sc->ict, 0, IWN_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);

	/* Enable periodic RX interrupt. */
	sc->int_mask |= IWN_INT_RX_PERIODIC;
	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWN_FLAG_USE_ICT;

	/* Re-enable interrupts.
 */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
}

/*
 * Read per-adapter settings (MAC address, channels, TX power, ...) from
 * EEPROM or OTPROM.  The adapter is powered on for the duration of the
 * access and powered back off before returning.
 */
static int
iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
{
	struct iwn_ops *ops = &sc->ops;
	uint16_t val;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Check whether adapter has an EEPROM or an OTPROM. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
	    (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");

	/* Adapter has to be powered on for EEPROM access to work. */
	if ((error = iwn_apm_init(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not power ON adapter, error %d\n", __func__,
		    error);
		return error;
	}

	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
		device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
		return EIO;
	}
	if ((error = iwn_eeprom_lock(sc)) != 0) {
		device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n",
		    __func__, error);
		return error;
	}
	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
		if ((error = iwn_init_otprom(sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not initialize OTPROM, error %d\n",
			    __func__, error);
			return error;
		}
	}

	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
	DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val));
	/* Check if HT support is bonded out. */
	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
		sc->sc_flags |= IWN_FLAG_HAS_11N;

	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
	sc->rfcfg = le16toh(val);
	DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
	/* Read Tx/Rx chains from ROM unless it's known to be broken. */
	if (sc->txchainmask == 0)
		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
	if (sc->rxchainmask == 0)
		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);

	/* Read MAC address. */
	iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);

	/* Read adapter-specific information from EEPROM. */
	ops->read_eeprom(sc);

	iwn_apm_stop(sc);	/* Power OFF adapter. */

	iwn_eeprom_unlock(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;
}

/*
 * 4965-specific EEPROM contents: regulatory domain, channel lists and
 * the factory TX power calibration samples.
 */
static void
iwn4965_read_eeprom(struct iwn_softc *sc)
{
	uint32_t addr;
	uint16_t val;
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only). */
	for (i = 0; i < IWN_NBANDS - 1; i++) {
		addr = iwn4965_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
	sc->maxpwr2GHz = val & 0xff;
	sc->maxpwr5GHz = val >> 8;
	/* Check that EEPROM values are within valid range. */
	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
		sc->maxpwr5GHz = 38;
	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
		sc->maxpwr2GHz = 38;
	DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
	    sc->maxpwr2GHz, sc->maxpwr5GHz);

	/* Read samples for each TX power group. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
	    sizeof sc->bands);

	/* Read voltage at which samples were taken. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
	sc->eeprom_voltage = (int16_t)le16toh(val);
	DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
	    sc->eeprom_voltage);

#ifdef IWN_DEBUG
	/* Print samples. */
	if (sc->sc_debug & IWN_DEBUG_ANY) {
		for (i = 0; i < IWN_NBANDS - 1; i++)
			iwn4965_print_power_group(sc, i);
	}
#endif

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

#ifdef IWN_DEBUG
/*
 * Dump one TX power calibration group (debug builds only).
 */
static void
iwn4965_print_power_group(struct iwn_softc *sc, int i)
{
	struct iwn4965_eeprom_band *band = &sc->bands[i];
	struct iwn4965_eeprom_chan_samples *chans = band->chans;
	int j, c;

	kprintf("===band %d===\n", i);
	kprintf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
	kprintf("chan1 num=%d\n", chans[0].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			kprintf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[0].samples[c][j].temp,
			    chans[0].samples[c][j].gain,
			    chans[0].samples[c][j].power,
			    chans[0].samples[c][j].pa_det);
		}
	}
	kprintf("chan2 num=%d\n", chans[1].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			kprintf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[1].samples[c][j].temp,
			    chans[1].samples[c][j].gain,
			    chans[1].samples[c][j].power,
			    chans[1].samples[c][j].pa_det);
		}
	}
}
#endif

/*
 * 5000-and-later EEPROM contents; the layout is indirect through a base
 * offset read from IWN5000_EEPROM_REG.
 */
static void
iwn5000_read_eeprom(struct iwn_softc *sc)
{
	struct iwn5000_eeprom_calib_hdr hdr;
	int32_t volt;
	uint32_t base, addr;
	uint16_t val;
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Read regulatory domain (4 ASCII characters).
 */
	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
	    sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz ones only). */
	for (i = 0; i < IWN_NBANDS - 1; i++) {
		addr = base + sc->base_params->regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read enhanced TX power information for 6000 Series. */
	if (sc->base_params->enhanced_TX_power)
		iwn_read_eeprom_enhinfo(sc);

	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: calib version=%u pa type=%u voltage=%u\n", __func__,
	    hdr.version, hdr.pa_type, le16toh(hdr.volt));
	sc->calib_ver = hdr.version;

	if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
		sc->eeprom_voltage = le16toh(hdr.volt);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp_high=le16toh(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		sc->eeprom_temp = le16toh(val);
	}

	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		/* Compute temperature offset. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp = le16toh(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		volt = le16toh(val);
		sc->temp_off = sc->eeprom_temp - (volt / -5);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
		    sc->eeprom_temp, volt, sc->temp_off);
	} else {
		/* Read crystal calibration. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
		    &sc->eeprom_crystal, sizeof (uint32_t));
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
		    le32toh(sc->eeprom_crystal));
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

}

/*
 * Translate EEPROM flags to net80211.
 */
static uint32_t
iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
{
	uint32_t nflags;

	nflags = 0;
	if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
		nflags |= IEEE80211_CHAN_PASSIVE;
	if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
		nflags |= IEEE80211_CHAN_NOADHOC;
	if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
		nflags |= IEEE80211_CHAN_DFS;
		/* XXX apparently IBSS may still be marked */
		nflags |= IEEE80211_CHAN_NOADHOC;
	}

	return nflags;
}

/*
 * Add the 20MHz channels of EEPROM band `n' to the net80211 channel
 * list.  In the 2GHz band each channel is added twice (11b and 11g);
 * an extra HT20 clone is appended when the adapter supports 11n.
 */
static void
iwn_read_eeprom_band(struct iwn_softc *sc, int n)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
	const struct iwn_chan_band *band = &iwn_bands[n];
	struct ieee80211_channel *c;
	uint8_t chan;
	int i, nflags;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "skip chan %d flags 0x%x maxpwr %d\n",
			    band->chan[i], channels[i].flags,
			    channels[i].maxpwr);
			continue;
		}
		chan = band->chan[i];
		nflags = iwn_eeprom_channel_flags(&channels[i]);

		c = &ic->ic_channels[ic->ic_nchans++];
		c->ic_ieee = chan;
		c->ic_maxregpower = channels[i].maxpwr;
		c->ic_maxpower = 2*c->ic_maxregpower;

		if (n == 0) {	/* 2GHz band */
			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G);
			/* G =>'s B is supported */
			c->ic_flags =
			    IEEE80211_CHAN_B | nflags;
			c = &ic->ic_channels[ic->ic_nchans++];
			c[0] = c[-1];
			c->ic_flags = IEEE80211_CHAN_G | nflags;
		} else {	/* 5GHz band */
			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A);
			c->ic_flags = IEEE80211_CHAN_A | nflags;
		}

		/* Save maximum allowed TX power for this channel. */
		sc->maxpwr[chan] = channels[i].maxpwr;

		DPRINTF(sc, IWN_DEBUG_RESET,
		    "add chan %d flags 0x%x maxpwr %d\n", chan,
		    channels[i].flags, channels[i].maxpwr);

		if (sc->sc_flags & IWN_FLAG_HAS_11N) {
			/* add HT20, HT40 added separately */
			c = &ic->ic_channels[ic->ic_nchans++];
			c[0] = c[-1];
			c->ic_flags |= IEEE80211_CHAN_HT20;
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

}

/*
 * Add the HT40 channel pairs of EEPROM band `n' (5 = 2GHz, 6 = 5GHz)
 * to the net80211 channel list.  No-op when 11n is bonded out.
 */
static void
iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
	const struct iwn_chan_band *band = &iwn_bands[n];
	struct ieee80211_channel *c, *cent, *extc;
	uint8_t chan;
	int i, nflags;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__);

	if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) {
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__);
		return;
	}

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "skip chan %d flags 0x%x maxpwr %d\n",
			    band->chan[i], channels[i].flags,
			    channels[i].maxpwr);
			continue;
		}
		chan = band->chan[i];
		nflags = iwn_eeprom_channel_flags(&channels[i]);

		/*
		 * Each entry defines an HT40 channel pair; find the
		 * center channel, then the extension channel above.
		 */
		cent = ieee80211_find_channel_byieee(ic, chan,
		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
		if (cent == NULL) {	/* XXX shouldn't happen */
			device_printf(sc->sc_dev,
			    "%s: no entry for channel %d\n", __func__, chan);
			continue;
		}
		extc = ieee80211_find_channel(ic, cent->ic_freq+20,
		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
		if (extc == NULL) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "%s: skip chan %d, extension channel not found\n",
			    __func__, chan);
			continue;
		}

		DPRINTF(sc, IWN_DEBUG_RESET,
		    "add ht40 chan %d flags 0x%x maxpwr %d\n",
		    chan, channels[i].flags, channels[i].maxpwr);

		/* Clone the center channel as HT40U, the extension as HT40D. */
		c = &ic->ic_channels[ic->ic_nchans++];
		c[0] = cent[0];
		c->ic_extieee = extc->ic_ieee;
		c->ic_flags &= ~IEEE80211_CHAN_HT;
		c->ic_flags |= IEEE80211_CHAN_HT40U | nflags;
		c = &ic->ic_channels[ic->ic_nchans++];
		c[0] = extc[0];
		c->ic_extieee = cent->ic_ieee;
		c->ic_flags &= ~IEEE80211_CHAN_HT;
		c->ic_flags |= IEEE80211_CHAN_HT40D | nflags;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

}

/*
 * Read the channel list of EEPROM band `n' and merge it into the
 * net80211 channel table (bands 0-4 are 20MHz, 5-6 are HT40 pairs).
 */
static void
iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
	    iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));

	if (n < 5)
		iwn_read_eeprom_band(sc, n);
	else
		iwn_read_eeprom_ht40(sc, n);
	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
}

/*
 * Map a net80211 channel back to its EEPROM entry, or NULL if the
 * channel is unknown to the adapter.
 */
static struct iwn_eeprom_chan *
iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
{
	int band, chan, i, j;

	if (IEEE80211_IS_CHAN_HT40(c)) {
		band = IEEE80211_IS_CHAN_5GHZ(c) ? 6 : 5;
		if (IEEE80211_IS_CHAN_HT40D(c))
			chan = c->ic_extieee;
		else
			chan = c->ic_ieee;
		for (i = 0; i < iwn_bands[band].nchan; i++) {
			if (iwn_bands[band].chan[i] == chan)
				return &sc->eeprom_channels[band][i];
		}
	} else {
		for (j = 0; j < 5; j++) {
			for (i = 0; i < iwn_bands[j].nchan; i++) {
				if (iwn_bands[j].chan[i] == c->ic_ieee)
					return &sc->eeprom_channels[j][i];
			}
		}
	}
	return NULL;
}

/*
 * Enforce flags read from EEPROM.
 */
static int
iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
    int nchan, struct ieee80211_channel chans[])
{
	struct iwn_softc *sc = ic->ic_ifp->if_softc;
	int i;

	for (i = 0; i < nchan; i++) {
		struct ieee80211_channel *c = &chans[i];
		struct iwn_eeprom_chan *channel;

		channel = iwn_find_eeprom_channel(sc, c);
		if (channel == NULL) {
			if_printf(ic->ic_ifp,
			    "%s: invalid channel %u freq %u/0x%x\n",
			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
			return EINVAL;
		}
		c->ic_flags |= iwn_eeprom_channel_flags(channel);
	}

	return 0;
}

/*
 * Read enhanced TX power entries (6000 Series) and apply them to the
 * matching net80211 channels.
 */
static void
iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
{
	struct iwn_eeprom_enhinfo enhinfo[35];
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_channel *c;
	uint16_t val, base;
	int8_t maxpwr;
	uint8_t flags;
	int i, j;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
	    enhinfo, sizeof enhinfo);

	for (i = 0; i < nitems(enhinfo); i++) {
		flags = enhinfo[i].flags;
		if (!(flags & IWN_ENHINFO_VALID))
			continue;	/* Skip invalid entries.
 */

		/* Highest TX power over the enabled chains/MIMO modes. */
		maxpwr = 0;
		if (sc->txchainmask & IWN_ANT_A)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
		if (sc->txchainmask & IWN_ANT_B)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
		if (sc->txchainmask & IWN_ANT_C)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
		if (sc->ntxchains == 2)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
		else if (sc->ntxchains == 3)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);

		for (j = 0; j < ic->ic_nchans; j++) {
			c = &ic->ic_channels[j];
			/* Match the entry's band/modulation/width flags. */
			if ((flags & IWN_ENHINFO_5GHZ)) {
				if (!IEEE80211_IS_CHAN_A(c))
					continue;
			} else if ((flags & IWN_ENHINFO_OFDM)) {
				if (!IEEE80211_IS_CHAN_G(c))
					continue;
			} else if (!IEEE80211_IS_CHAN_B(c))
				continue;
			if ((flags & IWN_ENHINFO_HT40)) {
				if (!IEEE80211_IS_CHAN_HT40(c))
					continue;
			} else {
				if (IEEE80211_IS_CHAN_HT40(c))
					continue;
			}
			if (enhinfo[i].chan != 0 &&
			    enhinfo[i].chan != c->ic_ieee)
				continue;

			DPRINTF(sc, IWN_DEBUG_RESET,
			    "channel %d(%x), maxpwr %d\n", c->ic_ieee,
			    c->ic_flags, maxpwr / 2);
			c->ic_maxregpower = maxpwr / 2;
			c->ic_maxpower = maxpwr;
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

}

/*
 * net80211 node allocation callback.
 */
static struct ieee80211_node *
iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	return kmalloc(sizeof(struct iwn_node), M_80211_NODE,
	    M_INTWAIT | M_ZERO);
}

/*
 * Map a legacy 802.11 rate (units of 500kb/s) to its PLCP code.
 * Returns 0 for unknown rates.
 */
static __inline int
rate2plcp(int rate)
{
	switch (rate & 0xff) {
	/* OFDM rates. */
	case 12:	return 0xd;
	case 18:	return 0xf;
	case 24:	return 0x5;
	case 36:	return 0x7;
	case 48:	return 0x9;
	case 72:	return 0xb;
	case 96:	return 0x1;
	case 108:	return 0x3;
	/* CCK rates. */
	case 2:		return 10;
	case 4:		return 20;
	case 11:	return 55;
	case 22:	return 110;
	}
	return 0;
}

/*
 * Calculate the required PLCP value from the given rate,
 * to the given node.
 *
 * This will take the node configuration (eg 11n, rate table
 * setup, etc) into consideration.
 */
static uint32_t
iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni,
    uint8_t rate)
{
#define	RV(v)	((v) & IEEE80211_RATE_VAL)
	struct ieee80211com *ic = ni->ni_ic;
	uint8_t txant1, txant2;
	uint32_t plcp = 0;
	int ridx;

	/* Use the first valid TX antenna. */
	txant1 = IWN_LSB(sc->txchainmask);
	txant2 = IWN_LSB(sc->txchainmask & ~txant1);

	/*
	 * If it's an MCS rate, let's set the plcp correctly
	 * and set the relevant flags based on the node config.
	 */
	if (rate & IEEE80211_RATE_MCS) {
		/*
		 * Set the initial PLCP value to be between 0->31 for
		 * MCS 0 -> MCS 31, then set the "I'm an MCS rate!"
		 * flag.
		 */
		plcp = RV(rate) | IWN_RFLAG_MCS;

		/*
		 * XXX the following should only occur if both
		 * the local configuration _and_ the remote node
		 * advertise these capabilities.  Thus this code
		 * may need fixing!
		 */

		/*
		 * Set the channel width and guard interval.
		 */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			plcp |= IWN_RFLAG_HT40;
			if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
				plcp |= IWN_RFLAG_SGI;
		} else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
			plcp |= IWN_RFLAG_SGI;
		}

		/*
		 * If it's a two stream rate, enable TX on both
		 * antennas.
		 *
		 * XXX three stream rates?
		 */
		if (rate > 0x87)
			plcp |= IWN_RFLAG_ANT(txant1 | txant2);
		else
			plcp |= IWN_RFLAG_ANT(txant1);
	} else {
		/*
		 * Set the initial PLCP - fine for both
		 * OFDM and CCK rates.
		 */
		plcp = rate2plcp(rate);

		/* Set CCK flag if it's CCK */

		/* XXX It would be nice to have a method
		 * to map the ridx -> phy table entry
		 * so we could just query that, rather than
		 * this hack to check against IWN_RIDX_OFDM6.
		 */
		ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
		    rate & IEEE80211_RATE_VAL);
		if (ridx < IWN_RIDX_OFDM6 &&
		    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
			plcp |= IWN_RFLAG_CCK;

		/* Set antenna configuration */
		plcp |= IWN_RFLAG_ANT(txant1);
	}

	DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n",
	    __func__,
	    rate,
	    plcp);

	return (htole32(plcp));
#undef RV
}

/*
 * net80211 new-association callback.
 */
static void
iwn_newassoc(struct ieee80211_node *ni, int isnew)
{
	/* Doesn't do anything at the moment */
}

/*
 * net80211 media change callback.
 */
static int
iwn_media_change(struct ifnet *ifp)
{
	int error;

	error = ieee80211_media_change(ifp);
	/* NB: only the fixed rate can change and that doesn't need a reset */
	return (error == ENETRESET ?
	    0 : error);
}

/*
 * net80211 state-change callback.  Drives the firmware through auth/run
 * transitions; the actual vap state change is completed by chaining to
 * the saved net80211 handler.
 */
static int
iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwn_vap *ivp = IWN_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwn_softc *sc = ic->ic_ifp->if_softc;
	int error = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
	    ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]);

	callout_stop(&sc->calib_to);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];

	switch (nstate) {
	case IEEE80211_S_ASSOC:
		if (vap->iv_state != IEEE80211_S_RUN)
			break;
		/* FALLTHROUGH */
	case IEEE80211_S_AUTH:
		if (vap->iv_state == IEEE80211_S_AUTH)
			break;

		/*
		 * !AUTH -> AUTH transition requires state reset to handle
		 * reassociations correctly.
		 */
		sc->rxon->associd = 0;
		sc->rxon->filter &= ~htole32(IWN_FILTER_BSS);
		sc->calib.state = IWN_CALIB_STATE_INIT;

		if ((error = iwn_auth(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state\n", __func__);
		}
		break;

	case IEEE80211_S_RUN:
		/*
		 * RUN -> RUN transition; Just restart the timers.
		 */
		if (vap->iv_state == IEEE80211_S_RUN) {
			sc->calib_cnt = 0;
			break;
		}

		/*
		 * !RUN -> RUN requires setting the association id
		 * which is done with a firmware cmd.  We also defer
		 * starting the timers until that work is done.
		 */
		if ((error = iwn_run(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to run state\n", __func__);
		}
		break;

	case IEEE80211_S_INIT:
		sc->calib.state = IWN_CALIB_STATE_INIT;
		break;

	default:
		break;
	}
	if (error != 0){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
		return error;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return ivp->iv_newstate(vap, nstate, arg);
}

/*
 * Periodic calibration callout: every 60 seconds (120 ticks of 500ms)
 * request statistics from the firmware to drive automatic TX power
 * calibration.
 */
static void
iwn_calib_timeout(void *arg)
{
	struct iwn_softc *sc = arg;

	wlan_serialize_enter();

	/* Force automatic TX power calibration every 60 secs. */
	if (++sc->calib_cnt >= 120) {
		uint32_t flags = 0;

		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
		    "sending request for statistics");
		(void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
		    sizeof flags, 1);
		sc->calib_cnt = 0;
	}
	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
	    sc);
	wlan_serialize_exit();
}

/*
 * Process an RX_PHY firmware notification.  This is usually immediately
 * followed by an MPDU_RX_DONE notification.
 */
static void
iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Save RX statistics, they will be used on MPDU_RX_DONE. */
	memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
	sc->last_rx_valid = 1;
}

/*
 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
2873 */ 2874 static void 2875 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2876 struct iwn_rx_data *data) 2877 { 2878 struct iwn_ops *ops = &sc->ops; 2879 struct ifnet *ifp = sc->sc_ifp; 2880 struct ieee80211com *ic = ifp->if_l2com; 2881 struct iwn_rx_ring *ring = &sc->rxq; 2882 struct ieee80211_frame *wh; 2883 struct ieee80211_node *ni; 2884 struct mbuf *m, *m1; 2885 struct iwn_rx_stat *stat; 2886 caddr_t head; 2887 bus_addr_t paddr; 2888 uint32_t flags; 2889 int error, len, rssi, nf; 2890 2891 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2892 2893 if (desc->type == IWN_MPDU_RX_DONE) { 2894 /* Check for prior RX_PHY notification. */ 2895 if (!sc->last_rx_valid) { 2896 DPRINTF(sc, IWN_DEBUG_ANY, 2897 "%s: missing RX_PHY\n", __func__); 2898 return; 2899 } 2900 stat = &sc->last_rx_stat; 2901 } else 2902 stat = (struct iwn_rx_stat *)(desc + 1); 2903 2904 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2905 2906 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { 2907 device_printf(sc->sc_dev, 2908 "%s: invalid RX statistic header, len %d\n", __func__, 2909 stat->cfg_phy_len); 2910 return; 2911 } 2912 if (desc->type == IWN_MPDU_RX_DONE) { 2913 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); 2914 head = (caddr_t)(mpdu + 1); 2915 len = le16toh(mpdu->len); 2916 } else { 2917 head = (caddr_t)(stat + 1) + stat->cfg_phy_len; 2918 len = le16toh(stat->len); 2919 } 2920 2921 flags = le32toh(*(uint32_t *)(head + len)); 2922 2923 /* Discard frames with a bad FCS early. */ 2924 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { 2925 DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n", 2926 __func__, flags); 2927 IFNET_STAT_INC(ifp, ierrors, 1); 2928 return; 2929 } 2930 /* Discard frames that are too short. 
*/ 2931 if (len < sizeof (*wh)) { 2932 DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n", 2933 __func__, len); 2934 IFNET_STAT_INC(ifp, ierrors, 1); 2935 return; 2936 } 2937 2938 m1 = m_getjcl(MB_DONTWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE); 2939 if (m1 == NULL) { 2940 DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n", 2941 __func__); 2942 IFNET_STAT_INC(ifp, ierrors, 1); 2943 return; 2944 } 2945 bus_dmamap_unload(ring->data_dmat, data->map); 2946 2947 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 2948 IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 2949 if (error != 0 && error != EFBIG) { 2950 device_printf(sc->sc_dev, 2951 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 2952 m_freem(m1); 2953 2954 /* Try to reload the old mbuf. */ 2955 error = bus_dmamap_load(ring->data_dmat, data->map, 2956 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, 2957 &paddr, BUS_DMA_NOWAIT); 2958 if (error != 0 && error != EFBIG) { 2959 panic("%s: could not load old RX mbuf", __func__); 2960 } 2961 /* Physical address may have changed. */ 2962 ring->desc[ring->cur] = htole32(paddr >> 8); 2963 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 2964 BUS_DMASYNC_PREWRITE); 2965 IFNET_STAT_INC(ifp, ierrors, 1); 2966 return; 2967 } 2968 2969 m = data->m; 2970 data->m = m1; 2971 /* Update RX descriptor. */ 2972 ring->desc[ring->cur] = htole32(paddr >> 8); 2973 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2974 BUS_DMASYNC_PREWRITE); 2975 2976 /* Finalize mbuf. */ 2977 m->m_pkthdr.rcvif = ifp; 2978 m->m_data = head; 2979 m->m_pkthdr.len = m->m_len = len; 2980 2981 /* Grab a reference to the source node. */ 2982 wh = mtod(m, struct ieee80211_frame *); 2983 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2984 nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN && 2985 (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? 
sc->noise : -95; 2986 2987 rssi = ops->get_rssi(sc, stat); 2988 2989 if (ieee80211_radiotap_active(ic)) { 2990 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 2991 2992 tap->wr_flags = 0; 2993 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) 2994 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2995 tap->wr_dbm_antsignal = (int8_t)rssi; 2996 tap->wr_dbm_antnoise = (int8_t)nf; 2997 tap->wr_tsft = stat->tstamp; 2998 switch (stat->rate) { 2999 /* CCK rates. */ 3000 case 10: tap->wr_rate = 2; break; 3001 case 20: tap->wr_rate = 4; break; 3002 case 55: tap->wr_rate = 11; break; 3003 case 110: tap->wr_rate = 22; break; 3004 /* OFDM rates. */ 3005 case 0xd: tap->wr_rate = 12; break; 3006 case 0xf: tap->wr_rate = 18; break; 3007 case 0x5: tap->wr_rate = 24; break; 3008 case 0x7: tap->wr_rate = 36; break; 3009 case 0x9: tap->wr_rate = 48; break; 3010 case 0xb: tap->wr_rate = 72; break; 3011 case 0x1: tap->wr_rate = 96; break; 3012 case 0x3: tap->wr_rate = 108; break; 3013 /* Unknown rate: should not happen. */ 3014 default: tap->wr_rate = 0; 3015 } 3016 } 3017 3018 /* Send the frame to the 802.11 layer. */ 3019 if (ni != NULL) { 3020 if (ni->ni_flags & IEEE80211_NODE_HT) 3021 m->m_flags |= M_AMPDU; 3022 (void)ieee80211_input(ni, m, rssi - nf, nf); 3023 /* Node is no longer needed. */ 3024 ieee80211_free_node(ni); 3025 } else { 3026 (void)ieee80211_input_all(ic, m, rssi - nf, nf); 3027 } 3028 3029 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3030 3031 } 3032 3033 /* Process an incoming Compressed BlockAck. 
*/ 3034 static void 3035 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3036 struct iwn_rx_data *data) 3037 { 3038 struct iwn_ops *ops = &sc->ops; 3039 struct ifnet *ifp = sc->sc_ifp; 3040 struct iwn_node *wn; 3041 struct ieee80211_node *ni; 3042 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); 3043 struct iwn_tx_ring *txq; 3044 struct iwn_tx_data *txdata; 3045 struct ieee80211_tx_ampdu *tap; 3046 struct mbuf *m; 3047 uint64_t bitmap; 3048 uint16_t ssn; 3049 uint8_t tid; 3050 int ackfailcnt = 0, i, lastidx, qid, *res, shift; 3051 3052 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3053 3054 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3055 3056 qid = le16toh(ba->qid); 3057 txq = &sc->txq[ba->qid]; 3058 tap = sc->qid2tap[ba->qid]; 3059 tid = tap->txa_ac; 3060 wn = (void *)tap->txa_ni; 3061 3062 res = NULL; 3063 ssn = 0; 3064 if (!IEEE80211_AMPDU_RUNNING(tap)) { 3065 res = tap->txa_private; 3066 ssn = tap->txa_start & 0xfff; 3067 } 3068 3069 for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) { 3070 txdata = &txq->data[txq->read]; 3071 3072 /* Unmap and free mbuf. 
*/ 3073 bus_dmamap_sync(txq->data_dmat, txdata->map, 3074 BUS_DMASYNC_POSTWRITE); 3075 bus_dmamap_unload(txq->data_dmat, txdata->map); 3076 m = txdata->m, txdata->m = NULL; 3077 ni = txdata->ni, txdata->ni = NULL; 3078 3079 KASSERT(ni != NULL, ("no node")); 3080 KASSERT(m != NULL, ("no mbuf")); 3081 3082 ieee80211_tx_complete(ni, m, 1); 3083 3084 txq->queued--; 3085 txq->read = (txq->read + 1) % IWN_TX_RING_COUNT; 3086 } 3087 3088 if (txq->queued == 0 && res != NULL) { 3089 iwn_nic_lock(sc); 3090 ops->ampdu_tx_stop(sc, qid, tid, ssn); 3091 iwn_nic_unlock(sc); 3092 sc->qid2tap[qid] = NULL; 3093 kfree(res, M_DEVBUF); 3094 return; 3095 } 3096 3097 if (wn->agg[tid].bitmap == 0) 3098 return; 3099 3100 shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff); 3101 if (shift < 0) 3102 shift += 0x100; 3103 3104 if (wn->agg[tid].nframes > (64 - shift)) 3105 return; 3106 3107 ni = tap->txa_ni; 3108 bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap; 3109 for (i = 0; bitmap; i++) { 3110 if ((bitmap & 1) == 0) { 3111 IFNET_STAT_INC(ifp, oerrors, 1); 3112 ieee80211_ratectl_tx_complete(ni->ni_vap, ni, 3113 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 3114 } else { 3115 IFNET_STAT_INC(ifp, opackets, 1); 3116 ieee80211_ratectl_tx_complete(ni->ni_vap, ni, 3117 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 3118 } 3119 bitmap >>= 1; 3120 } 3121 3122 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3123 3124 } 3125 3126 /* 3127 * Process a CALIBRATION_RESULT notification sent by the initialization 3128 * firmware on response to a CMD_CALIB_CONFIG command (5000 only). 3129 */ 3130 static void 3131 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3132 struct iwn_rx_data *data) 3133 { 3134 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); 3135 int len, idx = -1; 3136 3137 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3138 3139 /* Runtime firmware should not send such a notification. 
*/ 3140 if (sc->sc_flags & IWN_FLAG_CALIB_DONE){ 3141 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received after clib done\n", 3142 __func__); 3143 return; 3144 } 3145 len = (le32toh(desc->len) & 0x3fff) - 4; 3146 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3147 3148 switch (calib->code) { 3149 case IWN5000_PHY_CALIB_DC: 3150 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_DC) 3151 idx = 0; 3152 break; 3153 case IWN5000_PHY_CALIB_LO: 3154 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_LO) 3155 idx = 1; 3156 break; 3157 case IWN5000_PHY_CALIB_TX_IQ: 3158 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ) 3159 idx = 2; 3160 break; 3161 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: 3162 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ_PERIODIC) 3163 idx = 3; 3164 break; 3165 case IWN5000_PHY_CALIB_BASE_BAND: 3166 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_BASE_BAND) 3167 idx = 4; 3168 break; 3169 } 3170 if (idx == -1) /* Ignore other results. */ 3171 return; 3172 3173 /* Save calibration result. */ 3174 if (sc->calibcmd[idx].buf != NULL) 3175 kfree(sc->calibcmd[idx].buf, M_DEVBUF); 3176 sc->calibcmd[idx].buf = kmalloc(len, M_DEVBUF, M_INTWAIT); 3177 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 3178 "saving calibration result idx=%d, code=%d len=%d\n", idx, calib->code, len); 3179 sc->calibcmd[idx].len = len; 3180 memcpy(sc->calibcmd[idx].buf, calib, len); 3181 } 3182 3183 /* 3184 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. 3185 * The latter is sent by the firmware after each received beacon. 
 */
static void
iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
	int temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Ignore statistics received during a scan. */
	if (vap->iv_state != IEEE80211_S_RUN ||
	    (ic->ic_flags & IEEE80211_F_SCAN)){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received during calib\n",
		    __func__);
		return;
	}

	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received statistics, cmd %d\n",
	    __func__, desc->type);
	sc->calib_cnt = 0;	/* Reset TX power calibration timeout. */

	/* Test if temperature has changed. */
	if (stats->general.temp != sc->rawtemp) {
		/* Convert "raw" temperature to degC. */
		sc->rawtemp = stats->general.temp;
		temp = ops->get_temperature(sc);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
		    __func__, temp);

		/* Update TX power if need be (4965AGN only). */
		if (sc->hw_type == IWN_HW_REV_TYPE_4965)
			iwn4965_power_calibration(sc, temp);
	}

	if (desc->type != IWN_BEACON_STATISTICS)
		return;	/* Reply to a statistics request. */

	sc->noise = iwn_get_noise(&stats->rx.general);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);

	/* Test that RSSI and noise are present in stats report. */
	if (le32toh(stats->rx.general.flags) != 1) {
		DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
		    "received statistics without RSSI");
		return;
	}

	/* Feed the calibration state machine depending on its phase. */
	if (calib->state == IWN_CALIB_STATE_ASSOC)
		iwn_collect_noise(sc, &stats->rx.general);
	else if (calib->state == IWN_CALIB_STATE_RUN)
		iwn_tune_sensitivity(sc, &stats->rx);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
}

/*
 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN
 * and 5000 adapters have different incompatible TX status formats.
 */
static void
iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
	struct iwn_tx_ring *ring;
	int qid;

	qid = desc->qid & 0xf;
	ring = &sc->txq[qid];

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
	    "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
	    __func__, desc->qid, desc->idx, stat->ackfailcnt,
	    stat->btkillcnt, stat->rate, le16toh(stat->duration),
	    le32toh(stat->status));

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
	/* Aggregation queues take the A-MPDU completion path. */
	if (qid >= sc->firstaggqueue) {
		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
		    &stat->status);
	} else {
		iwn_tx_done(sc, desc, stat->ackfailcnt,
		    le32toh(stat->status) & 0xff);
	}
}

static void
iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
	struct iwn_tx_ring *ring;
	int qid;

	qid = desc->qid & 0xf;
	ring = &sc->txq[qid];

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
	    "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
	    __func__, desc->qid, desc->idx, stat->ackfailcnt,
	    stat->btkillcnt, stat->rate, le16toh(stat->duration),
	    le32toh(stat->status));

#ifdef notyet
	/* Reset TX scheduler slot. */
	iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
#endif

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
	if (qid >= sc->firstaggqueue) {
		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
		    &stat->status);
	} else {
		/*
		 * NOTE(review): stat->status is logged above via le32toh but
		 * truncated here with le16toh; only the low byte is used, so
		 * this works on little-endian — confirm on big-endian.
		 */
		iwn_tx_done(sc, desc, stat->ackfailcnt,
		    le16toh(stat->status) & 0xff);
	}
}

/*
 * Adapter-independent backend for TX_DONE firmware notifications.
 */
static void
iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
    uint8_t status)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
	struct iwn_tx_data *data = &ring->data[desc->idx];
	struct mbuf *m;
	struct ieee80211_node *ni;
	struct ieee80211vap *vap;

	KASSERT(data->ni != NULL, ("no node"));

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, data->map);
	m = data->m, data->m = NULL;
	ni = data->ni, data->ni = NULL;
	vap = ni->ni_vap;

	/*
	 * Update rate control statistics for the node.
	 */
	if (status & IWN_TX_FAIL) {
		IFNET_STAT_INC(ifp, oerrors, 1);
		ieee80211_ratectl_tx_complete(vap, ni,
		    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
	} else {
		IFNET_STAT_INC(ifp, opackets, 1);
		ieee80211_ratectl_tx_complete(vap, ni,
		    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
	}

	/*
	 * Channels marked for "radar" require traffic to be received
	 * to unlock before we can transmit. Until traffic is seen
	 * any attempt to transmit is returned immediately with status
	 * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily
	 * happen on first authenticate after scanning. To workaround
	 * this we ignore a failure of this sort in AUTH state so the
	 * 802.11 layer will fall back to using a timeout to wait for
	 * the AUTH reply. This allows the firmware time to see
	 * traffic so a subsequent retry of AUTH succeeds. It's
	 * unclear why the firmware does not maintain state for
	 * channels recently visited as this would allow immediate
	 * use of the channel after a scan (where we see traffic).
	 */
	if (status == IWN_TX_FAIL_TX_LOCKED &&
	    ni->ni_vap->iv_state == IEEE80211_S_AUTH)
		ieee80211_tx_complete(ni, m, 0);
	else
		ieee80211_tx_complete(ni, m,
		    (status & IWN_TX_FAIL) != 0);

	sc->sc_tx_timer = 0;
	/* Ring drained below the low watermark: restart the interface. */
	if (--ring->queued < IWN_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
			ifq_clr_oactive(&ifp->if_snd);
			iwn_start_locked(ifp);
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

}

/*
 * Process a "command done" firmware notification. This is where we wakeup
 * processes waiting for a synchronous command completion.
 */
static void
iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
{
	struct iwn_tx_ring *ring;
	struct iwn_tx_data *data;
	int cmd_queue_num;

	/* PAN-capable firmware uses a different command queue. */
	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
		cmd_queue_num = IWN_PAN_CMD_QUEUE;
	else
		cmd_queue_num = IWN_CMD_QUEUE_NUM;

	if ((desc->qid & IWN_RX_DESC_QID_MSK) != cmd_queue_num)
		return;	/* Not a command ack. */

	ring = &sc->txq[cmd_queue_num];
	data = &ring->data[desc->idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake anyone sleeping in iwn_cmd() on this slot. */
	wakeup(&ring->desc[desc->idx]);
}

/*
 * Process the aggregate TX status for an A-MPDU: rebuild the per-frame
 * ack bitmap, complete the transmitted frames and update rate control.
 * NOTE(review): the status block appears to be an array of 16-bit word
 * pairs (status, sequence) followed by a final 32-bit word holding the
 * next sequence number — confirm against the firmware API.
 */
static void
iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes,
    void *stat)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct iwn_tx_ring *ring = &sc->txq[qid];
	struct iwn_tx_data *data;
	struct mbuf *m;
	struct iwn_node *wn;
	struct ieee80211_node *ni;
	struct ieee80211_tx_ampdu *tap;
	uint64_t bitmap;
	uint32_t *status = stat;
	uint16_t *aggstatus = stat;
	uint16_t ssn;
	uint8_t tid;
	int bit, i, lastidx, *res, seqno, shift, start;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	if (nframes == 1) {
		if ((*status & 0xff) != 1 && (*status & 0xff) != 2) {
#ifdef NOT_YET
			kprintf("ieee80211_send_bar()\n");
#endif
			/*
			 * If we completely fail a transmit, make sure a
			 * notification is pushed up to the rate control
			 * layer.
			 */
			tap = sc->qid2tap[qid];
			tid = tap->txa_ac;
			wn = (void *)tap->txa_ni;
			ni = tap->txa_ni;
			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
			    IEEE80211_RATECTL_TX_FAILURE, &nframes, NULL);
		}
	}

	/* Build the 64-bit ack bitmap from the per-frame status words. */
	bitmap = 0;
	start = idx;
	for (i = 0; i < nframes; i++) {
		if (le16toh(aggstatus[i * 2]) & 0xc)
			continue;

		idx = le16toh(aggstatus[2*i + 1]) & 0xff;
		bit = idx - start;
		shift = 0;
		/* Handle ring-index wraparound (indices are modulo 0x100). */
		if (bit >= 64) {
			shift = 0x100 - idx + start;
			bit = 0;
			start = idx;
		} else if (bit <= -64)
			bit = 0x100 - start + idx;
		else if (bit < 0) {
			shift = start - idx;
			start = idx;
			bit = 0;
		}
		bitmap = bitmap << shift;
		bitmap |= 1ULL << bit;
	}
	tap = sc->qid2tap[qid];
	tid = tap->txa_ac;
	wn = (void *)tap->txa_ni;
	/* Stash the bitmap; iwn_rx_compressed_ba() consumes it later. */
	wn->agg[tid].bitmap = bitmap;
	wn->agg[tid].startidx = start;
	wn->agg[tid].nframes = nframes;

	res = NULL;
	ssn = 0;
	if (!IEEE80211_AMPDU_RUNNING(tap)) {
		res = tap->txa_private;
		ssn = tap->txa_start & 0xfff;
	}

	seqno = le32toh(*(status + nframes)) & 0xfff;
	for (lastidx = (seqno & 0xff); ring->read != lastidx;) {
		data = &ring->data[ring->read];

		/* Unmap and free mbuf. */
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m = data->m, data->m = NULL;
		ni = data->ni, data->ni = NULL;

		KASSERT(ni != NULL, ("no node"));
		KASSERT(m != NULL, ("no mbuf"));

		ieee80211_tx_complete(ni, m, 1);

		ring->queued--;
		ring->read = (ring->read + 1) % IWN_TX_RING_COUNT;
	}

	if (ring->queued == 0 && res != NULL) {
		/* Session is being torn down and the ring drained. */
		iwn_nic_lock(sc);
		ops->ampdu_tx_stop(sc, qid, tid, ssn);
		iwn_nic_unlock(sc);
		sc->qid2tap[qid] = NULL;
		kfree(res, M_DEVBUF);
		return;
	}

	sc->sc_tx_timer = 0;
	if (ring->queued < IWN_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
			ifq_clr_oactive(&ifp->if_snd);
			iwn_start_locked(ifp);
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

}

/*
 * Process an INT_FH_RX or INT_SW_RX interrupt.
 */
static void
iwn_notif_intr(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* Drain every notification up to the firmware's write pointer. */
	hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwn_rx_desc *desc;

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);
		desc = mtod(data->m, struct iwn_rx_desc *);

		DPRINTF(sc, IWN_DEBUG_RECV,
		    "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n",
		    __func__, sc->rxq.cur, desc->qid & 0xf, desc->idx, desc->flags,
		    desc->type, iwn_intr_str(desc->type),
		    le16toh(desc->len));

		if (!(desc->qid & IWN_UNSOLICITED_RX_NOTIF))	/* Reply to a command. */
			iwn_cmd_done(sc, desc);

		switch (desc->type) {
		case IWN_RX_PHY:
			iwn_rx_phy(sc, desc, data);
			break;

		case IWN_RX_DONE:		/* 4965AGN only. */
		case IWN_MPDU_RX_DONE:
			/* An 802.11 frame has been received. */
			iwn_rx_done(sc, desc, data);
			break;

		case IWN_RX_COMPRESSED_BA:
			/* A Compressed BlockAck has been received. */
			iwn_rx_compressed_ba(sc, desc, data);
			break;

		case IWN_TX_DONE:
			/* An 802.11 frame has been transmitted. */
			ops->tx_done(sc, desc, data);
			break;

		case IWN_RX_STATISTICS:
		case IWN_BEACON_STATISTICS:
			iwn_rx_statistics(sc, desc, data);
			break;

		case IWN_BEACON_MISSED:
		{
			struct iwn_beacon_missed *miss =
			    (struct iwn_beacon_missed *)(desc + 1);
			int misses;

			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			misses = le32toh(miss->consecutive);

			DPRINTF(sc, IWN_DEBUG_STATE,
			    "%s: beacons missed %d/%d\n", __func__,
			    misses, le32toh(miss->total));
			/*
			 * If more than 5 consecutive beacons are missed,
			 * reinitialize the sensitivity state machine.
			 */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (misses > 5)
					(void)iwn_init_sensitivity(sc);
				if (misses >= vap->iv_bmissthreshold) {
					ieee80211_beacon_miss(ic);
				}
			}
			break;
		}
		case IWN_UC_READY:
		{
			struct iwn_ucode_info *uc =
			    (struct iwn_ucode_info *)(desc + 1);

			/* The microcontroller is ready. */
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "microcode alive notification version=%d.%d "
			    "subtype=%x alive=%x\n", uc->major, uc->minor,
			    uc->subtype, le32toh(uc->valid));

			if (le32toh(uc->valid) != 1) {
				device_printf(sc->sc_dev,
				    "microcontroller initialization failed");
				break;
			}
			if (uc->subtype == IWN_UCODE_INIT) {
				/* Save microcontroller report. */
				memcpy(&sc->ucode_info, uc, sizeof (*uc));
			}
			/* Save the address of the error log in SRAM. */
			sc->errptr = le32toh(uc->errptr);
			break;
		}
		case IWN_STATE_CHANGED:
		{
			/*
			 * State change allows hardware switch change to be
			 * noted. However, we handle this in iwn_intr as we
			 * get both the enable/disable intr.
			 */
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
#ifdef IWN_DEBUG
			uint32_t *status = (uint32_t *)(desc + 1);
			DPRINTF(sc, IWN_DEBUG_INTR | IWN_DEBUG_STATE,
			    "state changed to %x\n",
			    le32toh(*status));
#endif
			break;
		}
		case IWN_START_SCAN:
		{
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
#ifdef IWN_DEBUG
			struct iwn_start_scan *scan =
			    (struct iwn_start_scan *)(desc + 1);
			DPRINTF(sc, IWN_DEBUG_ANY,
			    "%s: scanning channel %d status %x\n",
			    __func__, scan->chan, le32toh(scan->status));
#endif
			break;
		}
		case IWN_STOP_SCAN:
		{
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
#ifdef IWN_DEBUG
			struct iwn_stop_scan *scan =
			    (struct iwn_stop_scan *)(desc + 1);
			DPRINTF(sc, IWN_DEBUG_STATE | IWN_DEBUG_SCAN,
			    "scan finished nchan=%d status=%d chan=%d\n",
			    scan->nchan, scan->status, scan->chan);
#endif
			sc->sc_is_scanning = 0;
			ieee80211_scan_next(vap);
			break;
		}
		case IWN5000_CALIBRATION_RESULT:
			iwn5000_rx_calib_results(sc, desc, data);
			break;

		case IWN5000_CALIBRATION_DONE:
			sc->sc_flags |= IWN_FLAG_CALIB_DONE;
			wakeup(sc);
			break;
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
	}

	/* Tell the firmware what we have processed. */
	hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
	IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
}

/*
 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
 * from power-down sleep mode.
 */
static void
iwn_wakeup_intr(struct iwn_softc *sc)
{
	int qid;

	DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
	    __func__);

	/* Wakeup RX and TX rings. */
	IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
	for (qid = 0; qid < sc->ntxqs; qid++) {
		struct iwn_tx_ring *ring = &sc->txq[qid];
		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
	}
}

/*
 * Handle a change of the hardware RF-kill switch: schedule the radio
 * on/off task depending on the new switch state.
 */
static void
iwn_rftoggle_intr(struct iwn_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);

	device_printf(sc->sc_dev, "RF switch: radio %s\n",
	    (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
	if (tmp & IWN_GP_CNTRL_RFKILL)
		ieee80211_runtask(ic, &sc->sc_radioon_task);
	else
		ieee80211_runtask(ic, &sc->sc_radiooff_task);
}

/*
 * Dump the error log of the firmware when a firmware panic occurs. Although
 * we can't debug the firmware because it is neither open source nor free, it
 * can help us to identify certain classes of problems.
 */
static void
iwn_fatal_intr(struct iwn_softc *sc)
{
	struct iwn_fw_dump dump;
	int i;

	/* Force a complete recalibration on next init. */
	sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;

	/* Check that the error log address is valid. */
	if (sc->errptr < IWN_FW_DATA_BASE ||
	    sc->errptr + sizeof (dump) >
	    IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
		kprintf("%s: bad firmware error log address 0x%08x\n", __func__,
		    sc->errptr);
		return;
	}
	if (iwn_nic_lock(sc) != 0) {
		kprintf("%s: could not read firmware error log\n", __func__);
		return;
	}
	/* Read firmware error log from SRAM. */
	iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
	    sizeof (dump) / sizeof (uint32_t));
	iwn_nic_unlock(sc);

	if (dump.valid == 0) {
		kprintf("%s: firmware error log is empty\n", __func__);
		return;
	}
	kprintf("firmware error log:\n");
	kprintf("  error type      = \"%s\" (0x%08X)\n",
	    (dump.id < nitems(iwn_fw_errmsg)) ?
		iwn_fw_errmsg[dump.id] : "UNKNOWN",
	    dump.id);
	kprintf("  program counter = 0x%08X\n", dump.pc);
	kprintf("  source line     = 0x%08X\n", dump.src_line);
	kprintf("  error data      = 0x%08X%08X\n",
	    dump.error_data[0], dump.error_data[1]);
	kprintf("  branch link     = 0x%08X%08X\n",
	    dump.branch_link[0], dump.branch_link[1]);
	kprintf("  interrupt link  = 0x%08X%08X\n",
	    dump.interrupt_link[0], dump.interrupt_link[1]);
	kprintf("  time            = %u\n", dump.time[0]);

	/* Dump driver status (TX and RX rings) while we're here. */
	kprintf("driver status:\n");
	for (i = 0; i < sc->ntxqs; i++) {
		struct iwn_tx_ring *ring = &sc->txq[i];
		kprintf("  tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
		    i, ring->qid, ring->cur, ring->queued);
	}
	kprintf("  rx ring: cur=%d\n", sc->rxq.cur);
}

/*
 * Main interrupt handler: reads the pending interrupt causes (from the
 * ICT table when enabled, otherwise from registers), acknowledges them
 * and dispatches to the specific handlers above.
 */
static void
iwn_intr(void *arg)
{
	struct iwn_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t r1, r2, tmp;

	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);

	/* Read interrupts from ICT (fast) or from registers (slow). */
	if (sc->sc_flags & IWN_FLAG_USE_ICT) {
		tmp = 0;
		while (sc->ict[sc->ict_cur] != 0) {
			tmp |= sc->ict[sc->ict_cur];
			sc->ict[sc->ict_cur] = 0;	/* Acknowledge. */
			sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
		}
		tmp = le32toh(tmp);
		if (tmp == 0xffffffff)	/* Shouldn't happen. */
			tmp = 0;
		else if (tmp & 0xc0000)	/* Workaround a HW bug. */
			tmp |= 0x8000;
		r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
		r2 = 0;	/* Unused. */
	} else {
		r1 = IWN_READ(sc, IWN_INT);
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			return;	/* Hardware gone! */
		r2 = IWN_READ(sc, IWN_FH_INT);
	}

	DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n"
	    , r1, r2);

	if (r1 == 0 && r2 == 0)
		goto done;	/* Interrupt not for us. */

	/* Acknowledge interrupts. */
	IWN_WRITE(sc, IWN_INT, r1);
	if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
		IWN_WRITE(sc, IWN_FH_INT, r2);

	if (r1 & IWN_INT_RF_TOGGLED) {
		iwn_rftoggle_intr(sc);
		goto done;
	}
	if (r1 & IWN_INT_CT_REACHED) {
		device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
		    __func__);
	}
	if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
		device_printf(sc->sc_dev, "%s: fatal firmware error\n",
		    __func__);
#ifdef IWN_DEBUG
		iwn_debug_register(sc);
#endif
		/* Dump firmware error log and stop. */
		iwn_fatal_intr(sc);
		ifp->if_flags &= ~IFF_UP;
		iwn_stop_locked(sc);
		goto done;
	}
	if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
	    (r2 & IWN_FH_INT_RX)) {
		if (sc->sc_flags & IWN_FLAG_USE_ICT) {
			/* Disable periodic RX while we drain the ring. */
			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
				IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
			IWN_WRITE_1(sc, IWN_INT_PERIODIC,
			    IWN_INT_PERIODIC_DIS);
			iwn_notif_intr(sc);
			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
				IWN_WRITE_1(sc, IWN_INT_PERIODIC,
				    IWN_INT_PERIODIC_ENA);
			}
		} else
			iwn_notif_intr(sc);
	}

	if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
		if (sc->sc_flags & IWN_FLAG_USE_ICT)
			IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
		wakeup(sc);	/* FH DMA transfer completed. */
	}

	if (r1 & IWN_INT_ALIVE)
		wakeup(sc);	/* Firmware is alive. */

	if (r1 & IWN_INT_WAKEUP)
		iwn_wakeup_intr(sc);

done:
	/* Re-enable interrupts. */
	if (ifp->if_flags & IFF_UP)
		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
}

/*
 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
 * 5000 adapters use a slightly different format).
3906 */ 3907 static void 3908 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 3909 uint16_t len) 3910 { 3911 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 3912 3913 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 3914 3915 *w = htole16(len + 8); 3916 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3917 BUS_DMASYNC_PREWRITE); 3918 if (idx < IWN_SCHED_WINSZ) { 3919 *(w + IWN_TX_RING_COUNT) = *w; 3920 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3921 BUS_DMASYNC_PREWRITE); 3922 } 3923 } 3924 3925 static void 3926 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 3927 uint16_t len) 3928 { 3929 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 3930 3931 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 3932 3933 *w = htole16(id << 12 | (len + 8)); 3934 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3935 BUS_DMASYNC_PREWRITE); 3936 if (idx < IWN_SCHED_WINSZ) { 3937 *(w + IWN_TX_RING_COUNT) = *w; 3938 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3939 BUS_DMASYNC_PREWRITE); 3940 } 3941 } 3942 3943 #ifdef notyet 3944 static void 3945 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 3946 { 3947 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 3948 3949 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 3950 3951 *w = (*w & htole16(0xf000)) | htole16(1); 3952 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3953 BUS_DMASYNC_PREWRITE); 3954 if (idx < IWN_SCHED_WINSZ) { 3955 *(w + IWN_TX_RING_COUNT) = *w; 3956 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3957 BUS_DMASYNC_PREWRITE); 3958 } 3959 } 3960 #endif 3961 3962 /* 3963 * Check whether OFDM 11g protection will be enabled for the given rate. 3964 * 3965 * The original driver code only enabled protection for OFDM rates. 3966 * It didn't check to see whether it was operating in 11a or 11bg mode. 
 */
static int
iwn_check_rate_needs_protection(struct iwn_softc *sc,
    struct ieee80211vap *vap, uint8_t rate)
{
	struct ieee80211com *ic = vap->iv_ic;

	/*
	 * Not in 2GHz mode? Then there's no need to enable OFDM
	 * 11bg protection.
	 */
	if (! IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
		return (0);
	}

	/*
	 * 11bg protection not enabled? Then don't use it.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) == 0)
		return (0);

	/*
	 * If it's an 11n rate, then for now we enable
	 * protection.
	 */
	if (rate & IEEE80211_RATE_MCS) {
		return (1);
	}

	/*
	 * Do a rate table lookup. If the PHY is CCK,
	 * don't do protection.
	 */
	if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_CCK)
		return (0);

	/*
	 * Yup, enable protection.
	 */
	return (1);
}

/*
 * return a value between 0 and IWN_MAX_TX_RETRIES-1 as an index into
 * the link quality table that reflects this particular entry.
 */
static int
iwn_tx_rate_to_linkq_offset(struct iwn_softc *sc, struct ieee80211_node *ni,
    uint8_t rate)
{
	struct ieee80211_rateset *rs;
	int is_11n;
	int nr;
	int i;
	uint8_t cmp_rate;

	/*
	 * Figure out if we're using 11n or not here.
	 */
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0)
		is_11n = 1;
	else
		is_11n = 0;

	/*
	 * Use the correct rate table.
	 */
	if (is_11n) {
		rs = (struct ieee80211_rateset *) &ni->ni_htrates;
		nr = ni->ni_htrates.rs_nrates;
	} else {
		rs = &ni->ni_rates;
		nr = rs->rs_nrates;
	}

	/*
	 * Find the relevant link quality entry in the table.
	 */
	for (i = 0; i < nr && i < IWN_MAX_TX_RETRIES - 1 ; i++) {
		/*
		 * The link quality table index starts at 0 == highest
		 * rate, so we walk the rate table backwards.
		 */
		cmp_rate = rs->rs_rates[(nr - 1) - i];
		/* HT rate sets don't carry the MCS bit; re-add it so the
		 * comparison below matches MCS-tagged rates. */
		if (rate & IEEE80211_RATE_MCS)
			cmp_rate |= IEEE80211_RATE_MCS;

#if 0
		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: idx %d: nr=%d, rate=0x%02x, rateentry=0x%02x\n",
		    __func__,
		    i,
		    nr,
		    rate,
		    cmp_rate);
#endif

		if (cmp_rate == rate)
			return (i);
	}

	/* Failed? Start at the end */
	return (IWN_MAX_TX_RETRIES - 1);
}

/*
 * Encapsulate an 802.11 frame and hand it to the hardware for
 * transmission. (Definition continues past this view.)
 */
static int
iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
	struct iwn_ops *ops = &sc->ops;
	const struct ieee80211_txparam *tp;
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwn_node *wn = (void *)ni;
	struct iwn_tx_ring *ring;
	struct iwn_tx_desc *desc;
	struct iwn_tx_data *data;
	struct iwn_tx_cmd *cmd;
	struct iwn_cmd_data *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	uint32_t flags;
	uint16_t qos;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
	uint8_t tid, type;
	int ac, i, totlen, error, pad, nsegs = 0, rate;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* Select EDCA Access Category and TX ring for this frame. */
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
		tid = qos & IEEE80211_QOS_TID;
	} else {
		qos = 0;
		tid = 0;
	}
	ac = M_WME_GETAC(m);
	if (m->m_flags & M_AMPDU_MPDU) {
		uint16_t seqno;
		struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];

		if (!IEEE80211_AMPDU_RUNNING(tap)) {
			m_freem(m);
			return EINVAL;
		}

		/*
		 * Queue this frame to the hardware ring that we've
		 * negotiated AMPDU TX on.
4121 * 4122 * Note that the sequence number must match the TX slot 4123 * being used! 4124 */ 4125 ac = *(int *)tap->txa_private; 4126 seqno = ni->ni_txseqs[tid]; 4127 *(uint16_t *)wh->i_seq = 4128 htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); 4129 ring = &sc->txq[ac]; 4130 if ((seqno % 256) != ring->cur) { 4131 device_printf(sc->sc_dev, 4132 "%s: m=%p: seqno (%d) (%d) != ring index (%d) !\n", 4133 __func__, 4134 m, 4135 seqno, 4136 seqno % 256, 4137 ring->cur); 4138 } 4139 ni->ni_txseqs[tid]++; 4140 } 4141 ring = &sc->txq[ac]; 4142 desc = &ring->desc[ring->cur]; 4143 data = &ring->data[ring->cur]; 4144 4145 /* Choose a TX rate index. */ 4146 tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; 4147 if (type == IEEE80211_FC0_TYPE_MGT) 4148 rate = tp->mgmtrate; 4149 else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) 4150 rate = tp->mcastrate; 4151 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 4152 rate = tp->ucastrate; 4153 else if (m->m_flags & M_EAPOL) 4154 rate = tp->mgmtrate; 4155 else { 4156 /* XXX pass pktlen */ 4157 (void) ieee80211_ratectl_rate(ni, NULL, 0); 4158 rate = ni->ni_txrate; 4159 } 4160 4161 /* Encrypt the frame if need be. */ 4162 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 4163 /* Retrieve key for TX. */ 4164 k = ieee80211_crypto_encap(ni, m); 4165 if (k == NULL) { 4166 m_freem(m); 4167 return ENOBUFS; 4168 } 4169 /* 802.11 header may have moved. */ 4170 wh = mtod(m, struct ieee80211_frame *); 4171 } 4172 totlen = m->m_pkthdr.len; 4173 4174 if (ieee80211_radiotap_active_vap(vap)) { 4175 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 4176 4177 tap->wt_flags = 0; 4178 tap->wt_rate = rate; 4179 if (k != NULL) 4180 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 4181 4182 ieee80211_radiotap_tx(vap, m); 4183 } 4184 4185 /* Prepare TX firmware command. 
*/ 4186 cmd = &ring->cmd[ring->cur]; 4187 cmd->code = IWN_CMD_TX_DATA; 4188 cmd->flags = 0; 4189 cmd->qid = ring->qid; 4190 cmd->idx = ring->cur; 4191 4192 tx = (struct iwn_cmd_data *)cmd->data; 4193 /* NB: No need to clear tx, all fields are reinitialized here. */ 4194 tx->scratch = 0; /* clear "scratch" area */ 4195 4196 flags = 0; 4197 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 4198 /* Unicast frame, check if an ACK is expected. */ 4199 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 4200 IEEE80211_QOS_ACKPOLICY_NOACK) 4201 flags |= IWN_TX_NEED_ACK; 4202 } 4203 if ((wh->i_fc[0] & 4204 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == 4205 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR)) 4206 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */ 4207 4208 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 4209 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */ 4210 4211 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 4212 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 4213 /* NB: Group frames are sent using CCK in 802.11b/g. */ 4214 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 4215 flags |= IWN_TX_NEED_RTS; 4216 } else if (iwn_check_rate_needs_protection(sc, vap, rate)) { 4217 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 4218 flags |= IWN_TX_NEED_CTS; 4219 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 4220 flags |= IWN_TX_NEED_RTS; 4221 } 4222 4223 /* XXX HT protection? */ 4224 4225 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) { 4226 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4227 /* 5000 autoselects RTS/CTS or CTS-to-self. 
*/ 4228 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS); 4229 flags |= IWN_TX_NEED_PROTECTION; 4230 } else 4231 flags |= IWN_TX_FULL_TXOP; 4232 } 4233 } 4234 4235 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 4236 type != IEEE80211_FC0_TYPE_DATA) 4237 tx->id = sc->broadcast_id; 4238 else 4239 tx->id = wn->id; 4240 4241 if (type == IEEE80211_FC0_TYPE_MGT) { 4242 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 4243 4244 /* Tell HW to set timestamp in probe responses. */ 4245 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 4246 flags |= IWN_TX_INSERT_TSTAMP; 4247 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 4248 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 4249 tx->timeout = htole16(3); 4250 else 4251 tx->timeout = htole16(2); 4252 } else 4253 tx->timeout = htole16(0); 4254 4255 if (hdrlen & 3) { 4256 /* First segment length must be a multiple of 4. */ 4257 flags |= IWN_TX_NEED_PADDING; 4258 pad = 4 - (hdrlen & 3); 4259 } else 4260 pad = 0; 4261 4262 tx->len = htole16(totlen); 4263 tx->tid = tid; 4264 tx->rts_ntries = 60; 4265 tx->data_ntries = 15; 4266 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 4267 tx->rate = iwn_rate_to_plcp(sc, ni, rate); 4268 if (tx->id == sc->broadcast_id) { 4269 /* Group or management frame. */ 4270 tx->linkq = 0; 4271 } else { 4272 tx->linkq = iwn_tx_rate_to_linkq_offset(sc, ni, rate); 4273 flags |= IWN_TX_LINKQ; /* enable MRR */ 4274 } 4275 4276 /* Set physical address of "scratch area". */ 4277 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 4278 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 4279 4280 /* Copy 802.11 header in TX command. */ 4281 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 4282 4283 /* Trim 802.11 header. 
*/ 4284 m_adj(m, hdrlen); 4285 tx->security = 0; 4286 tx->flags = htole32(flags); 4287 4288 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, 4289 m, segs, IWN_MAX_SCATTER - 1, 4290 &nsegs, BUS_DMA_NOWAIT); 4291 if (error != 0) { 4292 if (error != EFBIG) { 4293 device_printf(sc->sc_dev, 4294 "%s: can't map mbuf (error %d)\n", __func__, error); 4295 m_freem(m); 4296 return error; 4297 } 4298 /* Too many DMA segments, linearize mbuf. */ 4299 m1 = m_defrag(m, MB_DONTWAIT); 4300 if (m1 == NULL) { 4301 device_printf(sc->sc_dev, 4302 "%s: could not defrag mbuf\n", __func__); 4303 m_freem(m); 4304 return ENOBUFS; 4305 } 4306 m = m1; 4307 4308 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, 4309 data->map, m, segs, 4310 IWN_MAX_SCATTER - 1, 4311 &nsegs, BUS_DMA_NOWAIT); 4312 if (error != 0) { 4313 device_printf(sc->sc_dev, 4314 "%s: can't map mbuf (error %d)\n", __func__, error); 4315 m_freem(m); 4316 return error; 4317 } 4318 } 4319 4320 data->m = m; 4321 data->ni = ni; 4322 4323 DPRINTF(sc, IWN_DEBUG_XMIT, 4324 "%s: qid %d idx %d len %d nsegs %d rate %04x plcp 0x%08x\n", 4325 __func__, 4326 ring->qid, 4327 ring->cur, 4328 m->m_pkthdr.len, 4329 nsegs, 4330 rate, 4331 tx->rate); 4332 4333 /* Fill TX descriptor. */ 4334 desc->nsegs = 1; 4335 if (m->m_len != 0) 4336 desc->nsegs += nsegs; 4337 /* First DMA segment is used by the TX command. */ 4338 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 4339 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 4340 (4 + sizeof (*tx) + hdrlen + pad) << 4); 4341 /* Other DMA segments are for data payload. 
*/ 4342 seg = &segs[0]; 4343 for (i = 1; i <= nsegs; i++) { 4344 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); 4345 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | 4346 seg->ds_len << 4); 4347 seg++; 4348 } 4349 4350 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 4351 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 4352 BUS_DMASYNC_PREWRITE); 4353 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 4354 BUS_DMASYNC_PREWRITE); 4355 4356 /* Update TX scheduler. */ 4357 if (ring->qid >= sc->firstaggqueue) 4358 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 4359 4360 /* Kick TX ring. */ 4361 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 4362 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 4363 4364 /* Mark TX ring as full if we reach a certain threshold. */ 4365 if (++ring->queued > IWN_TX_RING_HIMARK) 4366 sc->qfullmsk |= 1 << ring->qid; 4367 4368 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4369 4370 return 0; 4371 } 4372 4373 static int 4374 iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m, 4375 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 4376 { 4377 struct iwn_ops *ops = &sc->ops; 4378 // struct ifnet *ifp = sc->sc_ifp; 4379 struct ieee80211vap *vap = ni->ni_vap; 4380 // struct ieee80211com *ic = ifp->if_l2com; 4381 struct iwn_tx_cmd *cmd; 4382 struct iwn_cmd_data *tx; 4383 struct ieee80211_frame *wh; 4384 struct iwn_tx_ring *ring; 4385 struct iwn_tx_desc *desc; 4386 struct iwn_tx_data *data; 4387 struct mbuf *m1; 4388 bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER]; 4389 uint32_t flags; 4390 u_int hdrlen; 4391 int ac, totlen, error, pad, nsegs = 0, i, rate; 4392 uint8_t type; 4393 4394 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4395 4396 wh = mtod(m, struct ieee80211_frame *); 4397 hdrlen = ieee80211_anyhdrsize(wh); 4398 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 4399 4400 ac = params->ibp_pri & 3; 4401 4402 ring = &sc->txq[ac]; 4403 
desc = &ring->desc[ring->cur]; 4404 data = &ring->data[ring->cur]; 4405 4406 /* Choose a TX rate. */ 4407 rate = params->ibp_rate0; 4408 totlen = m->m_pkthdr.len; 4409 4410 /* Prepare TX firmware command. */ 4411 cmd = &ring->cmd[ring->cur]; 4412 cmd->code = IWN_CMD_TX_DATA; 4413 cmd->flags = 0; 4414 cmd->qid = ring->qid; 4415 cmd->idx = ring->cur; 4416 4417 tx = (struct iwn_cmd_data *)cmd->data; 4418 /* NB: No need to clear tx, all fields are reinitialized here. */ 4419 tx->scratch = 0; /* clear "scratch" area */ 4420 4421 flags = 0; 4422 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 4423 flags |= IWN_TX_NEED_ACK; 4424 if (params->ibp_flags & IEEE80211_BPF_RTS) { 4425 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4426 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 4427 flags &= ~IWN_TX_NEED_RTS; 4428 flags |= IWN_TX_NEED_PROTECTION; 4429 } else 4430 flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP; 4431 } 4432 if (params->ibp_flags & IEEE80211_BPF_CTS) { 4433 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4434 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 4435 flags &= ~IWN_TX_NEED_CTS; 4436 flags |= IWN_TX_NEED_PROTECTION; 4437 } else 4438 flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP; 4439 } 4440 if (type == IEEE80211_FC0_TYPE_MGT) { 4441 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 4442 4443 /* Tell HW to set timestamp in probe responses. */ 4444 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 4445 flags |= IWN_TX_INSERT_TSTAMP; 4446 4447 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 4448 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 4449 tx->timeout = htole16(3); 4450 else 4451 tx->timeout = htole16(2); 4452 } else 4453 tx->timeout = htole16(0); 4454 4455 if (hdrlen & 3) { 4456 /* First segment length must be a multiple of 4. 
*/ 4457 flags |= IWN_TX_NEED_PADDING; 4458 pad = 4 - (hdrlen & 3); 4459 } else 4460 pad = 0; 4461 4462 if (ieee80211_radiotap_active_vap(vap)) { 4463 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 4464 4465 tap->wt_flags = 0; 4466 tap->wt_rate = rate; 4467 4468 ieee80211_radiotap_tx(vap, m); 4469 } 4470 4471 tx->len = htole16(totlen); 4472 tx->tid = 0; 4473 tx->id = sc->broadcast_id; 4474 tx->rts_ntries = params->ibp_try1; 4475 tx->data_ntries = params->ibp_try0; 4476 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 4477 tx->rate = iwn_rate_to_plcp(sc, ni, rate); 4478 4479 /* Group or management frame. */ 4480 tx->linkq = 0; 4481 4482 /* Set physical address of "scratch area". */ 4483 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 4484 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 4485 4486 /* Copy 802.11 header in TX command. */ 4487 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 4488 4489 /* Trim 802.11 header. */ 4490 m_adj(m, hdrlen); 4491 tx->security = 0; 4492 tx->flags = htole32(flags); 4493 4494 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, 4495 m, segs, 4496 IWN_MAX_SCATTER - 1, 4497 &nsegs, BUS_DMA_NOWAIT); 4498 if (error != 0) { 4499 if (error != EFBIG) { 4500 device_printf(sc->sc_dev, 4501 "%s: can't map mbuf (error %d)\n", __func__, error); 4502 m_freem(m); 4503 return error; 4504 } 4505 /* Too many DMA segments, linearize mbuf. 
*/ 4506 m1 = m_defrag(m, M_NOWAIT); 4507 if (m1 == NULL) { 4508 device_printf(sc->sc_dev, 4509 "%s: could not defrag mbuf\n", __func__); 4510 m_freem(m); 4511 return ENOBUFS; 4512 } 4513 m = m1; 4514 4515 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, 4516 data->map, m, segs, 4517 IWN_MAX_SCATTER - 1, 4518 &nsegs, BUS_DMA_NOWAIT); 4519 if (error != 0) { 4520 device_printf(sc->sc_dev, 4521 "%s: can't map mbuf (error %d)\n", __func__, error); 4522 m_freem(m); 4523 return error; 4524 } 4525 } 4526 4527 data->m = m; 4528 data->ni = ni; 4529 4530 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 4531 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs); 4532 4533 /* Fill TX descriptor. */ 4534 desc->nsegs = 1; 4535 if (m->m_len != 0) 4536 desc->nsegs += nsegs; 4537 /* First DMA segment is used by the TX command. */ 4538 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 4539 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 4540 (4 + sizeof (*tx) + hdrlen + pad) << 4); 4541 /* Other DMA segments are for data payload. */ 4542 seg = &segs[0]; 4543 for (i = 1; i <= nsegs; i++) { 4544 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); 4545 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | 4546 seg->ds_len << 4); 4547 seg++; 4548 } 4549 4550 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 4551 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 4552 BUS_DMASYNC_PREWRITE); 4553 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 4554 BUS_DMASYNC_PREWRITE); 4555 4556 /* Update TX scheduler. */ 4557 if (ring->qid >= sc->firstaggqueue) 4558 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 4559 4560 /* Kick TX ring. */ 4561 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 4562 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 4563 4564 /* Mark TX ring as full if we reach a certain threshold. 
*/ 4565 if (++ring->queued > IWN_TX_RING_HIMARK) 4566 sc->qfullmsk |= 1 << ring->qid; 4567 4568 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4569 4570 return 0; 4571 } 4572 4573 static int 4574 iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 4575 const struct ieee80211_bpf_params *params) 4576 { 4577 struct ieee80211com *ic = ni->ni_ic; 4578 struct ifnet *ifp = ic->ic_ifp; 4579 struct iwn_softc *sc = ifp->if_softc; 4580 int error = 0; 4581 4582 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4583 4584 if ((ifp->if_flags & IFF_RUNNING) == 0) { 4585 ieee80211_free_node(ni); 4586 m_freem(m); 4587 return ENETDOWN; 4588 } 4589 4590 if (params == NULL) { 4591 /* 4592 * Legacy path; interpret frame contents to decide 4593 * precisely how to send the frame. 4594 */ 4595 error = iwn_tx_data(sc, m, ni); 4596 } else { 4597 /* 4598 * Caller supplied explicit parameters to use in 4599 * sending the frame. 4600 */ 4601 error = iwn_tx_data_raw(sc, m, ni, params); 4602 } 4603 if (error != 0) { 4604 /* NB: m is reclaimed on tx failure */ 4605 ieee80211_free_node(ni); 4606 IFNET_STAT_INC(ifp, oerrors, 1); 4607 } 4608 sc->sc_tx_timer = 5; 4609 4610 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 4611 4612 return error; 4613 } 4614 4615 static void 4616 iwn_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 4617 { 4618 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 4619 iwn_start_locked(ifp); 4620 } 4621 4622 static void 4623 iwn_start_locked(struct ifnet *ifp) 4624 { 4625 struct iwn_softc *sc = ifp->if_softc; 4626 struct ieee80211_node *ni; 4627 struct mbuf *m; 4628 4629 wlan_assert_serialized(); 4630 4631 if ((ifp->if_flags & IFF_RUNNING) == 0 || 4632 ifq_is_oactive(&ifp->if_snd)) 4633 return; 4634 4635 for (;;) { 4636 if (sc->qfullmsk != 0) { 4637 ifq_set_oactive(&ifp->if_snd); 4638 break; 4639 } 4640 m = ifq_dequeue(&ifp->if_snd); 4641 if (m == NULL) 4642 break; 4643 KKASSERT(M_TRAILINGSPACE(m) >= 0); 4644 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 4645 
if (iwn_tx_data(sc, m, ni) != 0) { 4646 ieee80211_free_node(ni); 4647 IFNET_STAT_INC(ifp, oerrors, 1); 4648 continue; 4649 } 4650 sc->sc_tx_timer = 5; 4651 } 4652 } 4653 4654 static void 4655 iwn_watchdog_timeout(void *arg) 4656 { 4657 struct iwn_softc *sc = arg; 4658 struct ifnet *ifp = sc->sc_ifp; 4659 struct ieee80211com *ic = ifp->if_l2com; 4660 4661 wlan_serialize_enter(); 4662 4663 KASSERT(ifp->if_flags & IFF_RUNNING, ("not running")); 4664 4665 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4666 4667 if (sc->sc_tx_timer > 0) { 4668 if (--sc->sc_tx_timer == 0) { 4669 if_printf(ifp, "device timeout\n"); 4670 ieee80211_runtask(ic, &sc->sc_reinit_task); 4671 wlan_serialize_exit(); 4672 return; 4673 } 4674 } 4675 callout_reset(&sc->watchdog_to, hz, iwn_watchdog_timeout, sc); 4676 wlan_serialize_exit(); 4677 } 4678 4679 static int 4680 iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *ucred) 4681 { 4682 struct iwn_softc *sc = ifp->if_softc; 4683 struct ieee80211com *ic = ifp->if_l2com; 4684 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4685 struct ifreq *ifr = (struct ifreq *) data; 4686 int error = 0, startall = 0, stop = 0; 4687 4688 wlan_assert_serialized(); 4689 4690 switch (cmd) { 4691 case SIOCGIFADDR: 4692 error = ether_ioctl(ifp, cmd, data); 4693 break; 4694 case SIOCSIFFLAGS: 4695 if (ifp->if_flags & IFF_UP) { 4696 if (!(ifp->if_flags & IFF_RUNNING)) { 4697 iwn_init_locked(sc); 4698 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL) 4699 startall = 1; 4700 else 4701 stop = 1; 4702 } 4703 } else { 4704 if (ifp->if_flags & IFF_RUNNING) 4705 iwn_stop_locked(sc); 4706 } 4707 if (startall) 4708 ieee80211_start_all(ic); 4709 else if (vap != NULL && stop) 4710 ieee80211_stop(vap); 4711 break; 4712 case SIOCGIFMEDIA: 4713 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 4714 break; 4715 default: 4716 error = EINVAL; 4717 break; 4718 } 4719 return error; 4720 } 4721 4722 /* 4723 * Send a command to the firmware. 
 */
static int
iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
{
	struct iwn_tx_ring *ring;
	struct iwn_tx_desc *desc;
	struct iwn_tx_data *data;
	struct iwn_tx_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	int totlen, error;
	int cmd_queue_num;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* PAN-capable firmware uses a different command queue. */
	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
		cmd_queue_num = IWN_PAN_CMD_QUEUE;
	else
		cmd_queue_num = IWN_CMD_QUEUE_NUM;

	ring = &sc->txq[cmd_queue_num];
	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];
	totlen = 4 + size;	/* 4-byte command header + payload */

	if (size > sizeof cmd->data) {
		/* Command is too large to fit in a descriptor. */
		if (totlen > MJUMPAGESIZE)
			return EINVAL;
		/* Stage the oversized command in a jumbo cluster mbuf. */
		m = m_getjcl(MB_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
		if (m == NULL)
			return ENOMEM;
		cmd = mtod(m, struct iwn_tx_cmd *);
		error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
		    totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(m);
			return error;
		}
		data->m = m;
	} else {
		/* Small command: use the ring's pre-mapped command slot. */
		cmd = &ring->cmd[ring->cur];
		paddr = data->cmd_paddr;
	}

	cmd->code = code;
	cmd->flags = 0;
	cmd->qid = ring->qid;
	cmd->idx = ring->cur;
	memcpy(cmd->data, buf, size);

	/* Single DMA segment covering the whole command. */
	desc->nsegs = 1;
	desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
	desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4);

	DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
	    __func__, iwn_intr_str(cmd->code), cmd->code,
	    cmd->flags, cmd->qid, cmd->idx);

	if (size > sizeof cmd->data) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Kick command ring. */
	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	/* Sync commands sleep (up to 1s) until the notification wakes us. */
	return async ? 0 : zsleep(desc, &wlan_global_serializer, 0, "iwncmd", hz);
}

/*
 * Add a node entry to the 4965AGN firmware node table.
 * The driver-internal node layout is the 5000-series one; the 4965
 * variant is built by copying the common fields (offsets below follow
 * the two structures' layouts in if_iwnreg.h).
 */
static int
iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
{
	struct iwn4965_node_info hnode;
	caddr_t src, dst;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/*
	 * We use the node structure for 5000 Series internally (it is
	 * a superset of the one for 4965AGN). We thus copy the common
	 * fields before sending the command.
	 */
	src = (caddr_t)node;
	dst = (caddr_t)&hnode;
	memcpy(dst, src, 48);
	/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
	memcpy(dst + 48, src + 72, 20);
	return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
}

/*
 * Add a node entry to the 5000-series firmware node table.
 */
static int
iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
{

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Direct mapping. */
	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
}

/*
 * Configure the firmware link-quality (rate-control retry) table for
 * ``ni'': antenna masks plus a 16-entry retry ladder filled from the
 * node's highest rate downwards (HT rates when the channel is 11n and
 * the node advertises MCS rates, legacy rates otherwise).
 * Returns the result of the (asynchronous) firmware command.
 */
static int
iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
{
#define	RV(v)	((v) & IEEE80211_RATE_VAL)
	struct iwn_node *wn = (void *)ni;
	struct ieee80211_rateset *rs;
	struct iwn_cmd_link_quality linkq;
	uint8_t txant;
	int i, rate, txrate;
	int is_11n;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);

	memset(&linkq, 0, sizeof linkq);
	linkq.id = wn->id;
	linkq.antmsk_1stream = txant;

	/*
	 * The '2 stream' setup is a bit .. odd.
	 *
	 * For NICs that support only 1 antenna, default to IWN_ANT_AB or
	 * the firmware panics (eg Intel 5100.)
	 *
	 * For NICs that support two antennas, we use ANT_AB.
	 *
	 * For NICs that support three antennas, we use the two that
	 * wasn't the default one.
	 *
	 * XXX TODO: if bluetooth (full concurrent) is enabled, restrict
	 * this to only one antenna.
	 */

	/* So - if there's no secondary antenna, assume IWN_ANT_AB */

	/* Default - transmit on the other antennas */
	linkq.antmsk_2stream = (sc->txchainmask & ~IWN_LSB(sc->txchainmask));

	/* Now, if it's zero, set it to IWN_ANT_AB, so to not panic firmware */
	if (linkq.antmsk_2stream == 0)
		linkq.antmsk_2stream = IWN_ANT_AB;

	/*
	 * If the NIC is a two-stream TX NIC, configure the TX mask to
	 * the default chainmask
	 */
	else if (sc->ntxchains == 2)
		linkq.antmsk_2stream = sc->txchainmask;

	linkq.ampdu_max = 32;		/* XXX negotiated? */
	linkq.ampdu_threshold = 3;
	linkq.ampdu_limit = htole16(4000);	/* 4ms */

	DPRINTF(sc, IWN_DEBUG_XMIT,
	    "%s: 1stream antenna=0x%02x, 2stream antenna=0x%02x, ntxstreams=%d\n",
	    __func__,
	    linkq.antmsk_1stream,
	    linkq.antmsk_2stream,
	    sc->ntxchains);

	/*
	 * Are we using 11n rates? Ensure the channel is
	 * 11n _and_ we have some 11n rates, or don't
	 * try.
	 */
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) {
		rs = (struct ieee80211_rateset *) &ni->ni_htrates;
		is_11n = 1;
	} else {
		rs = &ni->ni_rates;
		is_11n = 0;
	}

	/* Start at highest available bit-rate. */
	/*
	 * XXX this is all very dirty!
	 */
	if (is_11n)
		txrate = ni->ni_htrates.rs_nrates - 1;
	else
		txrate = rs->rs_nrates - 1;
	for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
		uint32_t plcp;

		if (is_11n)
			rate = IEEE80211_RATE_MCS | rs->rs_rates[txrate];
		else
			rate = RV(rs->rs_rates[txrate]);

		DPRINTF(sc, IWN_DEBUG_XMIT,
		    "%s: i=%d, txrate=%d, rate=0x%02x\n",
		    __func__,
		    i,
		    txrate,
		    rate);

		/* Do rate -> PLCP config mapping */
		plcp = iwn_rate_to_plcp(sc, ni, rate);
		linkq.retry[i] = plcp;

		/*
		 * The mimo field is an index into the table which
		 * indicates the first index where it and subsequent entries
		 * will not be using MIMO.
		 *
		 * Since we're filling linkq from 0..15 and we're filling
		 * from the higest MCS rates to the lowest rates, if we
		 * _are_ doing a dual-stream rate, set mimo to idx+1 (ie,
		 * the next entry.) That way if the next entry is a non-MIMO
		 * entry, we're already pointing at it.
		 */
		if ((le32toh(plcp) & IWN_RFLAG_MCS) &&
		    RV(le32toh(plcp)) > 7)
			linkq.mimo = i + 1;

		/* Next retry at immediate lower bit-rate. */
		if (txrate > 0)
			txrate--;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
#undef RV
}

/*
 * Broadcast node is used to send group-addressed and management frames.
 */
static int
iwn_add_broadcast_node(struct iwn_softc *sc, int async)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_node_info node;
	struct iwn_cmd_link_quality linkq;
	uint8_t txant;
	int i, error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];

	/* Register the broadcast address under the reserved broadcast id. */
	memset(&node, 0, sizeof node);
	IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
	node.id = sc->broadcast_id;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
	if ((error = ops->add_node(sc, &node, async)) != 0)
		return error;

	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);

	memset(&linkq, 0, sizeof linkq);
	linkq.id = sc->broadcast_id;
	linkq.antmsk_1stream = txant;
	linkq.antmsk_2stream = IWN_ANT_AB;
	linkq.ampdu_max = 64;
	linkq.ampdu_threshold = 3;
	linkq.ampdu_limit = htole16(4000);	/* 4ms */

	/* Use lowest mandatory bit-rate. */
	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
		linkq.retry[0] = htole32(0xd);
	else
		linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK);
	linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant));
	/* Use same bit-rate for all TX retries. */
	for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
		linkq.retry[i] = linkq.retry[0];
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
}

/*
 * Push the current WME/EDCA parameters (AIFSN, CWmin/CWmax, TXOP) for
 * all four access categories to the firmware.  net80211 callback.
 */
static int
iwn_updateedca(struct ieee80211com *ic)
{
#define IWN_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct iwn_softc *sc = ic->ic_ifp->if_softc;
	struct iwn_edca_params cmd;
	int aci;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	cmd.flags = htole32(IWN_EDCA_UPDATE);
	for (aci = 0; aci < WME_NUM_AC; aci++) {
		const struct wmeParams *ac =
		    &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
		cmd.ac[aci].aifsn = ac->wmep_aifsn;
		cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin));
		cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax));
		cmd.ac[aci].txoplimit =
		    htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
	}
	/* Best-effort: result of the async firmware command is ignored. */
	(void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
#undef IWN_EXP2
}

/*
 * Multicast filter update callback; the hardware needs no programming.
 */
static void
iwn_update_mcast(struct ifnet *ifp)
{
	/* Ignore */
}

/*
 * Program a LED blink pattern.  ``off''/``on'' are durations in units
 * of 100ms (see led.unit below).
 */
static void
iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
{
	struct iwn_cmd_led led;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

#if 0
	/* XXX don't set LEDs during scan? */
	if (sc->sc_is_scanning)
		return;
#endif

	/* Clear microcode LED ownership. */
	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);

	led.which = which;
	led.unit = htole32(10000);	/* on/off in unit of 100ms */
	led.off = off;
	led.on = on;
	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
}

/*
 * Set the critical temperature at which the firmware will stop the radio
 * and notify us.
 */
static int
iwn_set_critical_temp(struct iwn_softc *sc)
{
	struct iwn_critical_temp crit;
	int32_t temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);

	/* Threshold is 110 degC, in the unit each chip family expects. */
	if (sc->hw_type == IWN_HW_REV_TYPE_5150)
		temp = (IWN_CTOK(110) - sc->temp_off) * -5;
	else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
		temp = IWN_CTOK(110);
	else
		temp = 110;
	memset(&crit, 0, sizeof crit);
	crit.tempR = htole32(temp);
	DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp);
	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
}

/*
 * Program beacon timing (interval and time until the next beacon,
 * derived from the node's TSF timestamp) into the firmware.
 */
static int
iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
{
	struct iwn_cmd_timing cmd;
	uint64_t val, mod;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
	cmd.bintval = htole16(ni->ni_intval);
	cmd.lintval = htole16(10);

	/* Compute remaining time until next beacon. */
	val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
	mod = le64toh(cmd.tstamp) % val;
	cmd.binitval = htole32((uint32_t)(val - mod));

	DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
	    ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));

	return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
}

/*
 * Re-run 4965 TX power calibration when the temperature has drifted by
 * 3 degC or more since the last calibration.
 */
static void
iwn4965_power_calibration(struct iwn_softc *sc, int temp)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Adjust TX power if need be (delta >= 3 degC). */
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
	    __func__, sc->temp, temp);
	if (abs(temp - sc->temp) >= 3) {
		/* Record temperature of last calibration. */
		sc->temp = temp;
		(void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
	}
}

/*
 * Set TX power for current channel (each rate has its own power settings).
 * This function takes into account the regulatory information from EEPROM,
 * the current temperature and the current voltage.
 */
static int
iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
    int async)
{
/* Fixed-point arithmetic division using a n-bit fractional part. */
#define fdivround(a, b, n)	\
	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
/* Linear interpolation. */
#define interpolate(x, x1, y1, x2, y2, n)	\
	((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))

	static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
	struct iwn_ucode_info *uc = &sc->ucode_info;
	struct iwn4965_cmd_txpower cmd;
	struct iwn4965_eeprom_chan_samples *chans;
	const uint8_t *rf_gain, *dsp_gain;
	int32_t vdiff, tdiff;
	int i, c, grp, maxpwr;
	uint8_t chan;

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	/* Retrieve current channel from last RXON. */
	chan = sc->rxon->chan;
	DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
	    chan);

	memset(&cmd, 0, sizeof cmd);
	cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
	cmd.chan = chan;

	/* Select the per-band max power and gain tables. */
	if (IEEE80211_IS_CHAN_5GHZ(ch)) {
		maxpwr = sc->maxpwr5GHz;
		rf_gain = iwn4965_rf_gain_5ghz;
		dsp_gain = iwn4965_dsp_gain_5ghz;
	} else {
		maxpwr = sc->maxpwr2GHz;
		rf_gain = iwn4965_rf_gain_2ghz;
		dsp_gain = iwn4965_dsp_gain_2ghz;
	}

	/* Compute voltage compensation. */
	vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
	if (vdiff > 0)
		vdiff *= 2;
	if (abs(vdiff) > 2)
		vdiff = 0;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
	    __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);

	/* Get channel attenuation group. */
	if (chan <= 20)		/* 1-20 */
		grp = 4;
	else if (chan <= 43)	/* 34-43 */
		grp = 0;
	else if (chan <= 70)	/* 44-70 */
		grp = 1;
	else if (chan <= 124)	/* 71-124 */
		grp = 2;
	else			/* 125-200 */
		grp = 3;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);

	/* Get channel sub-band. */
	for (i = 0; i < IWN_NBANDS; i++)
		if (sc->bands[i].lo != 0 &&
		    sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
			break;
	if (i == IWN_NBANDS)	/* Can't happen in real-life. */
		return EINVAL;
	chans = sc->bands[i].chans;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: chan %d sub-band=%d\n", __func__, chan, i);

	/* Compute gain indices for both TX chains. */
	for (c = 0; c < 2; c++) {
		uint8_t power, gain, temp;
		int maxchpwr, pwr, ridx, idx;

		/* Interpolate EEPROM samples between the sub-band edges. */
		power = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].power,
		    chans[1].num, chans[1].samples[c][1].power, 1);
		gain = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].gain,
		    chans[1].num, chans[1].samples[c][1].gain, 1);
		temp = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].temp,
		    chans[1].num, chans[1].samples[c][1].temp, 1);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
		    "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
		    __func__, c, power, gain, temp);

		/* Compute temperature compensation. */
		tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
		    "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
		    __func__, tdiff, sc->temp, temp);

		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
			/* Convert dBm to half-dBm. */
			maxchpwr = sc->maxpwr[chan] * 2;
			if ((ridx / 8) & 1)
				maxchpwr -= 6;	/* MIMO 2T: -3dB */

			pwr = maxpwr;

			/* Adjust TX power based on rate. */
			if ((ridx % 8) == 5)
				pwr -= 15;	/* OFDM48: -7.5dB */
			else if ((ridx % 8) == 6)
				pwr -= 17;	/* OFDM54: -8.5dB */
			else if ((ridx % 8) == 7)
				pwr -= 20;	/* OFDM60: -10dB */
			else
				pwr -= 10;	/* Others: -5dB */

			/* Do not exceed channel max TX power. */
			if (pwr > maxchpwr)
				pwr = maxchpwr;

			idx = gain - (pwr - power) - tdiff - vdiff;
			if ((ridx / 8) & 1)	/* MIMO */
				idx += (int32_t)le32toh(uc->atten[grp][c]);

			if (cmd.band == 0)
				idx += 9;	/* 5GHz */
			if (ridx == IWN_RIDX_MAX)
				idx += 5;	/* CCK */

			/* Make sure idx stays in a valid range. */
			if (idx < 0)
				idx = 0;
			else if (idx > IWN4965_MAX_PWR_INDEX)
				idx = IWN4965_MAX_PWR_INDEX;

			DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
			    "%s: Tx chain %d, rate idx %d: power=%d\n",
			    __func__, c, ridx, idx);
			cmd.power[ridx].rf_gain[c] = rf_gain[idx];
			cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
		}
	}

	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: set tx power for chan %d\n", __func__, chan);
	return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);

#undef interpolate
#undef fdivround
}

/*
 * Set the global TX power limit; the 5000 series firmware performs the
 * per-rate calibration itself.
 */
static int
iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
    int async)
{
	struct iwn5000_cmd_txpower cmd;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/*
	 * TX power calibration is handled automatically by the firmware
	 * for 5000 Series.
	 */
	memset(&cmd, 0, sizeof cmd);
	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__);
	return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
}

/*
 * Retrieve the maximum RSSI (in dBm) among receivers.
 */
static int
iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
{
	struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
	uint8_t mask, agc;
	int rssi;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Antenna mask and AGC value are packed into the PHY statistics. */
	mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
	agc = (le16toh(phy->agc) >> 7) & 0x7f;

	/* Keep the strongest reading among the reported antennas. */
	rssi = 0;
	if (mask & IWN_ANT_A)
		rssi = MAX(rssi, phy->rssi[0]);
	if (mask & IWN_ANT_B)
		rssi = MAX(rssi, phy->rssi[2]);
	if (mask & IWN_ANT_C)
		rssi = MAX(rssi, phy->rssi[4]);

	DPRINTF(sc, IWN_DEBUG_RECV,
	    "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc,
	    mask, phy->rssi[0], phy->rssi[2], phy->rssi[4],
	    rssi - agc - IWN_RSSI_TO_DBM);
	/* Convert to dBm: subtract AGC gain and the constant offset. */
	return rssi - agc - IWN_RSSI_TO_DBM;
}

/* 5000 Series variant of the RSSI extraction. */
static int
iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
{
	struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
	uint8_t agc;
	int rssi;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	agc = (le32toh(phy->agc) >> 9) & 0x7f;

	/* Keep the strongest reading among the three receivers. */
	rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
	    le16toh(phy->rssi[1]) & 0xff);
	rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);

	/*
	 * NOTE(review): the DPRINTF below prints the raw little-endian
	 * phy->rssi[] words (no le16toh) — debug output only, but the
	 * values may look wrong on big-endian hosts.
	 */
	DPRINTF(sc, IWN_DEBUG_RECV,
	    "%s: agc %d rssi %d %d %d result %d\n", __func__, agc,
	    phy->rssi[0], phy->rssi[1], phy->rssi[2],
	    rssi - agc - IWN_RSSI_TO_DBM);
	return rssi - agc - IWN_RSSI_TO_DBM;
}

/*
 * Retrieve the average noise (in dBm) among receivers.
 */
static int
iwn_get_noise(const struct iwn_rx_general_stats *stats)
{
	int i, total, nbant, noise;

	/* Average the per-antenna noise, skipping idle (zero) antennas. */
	total = nbant = 0;
	for (i = 0; i < 3; i++) {
		if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
			continue;
		total += noise;
		nbant++;
	}
	/* There should be at least one antenna but check anyway. */
	return (nbant == 0) ? -127 : (total / nbant) - 107;
}

/*
 * Compute temperature (in degC) from last received statistics.
 */
static int
iwn4965_get_temperature(struct iwn_softc *sc)
{
	struct iwn_ucode_info *uc = &sc->ucode_info;
	int32_t r1, r2, r3, r4, temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Factory calibration references (r1-r3) and the live reading (r4). */
	r1 = le32toh(uc->temp[0].chan20MHz);
	r2 = le32toh(uc->temp[1].chan20MHz);
	r3 = le32toh(uc->temp[2].chan20MHz);
	r4 = le32toh(sc->rawtemp);

	if (r1 == r3)	/* Prevents division by 0 (should not happen). */
		return 0;

	/* Sign-extend 23-bit R4 value to 32-bit. */
	r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
	/* Compute temperature in Kelvin. */
	temp = (259 * (r4 - r2)) / (r3 - r1);
	temp = (temp * 97) / 100 + 8;

	DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
	    IWN_KTOC(temp));
	return IWN_KTOC(temp);
}

/* 5000 Series variant; only meaningful on 5150 hardware. */
static int
iwn5000_get_temperature(struct iwn_softc *sc)
{
	int32_t temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/*
	 * Temperature is not used by the driver for 5000 Series because
	 * TX power calibration is handled by firmware.
	 */
	temp = le32toh(sc->rawtemp);
	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		/* 5150 reports an offset voltage; convert and apply offset. */
		temp = (temp / -5) + sc->temp_off;
		temp = IWN_KTOC(temp);
	}
	return temp;
}

/*
 * Initialize sensitivity calibration state machine.
 */
static int
iwn_init_sensitivity(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct iwn_calib_state *calib = &sc->calib;
	uint32_t flags;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Reset calibration state machine. */
	memset(calib, 0, sizeof (*calib));
	calib->state = IWN_CALIB_STATE_INIT;
	calib->cck_state = IWN_CCK_STATE_HIFA;
	/* Set initial correlation values. */
	calib->ofdm_x1 = sc->limits->min_ofdm_x1;
	calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
	calib->ofdm_x4 = sc->limits->min_ofdm_x4;
	calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
	/*
	 * NOTE(review): cck_x4 is hardcoded to 125 instead of using
	 * sc->limits->min_cck_x4 like the others — presumably intentional
	 * (matches upstream); verify against the vendor reference driver.
	 */
	calib->cck_x4 = 125;
	calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4;
	calib->energy_cck = sc->limits->energy_cck;

	/* Write initial sensitivity. */
	if ((error = iwn_send_sensitivity(sc)) != 0)
		return error;

	/* Write initial gains. */
	if ((error = ops->init_gains(sc)) != 0)
		return error;

	/* Request statistics at each beacon interval. */
	flags = 0;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n",
	    __func__);
	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
}

/*
 * Collect noise and RSSI statistics for the first 20 beacons received
 * after association and use them to determine connected antennas and
 * to set differential gains.
 */
static void
iwn_collect_noise(struct iwn_softc *sc,
    const struct iwn_rx_general_stats *stats)
{
	struct iwn_ops *ops = &sc->ops;
	struct iwn_calib_state *calib = &sc->calib;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	uint32_t val;
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Accumulate RSSI and noise for all 3 antennas. */
	for (i = 0; i < 3; i++) {
		calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
		calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
	}
	/* NB: We update differential gains only once after 20 beacons. */
	if (++calib->nbeacons < 20)
		return;

	/* Determine highest average RSSI. */
	val = MAX(calib->rssi[0], calib->rssi[1]);
	val = MAX(calib->rssi[2], val);

	/*
	 * Determine which antennas are connected: drop any antenna whose
	 * accumulated RSSI is more than 15 units (x 20 beacons) below the
	 * best one.
	 */
	sc->chainmask = sc->rxchainmask;
	for (i = 0; i < 3; i++)
		if (val - calib->rssi[i] > 15 * 20)
			sc->chainmask &= ~(1 << i);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
	    __func__, sc->rxchainmask, sc->chainmask);

	/* If none of the TX antennas are connected, keep at least one. */
	if ((sc->chainmask & sc->txchainmask) == 0)
		sc->chainmask |= IWN_LSB(sc->txchainmask);

	(void)ops->set_gains(sc);
	calib->state = IWN_CALIB_STATE_RUN;

#ifdef notyet
	/* XXX Disable RX chains with no antennas connected. */
	sc->rxon->rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
	if (sc->sc_is_scanning)
		device_printf(sc->sc_dev,
		    "%s: is_scanning set, before RXON\n",
		    __func__);
	(void)iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
#endif

	/* Enable power-saving mode if requested by user. */
	if (ic->ic_flags & IEEE80211_F_PMGTON)
		(void)iwn_set_pslevel(sc, 0, 3, 1);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

}

/* Send initial (all-zero) differential gains for a 4965 device. */
static int
iwn4965_init_gains(struct iwn_softc *sc)
{
	struct iwn_phy_calib_gain cmd;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
	/* Differential gains initially set to 0 for all 3 antennas. */
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: setting initial differential gains\n", __func__);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/* Reset noise/gain calibration for 5000 Series and newer devices. */
static int
iwn5000_init_gains(struct iwn_softc *sc)
{
	struct iwn_phy_calib cmd;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = sc->reset_noise_gain;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: setting initial differential gains\n", __func__);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * Compute and send differential gains for a 4965 device, using the
 * noise statistics accumulated by iwn_collect_noise().
 */
static int
iwn4965_set_gains(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_phy_calib_gain cmd;
	int i, delta, noise;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Get minimal noise among connected antennas. */
	noise = INT_MAX;	/* NB: There's at least one antenna. */
	for (i = 0; i < 3; i++)
		if (sc->chainmask & (1 << i))
			noise = MIN(calib->noise[i], noise);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
	/* Set differential gains for connected antennas. */
	for (i = 0; i < 3; i++) {
		if (sc->chainmask & (1 << i)) {
			/* Compute attenuation (in unit of 1.5dB). */
			delta = (noise - (int32_t)calib->noise[i]) / 30;
			/* NB: delta <= 0 */
			/* Limit to [-4.5dB,0]. */
			cmd.gain[i] = MIN(abs(delta), 3);
			if (delta < 0)
				cmd.gain[i] |= 1 << 2;	/* sign bit */
		}
	}
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
	    cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * 5000 Series variant: gains are expressed relative to the first
 * available RX antenna rather than to the quietest one.
 */
static int
iwn5000_set_gains(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_phy_calib_gain cmd;
	int i, ant, div, delta;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* We collected 20 beacons and !=6050 need a 1.5 factor. */
	div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = sc->noise_gain;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	/* Get first available RX antenna as referential. */
	ant = IWN_LSB(sc->rxchainmask);
	/* Set differential gains for other antennas. */
	for (i = ant + 1; i < 3; i++) {
		if (sc->chainmask & (1 << i)) {
			/* The delta is relative to antenna "ant". */
			delta = ((int32_t)calib->noise[ant] -
			    (int32_t)calib->noise[i]) / div;
			/* Limit to [-4.5dB,+4.5dB]. */
			cmd.gain[i - 1] = MIN(abs(delta), 3);
			if (delta < 0)
				cmd.gain[i - 1] |= 1 << 2;	/* sign bit */
		}
	}
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "setting differential gains Ant B/C: %x/%x (%x)\n",
	    cmd.gain[0], cmd.gain[1], sc->chainmask);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * Tune RF RX sensitivity based on the number of false alarms detected
 * during the last beacon period.
 */
static void
iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
{
/*
 * NOTE(review): these helper macros are bare if-statements (not wrapped
 * in do { } while (0)); they are only safe because every use site below
 * is a full statement — keep it that way.
 */
#define inc(val, inc, max)			\
	if ((val) < (max)) {			\
		if ((val) < (max) - (inc))	\
			(val) += (inc);		\
		else				\
			(val) = (max);		\
		needs_update = 1;		\
	}
#define dec(val, dec, min)			\
	if ((val) > (min)) {			\
		if ((val) > (min) + (dec))	\
			(val) -= (dec);		\
		else				\
			(val) = (min);		\
		needs_update = 1;		\
	}

	const struct iwn_sensitivity_limits *limits = sc->limits;
	struct iwn_calib_state *calib = &sc->calib;
	uint32_t val, rxena, fa;
	uint32_t energy[3], energy_min;
	uint8_t noise[3], noise_ref;
	int i, needs_update = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Check that we've been enabled long enough. */
	if ((rxena = le32toh(stats->general.load)) == 0){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end not so long\n", __func__);
		return;
	}

	/* Compute number of false alarms since last call for OFDM. */
	fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
	fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */

	/* Save counters values for next call. */
	calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp);
	calib->fa_ofdm = le32toh(stats->ofdm.fa);

	if (fa > 50 * rxena) {
		/* High false alarm count, decrease sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: OFDM high false alarm count: %u\n", __func__, fa);
		inc(calib->ofdm_x1, 1, limits->max_ofdm_x1);
		inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
		inc(calib->ofdm_x4, 1, limits->max_ofdm_x4);
		inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);

	} else if (fa < 5 * rxena) {
		/* Low false alarm count, increase sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: OFDM low false alarm count: %u\n", __func__, fa);
		dec(calib->ofdm_x1, 1, limits->min_ofdm_x1);
		dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
		dec(calib->ofdm_x4, 1, limits->min_ofdm_x4);
		dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
	}

	/* Compute maximum noise among 3 receivers. */
	for (i = 0; i < 3; i++)
		noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
	val = MAX(noise[0], noise[1]);
	val = MAX(noise[2], val);
	/* Insert it into our samples table. */
	calib->noise_samples[calib->cur_noise_sample] = val;
	calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;

	/* Compute maximum noise among last 20 samples. */
	noise_ref = calib->noise_samples[0];
	for (i = 1; i < 20; i++)
		noise_ref = MAX(noise_ref, calib->noise_samples[i]);

	/* Compute maximum energy among 3 receivers. */
	for (i = 0; i < 3; i++)
		energy[i] = le32toh(stats->general.energy[i]);
	val = MIN(energy[0], energy[1]);
	val = MIN(energy[2], val);
	/* Insert it into our samples table. */
	calib->energy_samples[calib->cur_energy_sample] = val;
	calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;

	/*
	 * Reference energy among the last 10 samples.
	 * NOTE(review): despite the variable name "energy_min", this loop
	 * takes the MAX of the samples — this matches the vendor reference
	 * driver; do not "fix" to MIN without hardware validation.
	 */
	energy_min = calib->energy_samples[0];
	for (i = 1; i < 10; i++)
		energy_min = MAX(energy_min, calib->energy_samples[i]);
	energy_min += 6;

	/* Compute number of false alarms since last call for CCK. */
	fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
	fa += le32toh(stats->cck.fa) - calib->fa_cck;
	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */

	/* Save counters values for next call. */
	calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp);
	calib->fa_cck = le32toh(stats->cck.fa);

	if (fa > 50 * rxena) {
		/* High false alarm count, decrease sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: CCK high false alarm count: %u\n", __func__, fa);
		calib->cck_state = IWN_CCK_STATE_HIFA;
		calib->low_fa = 0;

		if (calib->cck_x4 > 160) {
			calib->noise_ref = noise_ref;
			if (calib->energy_cck > 2)
				dec(calib->energy_cck, 2, energy_min);
		}
		if (calib->cck_x4 < 160) {
			calib->cck_x4 = 161;
			needs_update = 1;
		} else
			inc(calib->cck_x4, 3, limits->max_cck_x4);

		inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);

	} else if (fa < 5 * rxena) {
		/* Low false alarm count, increase sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: CCK low false alarm count: %u\n", __func__, fa);
		calib->cck_state = IWN_CCK_STATE_LOFA;
		calib->low_fa++;

		/*
		 * NOTE(review): cck_state was set to LOFA just above, so the
		 * first clause below is always true; matches upstream —
		 * verify against the vendor reference before changing.
		 */
		if (calib->cck_state != IWN_CCK_STATE_INIT &&
		    (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
		    calib->low_fa > 100)) {
			inc(calib->energy_cck, 2, limits->min_energy_cck);
			dec(calib->cck_x4, 3, limits->min_cck_x4);
			dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
		}
	} else {
		/* Not worth to increase or decrease sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: CCK normal false alarm count: %u\n", __func__, fa);
		calib->low_fa = 0;
		calib->noise_ref = noise_ref;

		if (calib->cck_state == IWN_CCK_STATE_HIFA) {
			/* Previous interval had many false alarms. */
			dec(calib->energy_cck, 8, energy_min);
		}
		calib->cck_state = IWN_CCK_STATE_INIT;
	}

	if (needs_update)
		(void)iwn_send_sensitivity(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

#undef dec
#undef inc
}

/*
 * Push the current sensitivity calibration state to the firmware,
 * using the enhanced-sensitivity command layout when supported.
 */
static int
iwn_send_sensitivity(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_enhanced_sensitivity_cmd cmd;
	int len;

	memset(&cmd, 0, sizeof cmd);
	/* Default to the basic (non-enhanced) command length. */
	len = sizeof (struct iwn_sensitivity_cmd);
	cmd.which = IWN_SENSITIVITY_WORKTBL;
	/* OFDM modulation. */
	cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1);
	cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1);
	cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4);
	cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4);
	cmd.energy_ofdm = htole16(sc->limits->energy_ofdm);
	cmd.energy_ofdm_th = htole16(62);
	/* CCK modulation. */
	cmd.corr_cck_x4 = htole16(calib->cck_x4);
	cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4);
	cmd.energy_cck = htole16(calib->energy_cck);
	/* Barker modulation: use default values. */
	cmd.corr_barker = htole16(190);
	cmd.corr_barker_mrc = htole16(390);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
	    calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
	    calib->ofdm_mrc_x4, calib->cck_x4,
	    calib->cck_mrc_x4, calib->energy_cck);

	if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
		goto send;
	/* Enhanced sensitivity settings. */
	len = sizeof (struct iwn_enhanced_sensitivity_cmd);
	cmd.ofdm_det_slope_mrc = htole16(668);
	cmd.ofdm_det_icept_mrc = htole16(4);
	cmd.ofdm_det_slope = htole16(486);
	cmd.ofdm_det_icept = htole16(37);
	cmd.cck_det_slope_mrc = htole16(853);
	cmd.cck_det_icept_mrc = htole16(4);
	cmd.cck_det_slope = htole16(476);
	cmd.cck_det_icept = htole16(99);
send:
	return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
}

/*
 * Set STA mode power saving level (between 0 and 5).
 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
 */
static int
iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
{
	struct iwn_pmgt_cmd cmd;
	const struct iwn_pmgt *pmgt;
	uint32_t max, skip_dtim;
	uint32_t reg;
	int i;

	DPRINTF(sc, IWN_DEBUG_PWRSAVE,
	    "%s: dtim=%d, level=%d, async=%d\n",
	    __func__,
	    dtim,
	    level,
	    async);

	/* Select which PS parameters to use. */
	if (dtim <= 2)
		pmgt = &iwn_pmgt[0][level];
	else if (dtim <= 10)
		pmgt = &iwn_pmgt[1][level];
	else
		pmgt = &iwn_pmgt[2][level];

	memset(&cmd, 0, sizeof cmd);
	if (level != 0)	/* not CAM */
		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
	if (level == 5)
		cmd.flags |= htole16(IWN_PS_FAST_PD);
	/* Retrieve PCIe Active State Power Management (ASPM). */
	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
	if (!(reg & 0x1))	/* L0s Entry disabled. */
		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
	/* Timeouts are specified in units of 1024 microseconds. */
	cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
	cmd.txtimeout = htole32(pmgt->txtimeout * 1024);

	if (dtim == 0) {
		dtim = 1;
		skip_dtim = 0;
	} else
		skip_dtim = pmgt->skip_dtim;
	if (skip_dtim != 0) {
		cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
		max = pmgt->intval[4];
		if (max == (uint32_t)-1)
			max = dtim * (skip_dtim + 1);
		else if (max > dtim)
			max = (max / dtim) * dtim;
	} else
		max = dtim;
	/* Clamp every sleep interval to the computed maximum. */
	for (i = 0; i < 5; i++)
		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));

	DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
	    level);
	return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
}

/* Send the basic (legacy) bluetooth coexistence configuration. */
static int
iwn_send_btcoex(struct iwn_softc *sc)
{
	struct iwn_bluetooth cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
	cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
	cmd.max_kill = IWN_BT_MAX_KILL_DEF;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
	    __func__);
	return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
}

/*
 * Send the advanced (3-wire) bluetooth coexistence configuration,
 * followed by the priority table and a forced BT state machine change.
 */
static int
iwn_send_advanced_btcoex(struct iwn_softc *sc)
{
	static const uint32_t btcoex_3wire[12] = {
		0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
		0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
		0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
	};
	struct iwn6000_btcoex_config btconfig;
	struct iwn2000_btcoex_config btconfig2k;
	struct iwn_btcoex_priotable btprio;
	struct iwn_btcoex_prot btprot;
	int error, i;
	uint8_t flags;

	memset(&btconfig, 0, sizeof btconfig);
	memset(&btconfig2k, 0, sizeof btconfig2k);

	flags = IWN_BT_FLAG_COEX6000_MODE_3W <<
	    IWN_BT_FLAG_COEX6000_MODE_SHIFT;	// Done as is in linux kernel 3.2

	/*
	 * NOTE(review): clearing SYNC_2_BT_DISABLE when bt_sco_disable is
	 * set looks inverted relative to the Linux driver — verify against
	 * iwlwifi before changing, as upstream carried this form.
	 */
	if (sc->base_params->bt_sco_disable)
		flags &= ~IWN_BT_FLAG_SYNC_2_BT_DISABLE;
	else
		flags |= IWN_BT_FLAG_SYNC_2_BT_DISABLE;

	flags |= IWN_BT_FLAG_COEX6000_CHAN_INHIBITION;

	/* Default flags result is 145 as old value */

	/*
	 * Flags value has to be reviewed.  Values must change if we
	 * wish to disable it.
	 */
	if (sc->base_params->bt_session_2) {
		/* Newer (2000 Series style) config layout. */
		btconfig2k.flags = flags;
		btconfig2k.max_kill = 5;
		btconfig2k.bt3_t7_timer = 1;
		btconfig2k.kill_ack = htole32(0xffff0000);
		btconfig2k.kill_cts = htole32(0xffff0000);
		btconfig2k.sample_time = 2;
		btconfig2k.bt3_t2_timer = 0xc;

		for (i = 0; i < 12; i++)
			btconfig2k.lookup_table[i] = htole32(btcoex_3wire[i]);
		btconfig2k.valid = htole16(0xff);
		btconfig2k.prio_boost = htole32(0xf0);
		DPRINTF(sc, IWN_DEBUG_RESET,
		    "%s: configuring advanced bluetooth coexistence"
		    " session 2, flags : 0x%x\n",
		    __func__,
		    flags);
		error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig2k,
		    sizeof(btconfig2k), 1);
	} else {
		/* 6000 Series style config layout. */
		btconfig.flags = flags;
		btconfig.max_kill = 5;
		btconfig.bt3_t7_timer = 1;
		btconfig.kill_ack = htole32(0xffff0000);
		btconfig.kill_cts = htole32(0xffff0000);
		btconfig.sample_time = 2;
		btconfig.bt3_t2_timer = 0xc;

		for (i = 0; i < 12; i++)
			btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
		btconfig.valid = htole16(0xff);
		/*
		 * NOTE(review): prio_boost is not byte-swapped here, unlike
		 * the bt_session_2 branch above — confirm whether this field
		 * is 8-bit in this layout or a missing htole32().
		 */
		btconfig.prio_boost = 0xf0;
		DPRINTF(sc, IWN_DEBUG_RESET,
		    "%s: configuring advanced bluetooth coexistence,"
		    " flags : 0x%x\n",
		    __func__,
		    flags);
		error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig,
		    sizeof(btconfig), 1);
	}


	if (error != 0)
		return error;

	/* Per-event calibration priorities for the BT arbiter. */
	memset(&btprio, 0, sizeof btprio);
	btprio.calib_init1 = 0x6;
	btprio.calib_init2 = 0x7;
	btprio.calib_periodic_low1 = 0x2;
	btprio.calib_periodic_low2 = 0x3;
	btprio.calib_periodic_high1 = 0x4;
	btprio.calib_periodic_high2 = 0x5;
	btprio.dtim = 0x6;
	btprio.scan52 = 0x8;
	btprio.scan24 = 0xa;
	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio),
	    1);
	if (error != 0)
		return error;

	/* Force BT state machine change. */
	memset(&btprot, 0, sizeof btprot);
	btprot.open = 1;
	btprot.type = 1;
	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
	if (error != 0)
		return error;
	btprot.open = 0;
	return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
}

/* Enable runtime DC calibration (6050 only). */
static int
iwn5000_runtime_calib(struct iwn_softc *sc)
{
	struct iwn5000_calib_config cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.ucode.once.enable = 0xffffffff;
	cmd.ucode.once.start = IWN5000_CALIB_DC;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: configuring runtime calibration\n", __func__);
	return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
}

/*
 * Bring the adapter to an operational state: calibration, TX chains,
 * bluetooth coexistence, RXON, broadcast node, TX power, critical
 * temperature and power saving level.
 */
static int
iwn_config(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	uint32_t txmask;
	uint16_t rxchain;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* The two temperature-offset calibration flavors are exclusive. */
	if ((sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET)
	    && (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)) {
		device_printf(sc->sc_dev,"%s: temp_offset and temp_offsetv2 are"
		    " exclusive each together. Review NIC config file. Conf"
		    " : 0x%08x Flags : 0x%08x \n", __func__,
		    sc->base_params->calib_need,
		    (IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET |
		    IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2));
		return (EINVAL);
	}

	/* Compute temperature calibration if needed (sent by send calib). */
	if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) {
		error = iwn5000_temp_offset_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not set temperature offset\n", __func__);
			return (error);
		}
	} else if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
		error = iwn5000_temp_offset_calibv2(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not compute temperature offset v2\n",
			    __func__);
			return (error);
		}
	}

	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
		/* Configure runtime DC calibration. */
		error = iwn5000_runtime_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not configure runtime calibration\n",
			    __func__);
			return error;
		}
	}

	/* Configure valid TX chains for >=5000 Series. */
	if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
		txmask = htole32(sc->txchainmask);
		DPRINTF(sc, IWN_DEBUG_RESET,
		    "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
		    sizeof txmask, 0);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not configure valid TX chains, "
			    "error %d\n", __func__, error);
			return error;
		}
	}

	/* Configure bluetooth coexistence. */
	error = 0;

	/* Configure bluetooth coexistence if needed. */
	if (sc->base_params->bt_mode == IWN_BT_ADVANCED)
		error = iwn_send_advanced_btcoex(sc);
	if (sc->base_params->bt_mode == IWN_BT_SIMPLE)
		error = iwn_send_btcoex(sc);

	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not configure bluetooth coexistence, error %d\n",
		    __func__, error);
		return error;
	}

	/* Set mode, channel, RX filter and enable RX. */
	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	memset(sc->rxon, 0, sizeof (struct iwn_rxon));
	IEEE80211_ADDR_COPY(sc->rxon->myaddr, IF_LLADDR(ifp));
	IEEE80211_ADDR_COPY(sc->rxon->wlap, IF_LLADDR(ifp));
	sc->rxon->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		sc->rxon->mode = IWN_MODE_STA;
		sc->rxon->filter = htole32(IWN_FILTER_MULTICAST);
		break;
	case IEEE80211_M_MONITOR:
		sc->rxon->mode = IWN_MODE_MONITOR;
		sc->rxon->filter = htole32(IWN_FILTER_MULTICAST |
		    IWN_FILTER_CTL | IWN_FILTER_PROMISC);
		break;
	default:
		/* Should not get there. */
		break;
	}
	sc->rxon->cck_mask = 0x0f;	/* not yet negotiated */
	sc->rxon->ofdm_mask = 0xff;	/* not yet negotiated */
	sc->rxon->ht_single_mask = 0xff;
	sc->rxon->ht_dual_mask = 0xff;
	sc->rxon->ht_triple_mask = 0xff;
	rxchain =
	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
	    IWN_RXCHAIN_MIMO_COUNT(2) |
	    IWN_RXCHAIN_IDLE_COUNT(2);
	sc->rxon->rxchain = htole16(rxchain);
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__);
	if (sc->sc_is_scanning)
		device_printf(sc->sc_dev,
		    "%s: is_scanning set, before RXON\n",
		    __func__);
	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 0);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: RXON command failed\n",
		    __func__);
		return error;
	}

	if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
		device_printf(sc->sc_dev, "%s: could not add broadcast node\n",
		    __func__);
		return error;
	}

	/* Configuration has changed, set TX power accordingly. */
	if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) {
		device_printf(sc->sc_dev, "%s: could not set TX power\n",
		    __func__);
		return error;
	}

	if ((error = iwn_set_critical_temp(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set critical temperature\n", __func__);
		return error;
	}

	/* Set power saving level to CAM during initialization. */
	if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set power saving level\n", __func__);
		return error;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * Add an ssid element to a frame.  Returns a pointer just past the
 * appended element.  NB: caller guarantees the buffer has room.
 */
static uint8_t *
ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len)
{
	*frm++ = IEEE80211_ELEMID_SSID;
	*frm++ = len;
	memcpy(frm, ssid, len);
	return frm + len;
}

/*
 * Compute the active-scan dwell time (in ms) for a channel, scaled by
 * the number of probe requests to send.
 */
static uint16_t
iwn_get_active_dwell_time(struct iwn_softc *sc,
    struct ieee80211_channel *c, uint8_t n_probes)
{
	/* No channel? Default to 2GHz settings */
	if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
		return (IWN_ACTIVE_DWELL_TIME_2GHZ +
		    IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1));
	}

	/* 5GHz dwell time */
	return (IWN_ACTIVE_DWELL_TIME_5GHZ +
	    IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1));
}

/*
 * Limit the total dwell time to 85% of the beacon interval.
 *
 * Returns the dwell time in milliseconds.
 */
static uint16_t
iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time)
{
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ieee80211vap *vap = NULL;
	int bintval = 0;

	/* bintval is in TU (1.024mS) */
	if (! TAILQ_EMPTY(&ic->ic_vaps)) {
		vap = TAILQ_FIRST(&ic->ic_vaps);
		bintval = vap->iv_bss->ni_intval;
	}

	/*
	 * If it's non-zero, we should calculate the minimum of
	 * it and the DWELL_BASE.
	 *
	 * XXX Yes, the math should take into account that bintval
	 * is 1.024mS, not 1mS..
	 */
	if (bintval > 0) {
		DPRINTF(sc, IWN_DEBUG_SCAN,
		    "%s: bintval=%d\n",
		    __func__,
		    bintval);
		return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100)));
	}

	/* No association context? Default */
	return (IWN_PASSIVE_DWELL_BASE);
}

/*
 * Compute the passive-scan dwell time (in ms) for a channel, clamped
 * to the beacon interval when associated.
 */
static uint16_t
iwn_get_passive_dwell_time(struct iwn_softc *sc, struct ieee80211_channel *c)
{
	uint16_t passive;

	if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ;
	} else {
		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ;
	}

	/* Clamp to the beacon interval if we're associated */
	return (iwn_limit_dwell(sc, passive));
}

/*
 * Build and send a scan command to the firmware for the current
 * channel.  (Function continues beyond this file chunk.)
 */
static int
iwn_scan(struct iwn_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_scan_state *ss = ic->ic_scan;	/*XXX*/
	struct ieee80211_node *ni = ss->ss_vap->iv_bss;
	struct iwn_scan_hdr *hdr;
	struct iwn_cmd_data *tx;
	struct iwn_scan_essid *essid;
	struct iwn_scan_chan *chan;
	struct ieee80211_frame *wh;
	struct ieee80211_rateset *rs;
	struct ieee80211_channel *c;
	uint8_t *buf, *frm;
	uint16_t rxchain;
	uint8_t txant;
	int buflen, error;
	int is_active;
	uint16_t dwell_active, dwell_passive;
	uint32_t extra, scan_service_time;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/*
	 * We are absolutely not allowed to send a scan command when another
	 * scan command is pending.
	 */
	if (sc->sc_is_scanning) {
		device_printf(sc->sc_dev, "%s: called whilst scanning!\n",
		    __func__);
		return (EAGAIN);
	}

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	buf = kmalloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_INTWAIT | M_ZERO);
	hdr = (struct iwn_scan_hdr *)buf;
	/*
	 * Move to the next channel if no frames are received within 10ms
	 * after sending the probe request.
	 */
	hdr->quiet_time = htole16(10);		/* timeout in milliseconds */
	hdr->quiet_threshold = htole16(1);	/* min # of packets */
	/*
	 * Max needs to be greater than active and passive and quiet!
	 * It's also in microseconds!
	 */
	hdr->max_svc = htole32(250 * 1024);

	/*
	 * Reset scan: interval=100
	 * Normal scan: interval=beacon interval
	 * suspend_time: 100 (TU)
	 */
	extra = (100 /* suspend_time */ / 100 /* beacon interval */) << 22;
	//scan_service_time = extra | ((100 /* susp */ % 100 /* int */) * 1024);
	scan_service_time = (4 << 22) | (100 * 1024);	/* Hardcode for now! */
	hdr->pause_svc = htole32(scan_service_time);

	/* Select antennas for scanning. */
	rxchain =
	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
	    IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
	    IWN_RXCHAIN_DRIVER_FORCE;
	if (IEEE80211_IS_CHAN_A(ic->ic_curchan) &&
	    sc->hw_type == IWN_HW_REV_TYPE_4965) {
		/* Ant A must be avoided in 5GHz because of an HW bug. */
		rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
	} else	/* Use all available RX antennas. */
		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
	hdr->rxchain = htole16(rxchain);
	hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);

	tx = (struct iwn_cmd_data *)(hdr + 1);
	tx->flags = htole32(IWN_TX_AUTO_SEQ);
	tx->id = sc->broadcast_id;
	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);

	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) {
		/* Send probe requests at 6Mbps.
*/ 6366 tx->rate = htole32(0xd); 6367 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 6368 } else { 6369 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO); 6370 if (sc->hw_type == IWN_HW_REV_TYPE_4965 && 6371 sc->rxon->associd && sc->rxon->chan > 14) 6372 tx->rate = htole32(0xd); 6373 else { 6374 /* Send probe requests at 1Mbps. */ 6375 tx->rate = htole32(10 | IWN_RFLAG_CCK); 6376 } 6377 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 6378 } 6379 /* Use the first valid TX antenna. */ 6380 txant = IWN_LSB(sc->txchainmask); 6381 tx->rate |= htole32(IWN_RFLAG_ANT(txant)); 6382 6383 /* 6384 * Only do active scanning if we're announcing a probe request 6385 * for a given SSID (or more, if we ever add it to the driver.) 6386 */ 6387 is_active = 0; 6388 6389 /* 6390 * If we're scanning for a specific SSID, add it to the command. 6391 */ 6392 essid = (struct iwn_scan_essid *)(tx + 1); 6393 if (ss->ss_ssid[0].len != 0) { 6394 essid[0].id = IEEE80211_ELEMID_SSID; 6395 essid[0].len = ss->ss_ssid[0].len; 6396 memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); 6397 } 6398 6399 DPRINTF(sc, IWN_DEBUG_SCAN, "%s: ssid_len=%d, ssid=%*s\n", 6400 __func__, 6401 ss->ss_ssid[0].len, 6402 ss->ss_ssid[0].len, 6403 ss->ss_ssid[0].ssid); 6404 6405 if (ss->ss_nssid > 0) 6406 is_active = 1; 6407 6408 /* 6409 * Build a probe request frame. Most of the following code is a 6410 * copy & paste of what is done in net80211. 
6411 */ 6412 wh = (struct ieee80211_frame *)(essid + 20); 6413 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 6414 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 6415 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 6416 IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); 6417 IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp)); 6418 IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr); 6419 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ 6420 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ 6421 6422 frm = (uint8_t *)(wh + 1); 6423 frm = ieee80211_add_ssid(frm, NULL, 0); 6424 frm = ieee80211_add_rates(frm, rs); 6425 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 6426 frm = ieee80211_add_xrates(frm, rs); 6427 if (ic->ic_htcaps & IEEE80211_HTC_HT) 6428 frm = ieee80211_add_htcap(frm, ni); 6429 6430 /* Set length of probe request. */ 6431 tx->len = htole16(frm - (uint8_t *)wh); 6432 6433 /* 6434 * If active scanning is requested but a certain channel is 6435 * marked passive, we can do active scanning if we detect 6436 * transmissions. 6437 * 6438 * There is an issue with some firmware versions that triggers 6439 * a sysassert on a "good CRC threshold" of zero (== disabled), 6440 * on a radar channel even though this means that we should NOT 6441 * send probes. 6442 * 6443 * The "good CRC threshold" is the number of frames that we 6444 * need to receive during our dwell time on a channel before 6445 * sending out probes -- setting this to a huge value will 6446 * mean we never reach it, but at the same time work around 6447 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER 6448 * here instead of IWL_GOOD_CRC_TH_DISABLED. 6449 * 6450 * This was fixed in later versions along with some other 6451 * scan changes, and the threshold behaves as a flag in those 6452 * versions. 6453 */ 6454 6455 /* 6456 * If we're doing active scanning, set the crc_threshold 6457 * to a suitable value. 
This is different to active veruss 6458 * passive scanning depending upon the channel flags; the 6459 * firmware will obey that particular check for us. 6460 */ 6461 if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN) 6462 hdr->crc_threshold = is_active ? 6463 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED; 6464 else 6465 hdr->crc_threshold = is_active ? 6466 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER; 6467 6468 c = ic->ic_curchan; 6469 chan = (struct iwn_scan_chan *)frm; 6470 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 6471 chan->flags = 0; 6472 if (ss->ss_nssid > 0) 6473 chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); 6474 chan->dsp_gain = 0x6e; 6475 6476 /* 6477 * Set the passive/active flag depending upon the channel mode. 6478 * XXX TODO: take the is_active flag into account as well? 6479 */ 6480 if (c->ic_flags & IEEE80211_CHAN_PASSIVE) 6481 chan->flags |= htole32(IWN_CHAN_PASSIVE); 6482 else 6483 chan->flags |= htole32(IWN_CHAN_ACTIVE); 6484 6485 /* 6486 * Calculate the active/passive dwell times. 
6487 */ 6488 6489 dwell_active = iwn_get_active_dwell_time(sc, c, ss->ss_nssid); 6490 dwell_passive = iwn_get_passive_dwell_time(sc, c); 6491 6492 /* Make sure they're valid */ 6493 if (dwell_passive <= dwell_active) 6494 dwell_passive = dwell_active + 1; 6495 6496 chan->active = htole16(dwell_active); 6497 chan->passive = htole16(dwell_passive); 6498 6499 if (IEEE80211_IS_CHAN_5GHZ(c) && 6500 !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { 6501 chan->rf_gain = 0x3b; 6502 } else if (IEEE80211_IS_CHAN_5GHZ(c)) { 6503 chan->rf_gain = 0x3b; 6504 } else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { 6505 chan->rf_gain = 0x28; 6506 } else { 6507 chan->rf_gain = 0x28; 6508 } 6509 6510 DPRINTF(sc, IWN_DEBUG_STATE, 6511 "%s: chan %u flags 0x%x rf_gain 0x%x " 6512 "dsp_gain 0x%x active %d passive %d scan_svc_time %d crc 0x%x " 6513 "isactive=%d numssid=%d\n", __func__, 6514 chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain, 6515 dwell_active, dwell_passive, scan_service_time, 6516 hdr->crc_threshold, is_active, ss->ss_nssid); 6517 6518 hdr->nchan++; 6519 chan++; 6520 buflen = (uint8_t *)chan - buf; 6521 hdr->len = htole16(buflen); 6522 6523 if (sc->sc_is_scanning) { 6524 device_printf(sc->sc_dev, 6525 "%s: called with is_scanning set!\n", 6526 __func__); 6527 } 6528 sc->sc_is_scanning = 1; 6529 6530 DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n", 6531 hdr->nchan); 6532 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); 6533 kfree(buf, M_DEVBUF); 6534 6535 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 6536 6537 return error; 6538 } 6539 6540 static int 6541 iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap) 6542 { 6543 struct iwn_ops *ops = &sc->ops; 6544 struct ifnet *ifp = sc->sc_ifp; 6545 struct ieee80211com *ic = ifp->if_l2com; 6546 struct ieee80211_node *ni = vap->iv_bss; 6547 int error; 6548 6549 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 6550 6551 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 6552 /* Update adapter configuration. 
 */
	IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
	sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
		sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
	/* Select the CCK/OFDM basic-rate masks for the BSS channel band. */
	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
		sc->rxon->cck_mask  = 0;
		sc->rxon->ofdm_mask = 0x15;
	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
		sc->rxon->cck_mask  = 0x03;
		sc->rxon->ofdm_mask = 0;
	} else {
		/* Assume 802.11b/g. */
		sc->rxon->cck_mask  = 0x0f;
		sc->rxon->ofdm_mask = 0x15;
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
	    sc->rxon->chan, sc->rxon->flags, sc->rxon->cck_mask,
	    sc->rxon->ofdm_mask);
	if (sc->sc_is_scanning)
		device_printf(sc->sc_dev,
		    "%s: is_scanning set, before RXON\n",
		    __func__);
	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n",
		    __func__, error);
		return error;
	}

	/* Configuration has changed, set TX power accordingly. */
	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set TX power, error %d\n", __func__, error);
		return error;
	}
	/*
	 * Reconfiguring RXON clears the firmware nodes table so we must
	 * add the broadcast node again.
	 */
	if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not add broadcast node, error %d\n", __func__,
		    error);
		return error;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * Move the BSS context into RUN state: program timing and the full RXON
 * (including HT protection flags), re-add the BSS node to the firmware,
 * set up link quality, and start the periodic calibration timer.
 */
static int
iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_node *ni = vap->iv_bss;
	struct iwn_node_info node;
	uint32_t htflags = 0;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* Link LED blinks while monitoring. */
		iwn_set_led(sc, IWN_LED_LINK, 5, 5);
		return 0;
	}
	if ((error = iwn_set_timing(sc, ni)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set timing, error %d\n", __func__, error);
		return error;
	}

	/* Update adapter configuration. */
	IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
	sc->rxon->associd = htole16(IEEE80211_AID(ni->ni_associd));
	sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
		sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
	/* Same basic-rate mask selection as in iwn_auth(). */
	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
		sc->rxon->cck_mask  = 0;
		sc->rxon->ofdm_mask = 0x15;
	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
		sc->rxon->cck_mask  = 0x03;
		sc->rxon->ofdm_mask = 0;
	} else {
		/* Assume 802.11b/g. */
		sc->rxon->cck_mask  = 0x0f;
		sc->rxon->ofdm_mask = 0x15;
	}
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
		htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode);
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			switch (ic->ic_curhtprotmode) {
			case IEEE80211_HTINFO_OPMODE_HT20PR:
				htflags |= IWN_RXON_HT_MODEPURE40;
				break;
			default:
				htflags |= IWN_RXON_HT_MODEMIXED;
				break;
			}
		}
		if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan))
			htflags |= IWN_RXON_HT_HT40MINUS;
	}
	sc->rxon->flags |= htole32(htflags);
	sc->rxon->filter |= htole32(IWN_FILTER_BSS);
	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x\n",
	    sc->rxon->chan, sc->rxon->flags);
	if (sc->sc_is_scanning)
		device_printf(sc->sc_dev,
		    "%s: is_scanning set, before RXON\n",
		    __func__);
	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not update configuration, error %d\n", __func__,
		    error);
		return error;
	}

	/* Configuration has changed, set TX power accordingly. */
	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set TX power, error %d\n", __func__, error);
		return error;
	}

	/* Fake a join to initialize the TX rate. */
	((struct iwn_node *)ni)->id = IWN_ID_BSS;
	iwn_newassoc(ni, 1);

	/* Add BSS node (RXON reprogramming cleared the firmware table). */
	memset(&node, 0, sizeof node);
	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
	node.id = IWN_ID_BSS;
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
		/* Mirror the peer's SM power-save mode in the node flags. */
		switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) {
		case IEEE80211_HTCAP_SMPS_ENA:
			node.htflags |= htole32(IWN_SMPS_MIMO_DIS);
			break;
		case IEEE80211_HTCAP_SMPS_DYNAMIC:
			node.htflags |= htole32(IWN_SMPS_MIMO_PROT);
			break;
		}
		node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) |
		    IWN_AMDPU_DENSITY(5));	/* 4us */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
			node.htflags |= htole32(IWN_NODE_HT40);
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__);
	error = ops->add_node(sc, &node, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not add BSS node, error %d\n", __func__, error);
		return error;
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n",
	    __func__, node.id);
	if ((error = iwn_set_link_quality(sc, ni)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not setup link quality for node %d, error %d\n",
		    __func__, node.id, error);
		return error;
	}

	if ((error = iwn_init_sensitivity(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set sensitivity, error %d\n", __func__,
		    error);
		return error;
	}
	/* Start periodic calibration timer. */
	sc->calib.state = IWN_CALIB_STATE_ASSOC;
	sc->calib_cnt = 0;
	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
	    sc);

	/* Link LED always on while associated. */
	iwn_set_led(sc, IWN_LED_LINK, 0, 1);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * This function is called by upper layer when an ADDBA request is received
 * from another STA and before the ADDBA response is sent.
6755 */ 6756 static int 6757 iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap, 6758 int baparamset, int batimeout, int baseqctl) 6759 { 6760 #define MS(_v, _f) (((_v) & _f) >> _f##_S) 6761 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6762 struct iwn_ops *ops = &sc->ops; 6763 struct iwn_node *wn = (void *)ni; 6764 struct iwn_node_info node; 6765 uint16_t ssn; 6766 uint8_t tid; 6767 int error; 6768 6769 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6770 6771 tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID); 6772 ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START); 6773 6774 memset(&node, 0, sizeof node); 6775 node.id = wn->id; 6776 node.control = IWN_NODE_UPDATE; 6777 node.flags = IWN_FLAG_SET_ADDBA; 6778 node.addba_tid = tid; 6779 node.addba_ssn = htole16(ssn); 6780 DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n", 6781 wn->id, tid, ssn); 6782 error = ops->add_node(sc, &node, 1); 6783 if (error != 0) 6784 return error; 6785 return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl); 6786 #undef MS 6787 } 6788 6789 /* 6790 * This function is called by upper layer on teardown of an HT-immediate 6791 * Block Ack agreement (eg. uppon receipt of a DELBA frame). 
6792 */ 6793 static void 6794 iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap) 6795 { 6796 struct ieee80211com *ic = ni->ni_ic; 6797 struct iwn_softc *sc = ic->ic_ifp->if_softc; 6798 struct iwn_ops *ops = &sc->ops; 6799 struct iwn_node *wn = (void *)ni; 6800 struct iwn_node_info node; 6801 uint8_t tid; 6802 6803 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6804 6805 /* XXX: tid as an argument */ 6806 for (tid = 0; tid < WME_NUM_TID; tid++) { 6807 if (&ni->ni_rx_ampdu[tid] == rap) 6808 break; 6809 } 6810 6811 memset(&node, 0, sizeof node); 6812 node.id = wn->id; 6813 node.control = IWN_NODE_UPDATE; 6814 node.flags = IWN_FLAG_SET_DELBA; 6815 node.delba_tid = tid; 6816 DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid); 6817 (void)ops->add_node(sc, &node, 1); 6818 sc->sc_ampdu_rx_stop(ni, rap); 6819 } 6820 6821 static int 6822 iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 6823 int dialogtoken, int baparamset, int batimeout) 6824 { 6825 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6826 int qid; 6827 6828 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6829 6830 for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) { 6831 if (sc->qid2tap[qid] == NULL) 6832 break; 6833 } 6834 if (qid == sc->ntxqs) { 6835 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n", 6836 __func__); 6837 return 0; 6838 } 6839 tap->txa_private = kmalloc(sizeof(int), M_DEVBUF, M_INTWAIT); 6840 sc->qid2tap[qid] = tap; 6841 *(int *)tap->txa_private = qid; 6842 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, 6843 batimeout); 6844 } 6845 6846 static int 6847 iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 6848 int code, int baparamset, int batimeout) 6849 { 6850 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6851 int qid = *(int *)tap->txa_private; 6852 uint8_t tid = tap->txa_ac; 6853 int ret; 6854 6855 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", 
__func__); 6856 6857 if (code == IEEE80211_STATUS_SUCCESS) { 6858 ni->ni_txseqs[tid] = tap->txa_start & 0xfff; 6859 ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid); 6860 if (ret != 1) 6861 return ret; 6862 } else { 6863 sc->qid2tap[qid] = NULL; 6864 kfree(tap->txa_private, M_DEVBUF); 6865 tap->txa_private = NULL; 6866 } 6867 return sc->sc_addba_response(ni, tap, code, baparamset, batimeout); 6868 } 6869 6870 /* 6871 * This function is called by upper layer when an ADDBA response is received 6872 * from another STA. 6873 */ 6874 static int 6875 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 6876 uint8_t tid) 6877 { 6878 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid]; 6879 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 6880 struct iwn_ops *ops = &sc->ops; 6881 struct iwn_node *wn = (void *)ni; 6882 struct iwn_node_info node; 6883 int error, qid; 6884 6885 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6886 6887 /* Enable TX for the specified RA/TID. 
 */
	wn->disable_tid &= ~(1 << tid);
	memset(&node, 0, sizeof node);
	node.id = wn->id;
	node.control = IWN_NODE_UPDATE;
	node.flags = IWN_FLAG_SET_DISABLE_TID;
	node.disable_tid = htole16(wn->disable_tid);
	error = ops->add_node(sc, &node, 1);
	if (error != 0)
		return 0;

	if ((error = iwn_nic_lock(sc)) != 0)
		return 0;
	qid = *(int *)tap->txa_private;
	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n",
	    __func__, wn->id, tid, tap->txa_start, qid);
	/* Program the chip-specific scheduler for this aggregation queue. */
	ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff);
	iwn_nic_unlock(sc);

	iwn_set_link_quality(sc, ni);
	/* Return 1 on success: the caller treats any other value as failure. */
	return 1;
}

/*
 * Tear down a TX aggregation session: notify net80211 first, then stop
 * the hardware queue (once drained) and release the queue reservation.
 */
static void
iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct iwn_ops *ops = &sc->ops;
	uint8_t tid = tap->txa_ac;
	int qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	sc->sc_addba_stop(ni, tap);

	/* No queue was ever reserved for this session. */
	if (tap->txa_private == NULL)
		return;

	qid = *(int *)tap->txa_private;
	/* Defer hardware teardown until the queue has drained. */
	if (sc->txq[qid].queued != 0)
		return;
	if (iwn_nic_lock(sc) != 0)
		return;
	ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff);
	iwn_nic_unlock(sc);
	sc->qid2tap[qid] = NULL;
	kfree(tap->txa_private, M_DEVBUF);
	tap->txa_private = NULL;
}

/*
 * 4965-specific TX scheduler programming for a new aggregation queue.
 * Must be called with the NIC lock held; register write order matters.
 */
static void
iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
    int qid, uint8_t tid, uint16_t ssn)
{
	struct iwn_node *wn = (void *)ni;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_CHGACT);

	/* Assign RA/TID translation to the queue. */
	iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
	    wn->id << 4 | tid);

	/* Enable chain-building mode for the queue. */
	iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);

	/* Set starting sequence number from the ADDBA request. */
	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Set scheduler window size. */
	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
	    IWN_SCHED_WINSZ);
	/* Set scheduler frame limit. */
	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
	    IWN_SCHED_LIMIT << 16);

	/* Enable interrupts for the queue. */
	iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as active. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
	    iwn_tid2fifo[tid] << 1);
}

/*
 * 4965-specific TX scheduler teardown for an aggregation queue.
 * Must be called with the NIC lock held.
 */
static void
iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
{
	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_CHGACT);

	/* Set starting sequence number from the ADDBA request. */
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Disable interrupts for the queue. */
	iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as inactive. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
}

/*
 * 5000-series TX scheduler programming for a new aggregation queue.
 * Must be called with the NIC lock held; register write order matters.
 */
static void
iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
    int qid, uint8_t tid, uint16_t ssn)
{
	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	struct iwn_node *wn = (void *)ni;

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_CHGACT);

	/* Assign RA/TID translation to the queue. */
	iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
	    wn->id << 4 | tid);

	/* Enable chain-building mode for the queue. */
	iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);

	/* Enable aggregation for the queue. */
	iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);

	/* Set starting sequence number from the ADDBA request. */
	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Set scheduler window size and frame limit. */
	iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
	    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);

	/* Enable interrupts for the queue. */
	iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as active. */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
}

/*
 * 5000-series TX scheduler teardown for an aggregation queue.
 * Must be called with the NIC lock held.
 */
static void
iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
{
	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Stop TX scheduler while we're changing its configuration.
 */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_CHGACT);

	/* Disable aggregation for the queue. */
	iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);

	/* Set starting sequence number from the ADDBA request. */
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Disable interrupts for the queue. */
	iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as inactive. */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
}

/*
 * Query calibration tables from the initialization firmware.  We do this
 * only once at first boot.  Called from a process context.
 */
static int
iwn5000_query_calibration(struct iwn_softc *sc)
{
	struct iwn5000_calib_config cmd;
	int error;

	/* All-ones masks request every calibration result. */
	memset(&cmd, 0, sizeof cmd);
	cmd.ucode.once.enable = 0xffffffff;
	cmd.ucode.once.start  = 0xffffffff;
	cmd.ucode.once.send   = 0xffffffff;
	cmd.ucode.flags       = 0xffffffff;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n",
	    __func__);
	error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
	if (error != 0)
		return error;

	/* Wait at most two seconds for calibration to complete. */
	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
		error = zsleep(sc, &wlan_global_serializer, 0, "iwncal", 2 * hz);
	return error;
}

/*
 * Send calibration results to the runtime firmware.  These results were
 * obtained on first boot from the initialization firmware.
 */
static int
iwn5000_send_calibration(struct iwn_softc *sc)
{
	int idx, error;

	for (idx = 0; idx < IWN5000_PHY_CALIB_MAX_RESULT; idx++) {
		/* Only send the calibrations this chip actually needs. */
		if (!(sc->base_params->calib_need & (1<<idx))) {
			DPRINTF(sc, IWN_DEBUG_CALIBRATE,
			    "No need of calib %d\n",
			    idx);
			continue; /* no need for this calib */
		}
		/* Skip results the init firmware never delivered. */
		if (sc->calibcmd[idx].buf == NULL) {
			DPRINTF(sc, IWN_DEBUG_CALIBRATE,
			    "Need calib idx : %d but no available data\n",
			    idx);
			continue;
		}

		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "send calibration result idx=%d len=%d\n", idx,
		    sc->calibcmd[idx].len);
		error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
		    sc->calibcmd[idx].len, 0);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not send calibration result, error %d\n",
			    __func__, error);
			return error;
		}
	}
	return 0;
}

/*
 * Configure (or explicitly disable) WiMAX coexistence for combo adapters.
 */
static int
iwn5000_send_wimax_coex(struct iwn_softc *sc)
{
	struct iwn5000_wimax_coex wimax;
	/*
	 * NOTE(review): only 'flags' and 'events' are initialized below;
	 * if the structure has further members they are sent to the
	 * firmware uninitialized — confirm against if_iwnreg.h, or
	 * memset the whole struct.
	 */

#ifdef notyet
	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
		/* Enable WiMAX coexistence for combo adapters. */
		wimax.flags =
		    IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
		    IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
		    IWN_WIMAX_COEX_STA_TABLE_VALID |
		    IWN_WIMAX_COEX_ENABLE;
		memcpy(wimax.events, iwn6050_wimax_events,
		    sizeof iwn6050_wimax_events);
	} else
#endif
	{
		/* Disable WiMAX coexistence. */
		wimax.flags = 0;
		memset(wimax.events, 0, sizeof wimax.events);
	}
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
	    __func__);
	return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
}

/*
 * Send the crystal capacitor calibration derived from the EEPROM.
 */
static int
iwn5000_crystal_calib(struct iwn_softc *sc)
{
	struct iwn5000_phy_calib_crystal cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	/* Cap pins live in the low byte and byte 2 of the EEPROM word. */
	cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
	cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n",
	    cmd.cap_pin[0], cmd.cap_pin[1]);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}

/*
 * Send the radio temperature sensor offset (v1 command), falling back to
 * a default when the EEPROM has no value.
 */
static int
iwn5000_temp_offset_calib(struct iwn_softc *sc)
{
	struct iwn5000_phy_calib_temp_offset cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	if (sc->eeprom_temp != 0)
		cmd.offset = htole16(sc->eeprom_temp);
	else
		cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n",
	    le16toh(cmd.offset));
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}

/*
 * Send the radio temperature sensor offsets (v2 command: separate low and
 * high offsets plus burnt voltage reference).
 */
static int
iwn5000_temp_offset_calibv2(struct iwn_softc *sc)
{
	struct iwn5000_phy_calib_temp_offsetv2 cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	if (sc->eeprom_temp != 0) {
		cmd.offset_low = htole16(sc->eeprom_temp);
		cmd.offset_high = htole16(sc->eeprom_temp_high);
	} else {
		cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET);
		cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET);
	}
	cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "setting radio sensor low offset to %d, high offset to %d, voltage to %d\n",
	    le16toh(cmd.offset_low),
	    le16toh(cmd.offset_high),
	    le16toh(cmd.burnt_voltage_ref));

	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}

/*
 * This function is called after the runtime firmware notifies us of its
 * readiness (called in a process context).
 */
static int
iwn4965_post_alive(struct iwn_softc *sc)
{
	int error, qid;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Clear TX scheduler state in SRAM. */
	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
	iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
	    IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);

	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);

	/* Disable chain mode for all our 16 queues. */
	iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);

	for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);

		/* Set scheduler window size. */
		iwn_mem_write(sc, sc->sched_base +
		    IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
		/* Set scheduler frame limit. */
		iwn_mem_write(sc, sc->sched_base +
		    IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
		    IWN_SCHED_LIMIT << 16);
	}

	/* Enable interrupts for all our 16 queues. */
	iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
	/* Identify TX FIFO rings (0-7). */
	iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);

	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active.
 */
	for (qid = 0; qid < 7; qid++) {
		/* Maps queue id -> TX FIFO number for the 4965 layout. */
		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
		    IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
	}
	iwn_nic_unlock(sc);
	return 0;
}

/*
 * This function is called after the initialization or runtime firmware
 * notifies us of its readiness (called in a process context).
 */
static int
iwn5000_post_alive(struct iwn_softc *sc)
{
	int error, qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Switch to using ICT interrupt mode. */
	iwn5000_ict_reset(sc);

	if ((error = iwn_nic_lock(sc)) != 0) {
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
		return error;
	}

	/* Clear TX scheduler state in SRAM. */
	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
	iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
	    IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);

	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);

	/*
	 * Enable chain mode for all queues, except command queue.
	 * The command queue is queue 4 on PAN-capable devices (bit 4
	 * cleared in 0xfffdf), queue 4 slot differs otherwise (0xfffef).
	 */
	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
		iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffdf);
	else
		iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
	iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);

	for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
		/* Reset both the hardware read and write ring pointers. */
		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);

		iwn_mem_write(sc, sc->sched_base +
		    IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit.
 */
		iwn_mem_write(sc, sc->sched_base +
		    IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
		    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
	}

	/* Enable interrupts for all our 20 queues. */
	iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
	/* Identify TX FIFO rings (0-7). */
	iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);

	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) {
		/* PAN-capable devices use 11 queues. */
		for (qid = 0; qid < 11; qid++) {
			/* Queue id -> TX FIFO mapping for PAN devices. */
			static uint8_t qid2fifo[] = { 3, 2, 1, 0, 0, 4, 2, 5, 4, 7, 5 };
			iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
			    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
		}
	} else {
		/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
		for (qid = 0; qid < 7; qid++) {
			/* Queue id -> TX FIFO mapping for non-PAN devices. */
			static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
			iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
			    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
		}
	}
	iwn_nic_unlock(sc);

	/* Configure WiMAX coexistence for combo adapters. */
	error = iwn5000_send_wimax_coex(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not configure WiMAX coexistence, error %d\n",
		    __func__, error);
		return error;
	}
	if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
		/* Perform crystal calibration (5150 has no XTAL calib). */
		error = iwn5000_crystal_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: crystal calibration failed, error %d\n",
			    __func__, error);
			return error;
		}
	}
	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
		/* Query calibration from the initialization firmware.
 */
		if ((error = iwn5000_query_calibration(sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not query calibration, error %d\n",
			    __func__, error);
			return error;
		}
		/*
		 * We have the calibration results now, reboot with the
		 * runtime firmware (call ourselves recursively!)
		 */
		iwn_hw_stop(sc);
		error = iwn_hw_init(sc);
	} else {
		/* Send calibration results to runtime firmware. */
		error = iwn5000_send_calibration(sc);
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__);

	return error;
}

/*
 * The firmware boot code is small and is intended to be copied directly into
 * the NIC internal memory (no DMA transfer).
 */
static int
iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
{
	int error, ntries;

	/* The BSM operates on 32-bit words. */
	size /= sizeof (uint32_t);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	/* Copy microcode image into NIC memory. */
	iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
	    (const uint32_t *)ucode, size);

	iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
	iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
	iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);

	/* Start boot load now. */
	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);

	/* Wait for transfer to complete (up to ~10ms). */
	for (ntries = 0; ntries < 1000; ntries++) {
		if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
		    IWN_BSM_WR_CTRL_START))
			break;
		DELAY(10);
	}
	if (ntries == 1000) {
		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
		    __func__);
		iwn_nic_unlock(sc);
		return ETIMEDOUT;
	}

	/* Enable boot after power up.
 */
	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);

	iwn_nic_unlock(sc);
	return 0;
}

/*
 * Load the 4965 firmware: first the initialization image (via the BSM and
 * boot code), wait for the "alive" notification, then the runtime image.
 */
static int
iwn4965_load_firmware(struct iwn_softc *sc)
{
	struct iwn_fw_info *fw = &sc->fw;
	struct iwn_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy initialization sections into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
	/* .text is stored after the maximum-sized .data region. */
	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
	    fw->init.text, fw->init.textsz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	/* Tell adapter where to find initialization sections. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
	iwn_nic_unlock(sc);

	/* Load firmware boot code. */
	error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
		    __func__);
		return error;
	}
	/* Now press "execute". */
	IWN_WRITE(sc, IWN_RESET, 0);

	/* Wait at most one second for first alive notification. */
	if ((error = zsleep(sc, &wlan_global_serializer, 0, "iwninit", hz)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: timeout waiting for adapter to initialize, error %d\n",
		    __func__, error);
		return error;
	}

	/* Retrieve current temperature for initial TX power calibration. */
	sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
	sc->temp = iwn4965_get_temperature(sc);

	/* Copy runtime sections into pre-allocated DMA-safe memory.
 */
	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
	    fw->main.text, fw->main.textsz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	/* Tell adapter where to find runtime sections. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
	/* IWN_FW_UPDATED tells the BSM this is the runtime image. */
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
	    IWN_FW_UPDATED | fw->main.textsz);
	iwn_nic_unlock(sc);

	return 0;
}

/*
 * DMA one firmware section to the given device SRAM address using the
 * service DMA channel of the Flow Handler.
 */
static int
iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
    const uint8_t *section, int size)
{
	struct iwn_dma_info *dma = &sc->fw_dma;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Copy firmware section into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, size);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	/* Pause the channel while we set up the transfer descriptor. */
	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
	    IWN_FH_TX_CONFIG_DMA_PAUSE);

	IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
	IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
	    IWN_LOADDR(dma->paddr));
	IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
	    IWN_HIADDR(dma->paddr) << 28 | size);
	IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
	    IWN_FH_TXBUF_STATUS_TBNUM(1) |
	    IWN_FH_TXBUF_STATUS_TBIDX(1) |
	    IWN_FH_TXBUF_STATUS_TFBD_VALID);

	/* Kick Flow Handler to start DMA transfer.
 */
	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
	    IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);

	iwn_nic_unlock(sc);

	/* Wait at most five seconds for FH DMA transfer to complete. */
	return zsleep(sc, &wlan_global_serializer, 0, "iwninit", 5 * hz);
}

/*
 * Load the appropriate firmware image (init on first boot, runtime after
 * calibration) on 5000-series and newer adapters.
 */
static int
iwn5000_load_firmware(struct iwn_softc *sc)
{
	struct iwn_fw_part *fw;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Load the initialization firmware on first boot only. */
	fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
	    &sc->fw.main : &sc->fw.init;

	error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
	    fw->text, fw->textsz);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not load firmware %s section, error %d\n",
		    __func__, ".text", error);
		return error;
	}
	error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
	    fw->data, fw->datasz);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not load firmware %s section, error %d\n",
		    __func__, ".data", error);
		return error;
	}

	/* Now press "execute". */
	IWN_WRITE(sc, IWN_RESET, 0);
	return 0;
}

/*
 * Extract text and data sections from a legacy firmware image.
 */
static int
iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
{
	const uint32_t *ptr;
	/* Base header: revision word + five section-size words = 24 bytes. */
	size_t hdrlen = 24;
	uint32_t rev;

	ptr = (const uint32_t *)fw->data;
	rev = le32toh(*ptr++);

	/* Check firmware API version. */
	if (IWN_FW_API(rev) <= 1) {
		device_printf(sc->sc_dev,
		    "%s: bad firmware, need API version >=2\n", __func__);
		return EINVAL;
	}
	if (IWN_FW_API(rev) >= 3) {
		/* Skip build number (version 2 header).
 */
		hdrlen += 4;
		ptr++;
	}
	if (fw->size < hdrlen) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		return EINVAL;
	}
	fw->main.textsz = le32toh(*ptr++);
	fw->main.datasz = le32toh(*ptr++);
	fw->init.textsz = le32toh(*ptr++);
	fw->init.datasz = le32toh(*ptr++);
	fw->boot.textsz = le32toh(*ptr++);

	/* Check that all firmware sections fit. */
	if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		return EINVAL;
	}

	/* Get pointers to firmware sections (laid out back to back). */
	fw->main.text = (const uint8_t *)ptr;
	fw->main.data = fw->main.text + fw->main.textsz;
	fw->init.text = fw->main.data + fw->main.datasz;
	fw->init.data = fw->init.text + fw->init.textsz;
	fw->boot.text = fw->init.data + fw->init.datasz;
	return 0;
}

/*
 * Extract text and data sections from a TLV firmware image.
 */
static int
iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
    uint16_t alt)
{
	const struct iwn_fw_tlv_hdr *hdr;
	const struct iwn_fw_tlv *tlv;
	const uint8_t *ptr, *end;
	uint64_t altmask;
	uint32_t len, tmp;

	if (fw->size < sizeof (*hdr)) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		return EINVAL;
	}
	hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
	if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
		device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n",
		    __func__, le32toh(hdr->signature));
		return EINVAL;
	}
	DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr,
	    le32toh(hdr->build));

	/*
	 * Select the closest supported alternative that is less than
	 * or equal to the specified one.
	 */
	altmask = le64toh(hdr->altmask);
	while (alt > 0 && !(altmask & (1ULL << alt)))
		alt--;	/* Downgrade. */
	DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt);

	ptr = (const uint8_t *)(hdr + 1);
	end = (const uint8_t *)(fw->data + fw->size);

	/* Parse type-length-value fields. */
	while (ptr + sizeof (*tlv) <= end) {
		tlv = (const struct iwn_fw_tlv *)ptr;
		len = le32toh(tlv->len);

		ptr += sizeof (*tlv);
		/*
		 * NOTE(review): this bounds check assumes ptr + len does not
		 * wrap around the address space for a hostile len — confirm
		 * the firmware image is trusted at this point.
		 */
		if (ptr + len > end) {
			device_printf(sc->sc_dev,
			    "%s: firmware too short: %zu bytes\n", __func__,
			    fw->size);
			return EINVAL;
		}
		/* Skip other alternatives.
 */
		if (tlv->alt != 0 && tlv->alt != htole16(alt))
			goto next;

		switch (le16toh(tlv->type)) {
		case IWN_FW_TLV_MAIN_TEXT:
			fw->main.text = ptr;
			fw->main.textsz = len;
			break;
		case IWN_FW_TLV_MAIN_DATA:
			fw->main.data = ptr;
			fw->main.datasz = len;
			break;
		case IWN_FW_TLV_INIT_TEXT:
			fw->init.text = ptr;
			fw->init.textsz = len;
			break;
		case IWN_FW_TLV_INIT_DATA:
			fw->init.data = ptr;
			fw->init.datasz = len;
			break;
		case IWN_FW_TLV_BOOT_TEXT:
			fw->boot.text = ptr;
			fw->boot.textsz = len;
			break;
		case IWN_FW_TLV_ENH_SENS:
			/* A zero-length TLV marks enhanced sensitivity. */
			if (!len)
				sc->sc_flags |= IWN_FLAG_ENH_SENS;
			break;
		case IWN_FW_TLV_PHY_CALIB:
			tmp = le32toh(*ptr);
			/* Values >= 253 are reserved/invalid calib codes. */
			if (tmp < 253) {
				sc->reset_noise_gain = tmp;
				sc->noise_gain = tmp + 1;
			}
			break;
		case IWN_FW_TLV_PAN:
			sc->sc_flags |= IWN_FLAG_PAN_SUPPORT;
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "PAN Support found: %d\n", 1);
			break;
		case IWN_FW_TLV_FLAGS:
			/* Only the first 32-bit word of flags is used. */
			if (len < sizeof(uint32_t))
				break;
			if (len % sizeof(uint32_t))
				break;
			sc->tlv_feature_flags = le32toh(*ptr);
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "%s: feature: 0x%08x\n",
			    __func__,
			    sc->tlv_feature_flags);
			break;
		case IWN_FW_TLV_PBREQ_MAXLEN:
		case IWN_FW_TLV_RUNT_EVTLOG_PTR:
		case IWN_FW_TLV_RUNT_EVTLOG_SIZE:
		case IWN_FW_TLV_RUNT_ERRLOG_PTR:
		case IWN_FW_TLV_INIT_EVTLOG_PTR:
		case IWN_FW_TLV_INIT_EVTLOG_SIZE:
		case IWN_FW_TLV_INIT_ERRLOG_PTR:
		case IWN_FW_TLV_WOWLAN_INST:
		case IWN_FW_TLV_WOWLAN_DATA:
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "TLV type %d recognized but not handled\n",
			    le16toh(tlv->type));
			break;
		default:
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "TLV type %d not handled\n", le16toh(tlv->type));
			break;
		}
 next:		/* TLV fields are 32-bit aligned.
 */
		ptr += (len + 3) & ~3;
	}
	return 0;
}

/*
 * Read the firmware image from the filesystem and locate its text/data
 * sections (legacy or TLV layout).
 */
static int
iwn_read_firmware(struct iwn_softc *sc)
{
	struct iwn_fw_info *fw = &sc->fw;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	wlan_assert_serialized();
	memset(fw, 0, sizeof (*fw));

	/*
	 * Read firmware image from filesystem. The firmware can block
	 * in a taskq and deadlock against our serializer so unlock
	 * while we do this.
	 */
	wlan_serialize_exit();
	sc->fw_fp = firmware_get(sc->fwname);
	wlan_serialize_enter();
	if (sc->fw_fp == NULL) {
		device_printf(sc->sc_dev, "%s: could not read firmware %s\n",
		    __func__, sc->fwname);
		return EINVAL;
	}

	fw->size = sc->fw_fp->datasize;
	fw->data = (const uint8_t *)sc->fw_fp->data;
	/* Need at least the first word to tell legacy from TLV images. */
	if (fw->size < sizeof (uint32_t)) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
		sc->fw_fp = NULL;
		return EINVAL;
	}

	/* Retrieve text and data sections. */
	if (*(const uint32_t *)fw->data != 0)	/* Legacy image. */
		error = iwn_read_firmware_leg(sc, fw);
	else
		error = iwn_read_firmware_tlv(sc, fw, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not read firmware sections, error %d\n",
		    __func__, error);
		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
		sc->fw_fp = NULL;
		return error;
	}

	/* Make sure text and data sections fit in hardware memory.
 */
	if (fw->main.textsz > sc->fw_text_maxsz ||
	    fw->main.datasz > sc->fw_data_maxsz ||
	    fw->init.textsz > sc->fw_text_maxsz ||
	    fw->init.datasz > sc->fw_data_maxsz ||
	    fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
	    (fw->boot.textsz & 3) != 0) {
		device_printf(sc->sc_dev, "%s: firmware sections too large\n",
		    __func__);
		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
		sc->fw_fp = NULL;
		return EINVAL;
	}

	/* We can proceed with loading the firmware. */
	return 0;
}

/*
 * Request MAC clock availability and busy-wait (up to 25ms) until the
 * hardware reports the clock as ready.
 */
static int
iwn_clock_wait(struct iwn_softc *sc)
{
	int ntries;

	/* Set "initialization complete" bit. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);

	/* Wait for clock stabilization. */
	for (ntries = 0; ntries < 2500; ntries++) {
		if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
			return 0;
		DELAY(10);
	}
	device_printf(sc->sc_dev,
	    "%s: timeout waiting for clock stabilization\n", __func__);
	return ETIMEDOUT;
}

/*
 * Power on the adapter and bring up its clocks, applying the various
 * PCIe/hardware bug workarounds the chip requires.
 */
static int
iwn_apm_init(struct iwn_softc *sc)
{
	uint32_t reg;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Disable L0s exit timer (NMI bug workaround). */
	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
	/* Don't wait for ICH L0s (ICH bug workaround). */
	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);

	/* Set FH wait threshold to max (HW bug under stress workaround). */
	IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);

	/* Enable HAP INTA to move adapter from L1a to L0s. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);

	/*
	 * Retrieve PCIe Active State Power Management (ASPM).
	 * Offset 0x10 from the capability is the Link Control register.
	 */
	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
	if (reg & 0x02)	/* L1 Entry enabled.
 */
		IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
	else
		IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);

	if (sc->base_params->pll_cfg_val)
		IWN_SETBITS(sc, IWN_ANA_PLL, sc->base_params->pll_cfg_val);

	/* Wait for clock stabilization before accessing prph. */
	if ((error = iwn_clock_wait(sc)) != 0)
		return error;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
		/* Enable DMA and BSM (Bootstrap State Machine). */
		iwn_prph_write(sc, IWN_APMG_CLK_EN,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
		    IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
	} else {
		/* Enable DMA (newer chips have no BSM). */
		iwn_prph_write(sc, IWN_APMG_CLK_EN,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
	}
	DELAY(20);
	/* Disable L1-Active. */
	iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
	iwn_nic_unlock(sc);

	return 0;
}

/*
 * Stop busmaster DMA and wait (up to 1ms) for the hardware to confirm.
 */
static void
iwn_apm_stop_master(struct iwn_softc *sc)
{
	int ntries;

	/* Stop busmaster DMA activity. */
	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
	for (ntries = 0; ntries < 100; ntries++) {
		if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
			return;
		DELAY(10);
	}
	device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__);
}

/*
 * Power down the adapter: stop DMA, soft-reset and drop the init bit.
 */
static void
iwn_apm_stop(struct iwn_softc *sc)
{
	iwn_apm_stop_master(sc);

	/* Reset the entire device. */
	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
	DELAY(10);
	/* Clear "initialization complete" bit. */
	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
}

/*
 * 4965-specific hardware configuration: program the RF config and silicon
 * bits into the hardware interface configuration register.
 */
static int
iwn4965_nic_config(struct iwn_softc *sc)
{
	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
		/*
		 * I don't believe this to be correct but this is what the
		 * vendor driver is doing.
		 * Probably the bits should not be
		 * shifted in IWN_RFCFG_*.
		 */
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_RFCFG_TYPE(sc->rfcfg) |
		    IWN_RFCFG_STEP(sc->rfcfg) |
		    IWN_RFCFG_DASH(sc->rfcfg));
	}
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
	return 0;
}

/*
 * 5000-series and newer hardware configuration: RF config, power-off
 * behavior, voltage regulator workaround and GP driver bits.
 */
static int
iwn5000_nic_config(struct iwn_softc *sc)
{
	uint32_t tmp;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_RFCFG_TYPE(sc->rfcfg) |
		    IWN_RFCFG_STEP(sc->rfcfg) |
		    IWN_RFCFG_DASH(sc->rfcfg));
	}
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);

	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
		/*
		 * Select first Switching Voltage Regulator (1.32V) to
		 * solve a stability issue related to noisy DC2DC line
		 * in the silicon of 1000 Series.
		 */
		tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
		tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
		tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
		iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
	}
	iwn_nic_unlock(sc);

	if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
		/* Use internal power amplifier only. */
		IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
	}
	if (sc->base_params->additional_nic_config && sc->calib_ver >= 6) {
		/* Indicate that ROM calibration version is >=6.
 */
		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
	}
	if (sc->base_params->additional_gp_drv_bit)
		IWN_SETBITS(sc, IWN_GP_DRIVER,
		    sc->base_params->additional_gp_drv_bit);
	return 0;
}

/*
 * Take NIC ownership over Intel Active Management Technology (AMT).
 */
static int
iwn_hw_prepare(struct iwn_softc *sc)
{
	int ntries;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Check if hardware is ready. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
	for (ntries = 0; ntries < 5; ntries++) {
		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_NIC_READY)
			return 0;
		DELAY(10);
	}

	/* Hardware not ready, force into ready state (up to 150ms). */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
	for (ntries = 0; ntries < 15000; ntries++) {
		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_PREPARE_DONE))
			break;
		DELAY(10);
	}
	if (ntries == 15000)
		return ETIMEDOUT;

	/* Hardware should be ready now. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
	for (ntries = 0; ntries < 5; ntries++) {
		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_NIC_READY)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}

/*
 * Full hardware bring-up: power on, configure RX/TX rings and DMA, load
 * the firmware and wait for its alive notification.
 */
static int
iwn_hw_init(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	int error, chnl, qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);

	if ((error = iwn_apm_init(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not power ON adapter, error %d\n", __func__,
		    error);
		return error;
	}

	/* Select VMAIN power source.
 */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
	iwn_nic_unlock(sc);

	/* Perform adapter-specific initialization. */
	if ((error = ops->nic_config(sc)) != 0)
		return error;

	/* Initialize RX ring. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
	/* Set physical address of RX ring (256-byte aligned). */
	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
	/* Set physical address of RX status (16-byte aligned). */
	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
	/* Enable RX. */
	IWN_WRITE(sc, IWN_FH_RX_CONFIG,
	    IWN_FH_RX_CONFIG_ENA |
	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
	    IWN_FH_RX_CONFIG_IRQ_DST_HOST |
	    IWN_FH_RX_CONFIG_SINGLE_FRAME |
	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
	iwn_nic_unlock(sc);
	/* Write pointer must stay 8-aligned, hence the & ~7. */
	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	/* Initialize TX scheduler. */
	iwn_prph_write(sc, sc->sched_txfact_addr, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < sc->ntxqs; qid++) {
		struct iwn_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
	}
	iwn_nic_unlock(sc);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
		    IWN_FH_TX_CONFIG_DMA_ENA |
		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
	}

	/* Clear "radio off" and "commands blocked" bits.
 */
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	/* Enable interrupt coalescing. */
	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
	/* Enable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);

	/* _Really_ make sure "radio off" bit is cleared! */
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);

	/* Enable shadow registers. */
	if (sc->base_params->shadow_reg_enable)
		IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);

	if ((error = ops->load_firmware(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not load firmware, error %d\n", __func__,
		    error);
		return error;
	}
	/* Wait at most one second for firmware alive notification. */
	if ((error = zsleep(sc, &wlan_global_serializer, 0, "iwninit", hz)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: timeout waiting for adapter to initialize, error %d\n",
		    __func__, error);
		return error;
	}
	/* Do post-firmware initialization. */

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__);

	return ops->post_alive(sc);
}

/*
 * Quiesce and power down the hardware: mask interrupts, stop scheduler and
 * DMA, drain the rings and finally turn the adapter off.
 */
static void
iwn_hw_stop(struct iwn_softc *sc)
{
	int chnl, qid, ntries;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);

	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
	sc->sc_flags &= ~IWN_FLAG_USE_ICT;

	/* Make sure we no longer hold the NIC lock. */
	iwn_nic_unlock(sc);

	/* Stop TX scheduler. */
	iwn_prph_write(sc, sc->sched_txfact_addr, 0);

	/* Stop all DMA channels.
 */
	if (iwn_nic_lock(sc) == 0) {
		for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
			IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
			/* Wait up to 2ms for the channel to go idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				if (IWN_READ(sc, IWN_FH_TX_STATUS) &
				    IWN_FH_TX_STATUS_IDLE(chnl))
					break;
				DELAY(10);
			}
		}
		iwn_nic_unlock(sc);
	}

	/* Stop RX ring. */
	iwn_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < sc->ntxqs; qid++)
		iwn_reset_tx_ring(sc, &sc->txq[qid]);

	if (iwn_nic_lock(sc) == 0) {
		iwn_prph_write(sc, IWN_APMG_CLK_DIS,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
		iwn_nic_unlock(sc);
	}
	DELAY(5);
	/* Power OFF adapter. */
	iwn_apm_stop(sc);
}

/*
 * Task callback run when the RF kill switch is released: reinitialize the
 * hardware and bring the first vap back up.
 */
static void
iwn_radio_on_task(void *arg0, int pending)
{
	struct iwn_softc *sc = arg0;
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ieee80211vap *vap;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	wlan_serialize_enter();
	ifp = sc->sc_ifp;
	ic = ifp->if_l2com;
	vap = TAILQ_FIRST(&ic->ic_vaps);
	if (vap != NULL) {
		iwn_init_locked(sc);
		ieee80211_init(vap);
	}
	wlan_serialize_exit();
}

/*
 * Task callback run when the RF kill switch is engaged: stop the hardware
 * and the first vap, then re-enable interrupts so we notice the switch
 * being released again.
 */
static void
iwn_radio_off_task(void *arg0, int pending)
{
	struct iwn_softc *sc = arg0;
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ieee80211vap *vap;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	wlan_serialize_enter();
	ifp = sc->sc_ifp;
	ic = ifp->if_l2com;
	vap = TAILQ_FIRST(&ic->ic_vaps);
	iwn_stop_locked(sc);
	if (vap != NULL)
		ieee80211_stop(vap);

	/* Enable interrupts to get RF toggle notification.
 */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
	wlan_serialize_exit();
}

/*
 * Bring the interface up: prepare the hardware, read and upload firmware,
 * then configure the adapter. Must be called with the wlan serializer held.
 */
static void
iwn_init_locked(struct iwn_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/*
	 * Make sure we hold the serializer or we will have timing issues
	 * with the wlan subsystem.
	 */
	wlan_assert_serialized();
	if ((error = iwn_hw_prepare(sc)) != 0) {
		device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Initialize interrupt mask to default value. */
	sc->int_mask = IWN_INT_MASK_DEF;
	sc->sc_flags &= ~IWN_FLAG_USE_ICT;

	/* Check that the radio is not disabled by hardware switch. */
	if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		/* Enable interrupts to get RF toggle notifications. */
		IWN_WRITE(sc, IWN_INT, 0xffffffff);
		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
		return;
	}

	/* Read firmware images from the filesystem. */
	if ((error = iwn_read_firmware(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not read firmware, error %d\n", __func__,
		    error);
		goto fail;
	}

	/* Initialize hardware and upload firmware. */
	error = iwn_hw_init(sc);
	/* Firmware image is no longer needed once uploaded. */
	firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
	sc->fw_fp = NULL;
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not initialize hardware, error %d\n", __func__,
		    error);
		goto fail;
	}

	/* Configure adapter now that it is ready.
*/ 8287 if ((error = iwn_config(sc)) != 0) { 8288 device_printf(sc->sc_dev, 8289 "%s: could not configure device, error %d\n", __func__, 8290 error); 8291 goto fail; 8292 } 8293 8294 ifq_clr_oactive(&ifp->if_snd); 8295 ifp->if_flags |= IFF_RUNNING; 8296 8297 callout_reset(&sc->watchdog_to, hz, iwn_watchdog_timeout, sc); 8298 8299 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 8300 8301 return; 8302 8303 fail: iwn_stop_locked(sc); 8304 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 8305 } 8306 8307 static void 8308 iwn_init(void *arg) 8309 { 8310 struct iwn_softc *sc = arg; 8311 struct ifnet *ifp = sc->sc_ifp; 8312 struct ieee80211com *ic = ifp->if_l2com; 8313 8314 wlan_assert_serialized(); 8315 iwn_init_locked(sc); 8316 8317 if (ifp->if_flags & IFF_RUNNING) 8318 ieee80211_start_all(ic); 8319 } 8320 8321 static void 8322 iwn_stop_locked(struct iwn_softc *sc) 8323 { 8324 struct ifnet *ifp = sc->sc_ifp; 8325 8326 sc->sc_is_scanning = 0; 8327 sc->sc_tx_timer = 0; 8328 callout_stop(&sc->watchdog_to); 8329 callout_stop(&sc->calib_to); 8330 ifp->if_flags &= ~IFF_RUNNING; 8331 ifq_clr_oactive(&ifp->if_snd); 8332 8333 /* Power OFF hardware. */ 8334 iwn_hw_stop(sc); 8335 } 8336 8337 /* 8338 * Callback from net80211 to start a scan. 8339 */ 8340 static void 8341 iwn_scan_start(struct ieee80211com *ic) 8342 { 8343 struct ifnet *ifp = ic->ic_ifp; 8344 struct iwn_softc *sc = ifp->if_softc; 8345 8346 /* make the link LED blink while we're scanning */ 8347 iwn_set_led(sc, IWN_LED_LINK, 20, 2); 8348 } 8349 8350 /* 8351 * Callback from net80211 to terminate a scan. 
8352 */ 8353 static void 8354 iwn_scan_end(struct ieee80211com *ic) 8355 { 8356 struct ifnet *ifp = ic->ic_ifp; 8357 struct iwn_softc *sc = ifp->if_softc; 8358 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 8359 8360 if (vap->iv_state == IEEE80211_S_RUN) { 8361 /* Set link LED to ON status if we are associated */ 8362 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 8363 } 8364 } 8365 8366 /* 8367 * Callback from net80211 to force a channel change. 8368 */ 8369 static void 8370 iwn_set_channel(struct ieee80211com *ic) 8371 { 8372 const struct ieee80211_channel *c = ic->ic_curchan; 8373 struct ifnet *ifp = ic->ic_ifp; 8374 struct iwn_softc *sc = ifp->if_softc; 8375 int error; 8376 8377 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8378 8379 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 8380 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 8381 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 8382 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 8383 8384 /* 8385 * Only need to set the channel in Monitor mode. AP scanning and auth 8386 * are already taken care of by their respective firmware commands. 8387 */ 8388 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 8389 error = iwn_config(sc); 8390 if (error != 0) 8391 device_printf(sc->sc_dev, 8392 "%s: error %d settting channel\n", __func__, error); 8393 } 8394 } 8395 8396 /* 8397 * Callback from net80211 to start scanning of the current channel. 8398 */ 8399 static void 8400 iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 8401 { 8402 struct ieee80211vap *vap = ss->ss_vap; 8403 struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc; 8404 int error; 8405 8406 error = iwn_scan(sc); 8407 if (error != 0) 8408 ieee80211_cancel_scan(vap); 8409 } 8410 8411 /* 8412 * Callback from net80211 to handle the minimum dwell time being met. 8413 * The intent is to terminate the scan but we just let the firmware 8414 * notify us when it's finished as we have no safe way to abort it. 
8415 */ 8416 static void 8417 iwn_scan_mindwell(struct ieee80211_scan_state *ss) 8418 { 8419 /* NB: don't try to abort scan; wait for firmware to finish */ 8420 } 8421 8422 static void 8423 iwn_hw_reset_task(void *arg0, int pending) 8424 { 8425 struct iwn_softc *sc = arg0; 8426 struct ifnet *ifp; 8427 struct ieee80211com *ic; 8428 8429 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8430 8431 wlan_serialize_enter(); 8432 ifp = sc->sc_ifp; 8433 ic = ifp->if_l2com; 8434 iwn_stop_locked(sc); 8435 iwn_init_locked(sc); 8436 ieee80211_notify_radio(ic, 1); 8437 wlan_serialize_exit(); 8438 } 8439 #ifdef IWN_DEBUG 8440 #define IWN_DESC(x) case x: return #x 8441 #define COUNTOF(array) (sizeof(array) / sizeof(array[0])) 8442 8443 /* 8444 * Translate CSR code to string 8445 */ 8446 static char *iwn_get_csr_string(int csr) 8447 { 8448 switch (csr) { 8449 IWN_DESC(IWN_HW_IF_CONFIG); 8450 IWN_DESC(IWN_INT_COALESCING); 8451 IWN_DESC(IWN_INT); 8452 IWN_DESC(IWN_INT_MASK); 8453 IWN_DESC(IWN_FH_INT); 8454 IWN_DESC(IWN_GPIO_IN); 8455 IWN_DESC(IWN_RESET); 8456 IWN_DESC(IWN_GP_CNTRL); 8457 IWN_DESC(IWN_HW_REV); 8458 IWN_DESC(IWN_EEPROM); 8459 IWN_DESC(IWN_EEPROM_GP); 8460 IWN_DESC(IWN_OTP_GP); 8461 IWN_DESC(IWN_GIO); 8462 IWN_DESC(IWN_GP_UCODE); 8463 IWN_DESC(IWN_GP_DRIVER); 8464 IWN_DESC(IWN_UCODE_GP1); 8465 IWN_DESC(IWN_UCODE_GP2); 8466 IWN_DESC(IWN_LED); 8467 IWN_DESC(IWN_DRAM_INT_TBL); 8468 IWN_DESC(IWN_GIO_CHICKEN); 8469 IWN_DESC(IWN_ANA_PLL); 8470 IWN_DESC(IWN_HW_REV_WA); 8471 IWN_DESC(IWN_DBG_HPET_MEM); 8472 default: 8473 return "UNKNOWN CSR"; 8474 } 8475 } 8476 8477 /* 8478 * This function print firmware register 8479 */ 8480 static void 8481 iwn_debug_register(struct iwn_softc *sc) 8482 { 8483 int i; 8484 static const uint32_t csr_tbl[] = { 8485 IWN_HW_IF_CONFIG, 8486 IWN_INT_COALESCING, 8487 IWN_INT, 8488 IWN_INT_MASK, 8489 IWN_FH_INT, 8490 IWN_GPIO_IN, 8491 IWN_RESET, 8492 IWN_GP_CNTRL, 8493 IWN_HW_REV, 8494 IWN_EEPROM, 8495 IWN_EEPROM_GP, 8496 IWN_OTP_GP, 8497 
IWN_GIO, 8498 IWN_GP_UCODE, 8499 IWN_GP_DRIVER, 8500 IWN_UCODE_GP1, 8501 IWN_UCODE_GP2, 8502 IWN_LED, 8503 IWN_DRAM_INT_TBL, 8504 IWN_GIO_CHICKEN, 8505 IWN_ANA_PLL, 8506 IWN_HW_REV_WA, 8507 IWN_DBG_HPET_MEM, 8508 }; 8509 DPRINTF(sc, IWN_DEBUG_REGISTER, 8510 "CSR values: (2nd byte of IWN_INT_COALESCING is IWN_INT_PERIODIC)%s", 8511 "\n"); 8512 for (i = 0; i < COUNTOF(csr_tbl); i++){ 8513 DPRINTF(sc, IWN_DEBUG_REGISTER," %10s: 0x%08x ", 8514 iwn_get_csr_string(csr_tbl[i]), IWN_READ(sc, csr_tbl[i])); 8515 if ((i+1) % 3 == 0) 8516 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); 8517 } 8518 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); 8519 } 8520 #endif 8521