1 /*- 2 * Copyright (c) 2007-2009 Damien Bergamini <damien.bergamini@free.fr> 3 * Copyright (c) 2008 Benjamin Close <benjsc@FreeBSD.org> 4 * Copyright (c) 2008 Sam Leffler, Errno Consulting 5 * Copyright (c) 2011 Intel Corporation 6 * Copyright (c) 2013 Cedric GROSS <c.gross@kreiz-it.fr> 7 * Copyright (c) 2013 Adrian Chadd <adrian@FreeBSD.org> 8 * 9 * Permission to use, copy, modify, and distribute this software for any 10 * purpose with or without fee is hereby granted, provided that the above 11 * copyright notice and this permission notice appear in all copies. 12 * 13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 20 */ 21 22 /* 23 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network 24 * adapters. 
25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include "opt_wlan.h" 31 #include "opt_iwn.h" 32 33 #include <sys/param.h> 34 #include <sys/sockio.h> 35 #include <sys/sysctl.h> 36 #include <sys/mbuf.h> 37 #include <sys/kernel.h> 38 #include <sys/socket.h> 39 #include <sys/systm.h> 40 #include <sys/malloc.h> 41 #include <sys/bus.h> 42 #include <sys/conf.h> 43 #include <sys/rman.h> 44 #include <sys/endian.h> 45 #include <sys/firmware.h> 46 #include <sys/limits.h> 47 #include <sys/module.h> 48 #include <sys/priv.h> 49 #include <sys/queue.h> 50 #include <sys/taskqueue.h> 51 #if defined(__DragonFly__) 52 #include <sys/device.h> 53 #endif 54 55 #if defined(__DragonFly__) 56 /* empty */ 57 #else 58 #include <machine/bus.h> 59 #include <machine/resource.h> 60 #include <machine/clock.h> 61 #endif 62 63 #if defined(__DragonFly__) 64 #include <bus/pci/pcireg.h> 65 #include <bus/pci/pcivar.h> 66 #else 67 #include <dev/pci/pcireg.h> 68 #include <dev/pci/pcivar.h> 69 #endif 70 71 #include <net/if.h> 72 #include <net/if_var.h> 73 #include <net/if_dl.h> 74 #include <net/if_media.h> 75 76 #include <netinet/in.h> 77 #include <netinet/if_ether.h> 78 79 #include <netproto/802_11/ieee80211_var.h> 80 #include <netproto/802_11/ieee80211_radiotap.h> 81 #include <netproto/802_11/ieee80211_regdomain.h> 82 #include <netproto/802_11/ieee80211_ratectl.h> 83 84 #include <dev/netif/iwn/if_iwnreg.h> 85 #include <dev/netif/iwn/if_iwnvar.h> 86 #include <dev/netif/iwn/if_iwn_devid.h> 87 #include <dev/netif/iwn/if_iwn_chip_cfg.h> 88 #include <dev/netif/iwn/if_iwn_debug.h> 89 #include <dev/netif/iwn/if_iwn_ioctl.h> 90 91 struct iwn_ident { 92 uint16_t vendor; 93 uint16_t device; 94 const char *name; 95 }; 96 97 static const struct iwn_ident iwn_ident_table[] = { 98 { 0x8086, IWN_DID_6x05_1, "Intel Centrino Advanced-N 6205" }, 99 { 0x8086, IWN_DID_1000_1, "Intel Centrino Wireless-N 1000" }, 100 { 0x8086, IWN_DID_1000_2, "Intel Centrino Wireless-N 1000" }, 101 { 0x8086, 
IWN_DID_6x05_2, "Intel Centrino Advanced-N 6205" }, 102 { 0x8086, IWN_DID_6050_1, "Intel Centrino Advanced-N + WiMAX 6250" }, 103 { 0x8086, IWN_DID_6050_2, "Intel Centrino Advanced-N + WiMAX 6250" }, 104 { 0x8086, IWN_DID_x030_1, "Intel Centrino Wireless-N 1030" }, 105 { 0x8086, IWN_DID_x030_2, "Intel Centrino Wireless-N 1030" }, 106 { 0x8086, IWN_DID_x030_3, "Intel Centrino Advanced-N 6230" }, 107 { 0x8086, IWN_DID_x030_4, "Intel Centrino Advanced-N 6230" }, 108 { 0x8086, IWN_DID_6150_1, "Intel Centrino Wireless-N + WiMAX 6150" }, 109 { 0x8086, IWN_DID_6150_2, "Intel Centrino Wireless-N + WiMAX 6150" }, 110 { 0x8086, IWN_DID_2x00_1, "Intel(R) Centrino(R) Wireless-N 2200 BGN" }, 111 { 0x8086, IWN_DID_2x00_2, "Intel(R) Centrino(R) Wireless-N 2200 BGN" }, 112 /* XXX 2200D is IWN_SDID_2x00_4; there's no way to express this here! */ 113 { 0x8086, IWN_DID_2x30_1, "Intel Centrino Wireless-N 2230" }, 114 { 0x8086, IWN_DID_2x30_2, "Intel Centrino Wireless-N 2230" }, 115 { 0x8086, IWN_DID_130_1, "Intel Centrino Wireless-N 130" }, 116 { 0x8086, IWN_DID_130_2, "Intel Centrino Wireless-N 130" }, 117 { 0x8086, IWN_DID_100_1, "Intel Centrino Wireless-N 100" }, 118 { 0x8086, IWN_DID_100_2, "Intel Centrino Wireless-N 100" }, 119 { 0x8086, IWN_DID_105_1, "Intel Centrino Wireless-N 105" }, 120 { 0x8086, IWN_DID_105_2, "Intel Centrino Wireless-N 105" }, 121 { 0x8086, IWN_DID_135_1, "Intel Centrino Wireless-N 135" }, 122 { 0x8086, IWN_DID_135_2, "Intel Centrino Wireless-N 135" }, 123 { 0x8086, IWN_DID_4965_1, "Intel Wireless WiFi Link 4965" }, 124 { 0x8086, IWN_DID_6x00_1, "Intel Centrino Ultimate-N 6300" }, 125 { 0x8086, IWN_DID_6x00_2, "Intel Centrino Advanced-N 6200" }, 126 { 0x8086, IWN_DID_4965_2, "Intel Wireless WiFi Link 4965" }, 127 { 0x8086, IWN_DID_4965_3, "Intel Wireless WiFi Link 4965" }, 128 { 0x8086, IWN_DID_5x00_1, "Intel WiFi Link 5100" }, 129 { 0x8086, IWN_DID_4965_4, "Intel Wireless WiFi Link 4965" }, 130 { 0x8086, IWN_DID_5x00_3, "Intel Ultimate N WiFi Link 5300" }, 
131 { 0x8086, IWN_DID_5x00_4, "Intel Ultimate N WiFi Link 5300" }, 132 { 0x8086, IWN_DID_5x00_2, "Intel WiFi Link 5100" }, 133 { 0x8086, IWN_DID_6x00_3, "Intel Centrino Ultimate-N 6300" }, 134 { 0x8086, IWN_DID_6x00_4, "Intel Centrino Advanced-N 6200" }, 135 { 0x8086, IWN_DID_5x50_1, "Intel WiMAX/WiFi Link 5350" }, 136 { 0x8086, IWN_DID_5x50_2, "Intel WiMAX/WiFi Link 5350" }, 137 { 0x8086, IWN_DID_5x50_3, "Intel WiMAX/WiFi Link 5150" }, 138 { 0x8086, IWN_DID_5x50_4, "Intel WiMAX/WiFi Link 5150" }, 139 { 0x8086, IWN_DID_6035_1, "Intel Centrino Advanced 6235" }, 140 { 0x8086, IWN_DID_6035_2, "Intel Centrino Advanced 6235" }, 141 { 0, 0, NULL } 142 }; 143 144 static int iwn_probe(device_t); 145 static int iwn_attach(device_t); 146 static int iwn4965_attach(struct iwn_softc *, uint16_t); 147 static int iwn5000_attach(struct iwn_softc *, uint16_t); 148 static int iwn_config_specific(struct iwn_softc *, uint16_t); 149 static void iwn_radiotap_attach(struct iwn_softc *); 150 static void iwn_sysctlattach(struct iwn_softc *); 151 static struct ieee80211vap *iwn_vap_create(struct ieee80211com *, 152 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 153 const uint8_t [IEEE80211_ADDR_LEN], 154 const uint8_t [IEEE80211_ADDR_LEN]); 155 static void iwn_vap_delete(struct ieee80211vap *); 156 static int iwn_detach(device_t); 157 static int iwn_shutdown(device_t); 158 static int iwn_suspend(device_t); 159 static int iwn_resume(device_t); 160 static int iwn_nic_lock(struct iwn_softc *); 161 static int iwn_eeprom_lock(struct iwn_softc *); 162 static int iwn_init_otprom(struct iwn_softc *); 163 static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int); 164 static void iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int); 165 static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *, 166 void **, bus_size_t, bus_size_t); 167 static void iwn_dma_contig_free(struct iwn_dma_info *); 168 static int iwn_alloc_sched(struct iwn_softc *); 169 static void 
iwn_free_sched(struct iwn_softc *); 170 static int iwn_alloc_kw(struct iwn_softc *); 171 static void iwn_free_kw(struct iwn_softc *); 172 static int iwn_alloc_ict(struct iwn_softc *); 173 static void iwn_free_ict(struct iwn_softc *); 174 static int iwn_alloc_fwmem(struct iwn_softc *); 175 static void iwn_free_fwmem(struct iwn_softc *); 176 static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 177 static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 178 static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 179 static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *, 180 int); 181 static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 182 static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 183 static void iwn5000_ict_reset(struct iwn_softc *); 184 static int iwn_read_eeprom(struct iwn_softc *, 185 uint8_t macaddr[IEEE80211_ADDR_LEN]); 186 static void iwn4965_read_eeprom(struct iwn_softc *); 187 #ifdef IWN_DEBUG 188 static void iwn4965_print_power_group(struct iwn_softc *, int); 189 #endif 190 static void iwn5000_read_eeprom(struct iwn_softc *); 191 static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *); 192 static void iwn_read_eeprom_band(struct iwn_softc *, int, int, int *, 193 struct ieee80211_channel[]); 194 static void iwn_read_eeprom_ht40(struct iwn_softc *, int, int, int *, 195 struct ieee80211_channel[]); 196 static void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t); 197 static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *, 198 struct ieee80211_channel *); 199 static void iwn_getradiocaps(struct ieee80211com *, int, int *, 200 struct ieee80211_channel[]); 201 static int iwn_setregdomain(struct ieee80211com *, 202 struct ieee80211_regdomain *, int, 203 struct ieee80211_channel[]); 204 static void iwn_read_eeprom_enhinfo(struct iwn_softc *); 205 static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap 
*, 206 const uint8_t mac[IEEE80211_ADDR_LEN]); 207 static void iwn_newassoc(struct ieee80211_node *, int); 208 static int iwn_media_change(struct ifnet *); 209 static int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); 210 static void iwn_calib_timeout(void *); 211 static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *, 212 struct iwn_rx_data *); 213 static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *, 214 struct iwn_rx_data *); 215 static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *, 216 struct iwn_rx_data *); 217 static void iwn5000_rx_calib_results(struct iwn_softc *, 218 struct iwn_rx_desc *, struct iwn_rx_data *); 219 static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *, 220 struct iwn_rx_data *); 221 static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 222 struct iwn_rx_data *); 223 static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 224 struct iwn_rx_data *); 225 static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int, 226 uint8_t); 227 static void iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, int, void *); 228 static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *); 229 static void iwn_notif_intr(struct iwn_softc *); 230 static void iwn_wakeup_intr(struct iwn_softc *); 231 static void iwn_rftoggle_intr(struct iwn_softc *); 232 static void iwn_fatal_intr(struct iwn_softc *); 233 static void iwn_intr(void *); 234 static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t, 235 uint16_t); 236 static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t, 237 uint16_t); 238 #ifdef notyet 239 static void iwn5000_reset_sched(struct iwn_softc *, int, int); 240 #endif 241 static int iwn_tx_data(struct iwn_softc *, struct mbuf *, 242 struct ieee80211_node *); 243 static int iwn_tx_data_raw(struct iwn_softc *, struct mbuf *, 244 struct ieee80211_node *, 245 const struct ieee80211_bpf_params *params); 246 
static void iwn_xmit_task(void *arg0, int pending); 247 static int iwn_raw_xmit(struct ieee80211_node *, struct mbuf *, 248 const struct ieee80211_bpf_params *); 249 static int iwn_transmit(struct ieee80211com *, struct mbuf *); 250 static void iwn_watchdog(void *); 251 static int iwn_ioctl(struct ieee80211com *, u_long , void *); 252 static void iwn_parent(struct ieee80211com *); 253 static int iwn_cmd(struct iwn_softc *, int, const void *, int, int); 254 static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *, 255 int); 256 static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *, 257 int); 258 static int iwn_set_link_quality(struct iwn_softc *, 259 struct ieee80211_node *); 260 static int iwn_add_broadcast_node(struct iwn_softc *, int); 261 static int iwn_updateedca(struct ieee80211com *); 262 static void iwn_update_mcast(struct ieee80211com *); 263 static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t); 264 static int iwn_set_critical_temp(struct iwn_softc *); 265 static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *); 266 static void iwn4965_power_calibration(struct iwn_softc *, int); 267 static int iwn4965_set_txpower(struct iwn_softc *, 268 struct ieee80211_channel *, int); 269 static int iwn5000_set_txpower(struct iwn_softc *, 270 struct ieee80211_channel *, int); 271 static int iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 272 static int iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 273 static int iwn_get_noise(const struct iwn_rx_general_stats *); 274 static int iwn4965_get_temperature(struct iwn_softc *); 275 static int iwn5000_get_temperature(struct iwn_softc *); 276 static int iwn_init_sensitivity(struct iwn_softc *); 277 static void iwn_collect_noise(struct iwn_softc *, 278 const struct iwn_rx_general_stats *); 279 static int iwn4965_init_gains(struct iwn_softc *); 280 static int iwn5000_init_gains(struct iwn_softc *); 281 static int iwn4965_set_gains(struct 
iwn_softc *); 282 static int iwn5000_set_gains(struct iwn_softc *); 283 static void iwn_tune_sensitivity(struct iwn_softc *, 284 const struct iwn_rx_stats *); 285 static void iwn_save_stats_counters(struct iwn_softc *, 286 const struct iwn_stats *); 287 static int iwn_send_sensitivity(struct iwn_softc *); 288 static void iwn_check_rx_recovery(struct iwn_softc *, struct iwn_stats *); 289 static int iwn_set_pslevel(struct iwn_softc *, int, int, int); 290 static int iwn_send_btcoex(struct iwn_softc *); 291 static int iwn_send_advanced_btcoex(struct iwn_softc *); 292 static int iwn5000_runtime_calib(struct iwn_softc *); 293 static int iwn_config(struct iwn_softc *); 294 static int iwn_scan(struct iwn_softc *, struct ieee80211vap *, 295 struct ieee80211_scan_state *, struct ieee80211_channel *); 296 static int iwn_auth(struct iwn_softc *, struct ieee80211vap *vap); 297 static int iwn_run(struct iwn_softc *, struct ieee80211vap *vap); 298 static int iwn_ampdu_rx_start(struct ieee80211_node *, 299 struct ieee80211_rx_ampdu *, int, int, int); 300 static void iwn_ampdu_rx_stop(struct ieee80211_node *, 301 struct ieee80211_rx_ampdu *); 302 static int iwn_addba_request(struct ieee80211_node *, 303 struct ieee80211_tx_ampdu *, int, int, int); 304 static int iwn_addba_response(struct ieee80211_node *, 305 struct ieee80211_tx_ampdu *, int, int, int); 306 static int iwn_ampdu_tx_start(struct ieee80211com *, 307 struct ieee80211_node *, uint8_t); 308 static void iwn_ampdu_tx_stop(struct ieee80211_node *, 309 struct ieee80211_tx_ampdu *); 310 static void iwn4965_ampdu_tx_start(struct iwn_softc *, 311 struct ieee80211_node *, int, uint8_t, uint16_t); 312 static void iwn4965_ampdu_tx_stop(struct iwn_softc *, int, 313 uint8_t, uint16_t); 314 static void iwn5000_ampdu_tx_start(struct iwn_softc *, 315 struct ieee80211_node *, int, uint8_t, uint16_t); 316 static void iwn5000_ampdu_tx_stop(struct iwn_softc *, int, 317 uint8_t, uint16_t); 318 static int iwn5000_query_calibration(struct 
iwn_softc *); 319 static int iwn5000_send_calibration(struct iwn_softc *); 320 static int iwn5000_send_wimax_coex(struct iwn_softc *); 321 static int iwn5000_crystal_calib(struct iwn_softc *); 322 static int iwn5000_temp_offset_calib(struct iwn_softc *); 323 static int iwn5000_temp_offset_calibv2(struct iwn_softc *); 324 static int iwn4965_post_alive(struct iwn_softc *); 325 static int iwn5000_post_alive(struct iwn_softc *); 326 static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *, 327 int); 328 static int iwn4965_load_firmware(struct iwn_softc *); 329 static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t, 330 const uint8_t *, int); 331 static int iwn5000_load_firmware(struct iwn_softc *); 332 static int iwn_read_firmware_leg(struct iwn_softc *, 333 struct iwn_fw_info *); 334 static int iwn_read_firmware_tlv(struct iwn_softc *, 335 struct iwn_fw_info *, uint16_t); 336 static int iwn_read_firmware(struct iwn_softc *); 337 static void iwn_unload_firmware(struct iwn_softc *); 338 static int iwn_clock_wait(struct iwn_softc *); 339 static int iwn_apm_init(struct iwn_softc *); 340 static void iwn_apm_stop_master(struct iwn_softc *); 341 static void iwn_apm_stop(struct iwn_softc *); 342 static int iwn4965_nic_config(struct iwn_softc *); 343 static int iwn5000_nic_config(struct iwn_softc *); 344 static int iwn_hw_prepare(struct iwn_softc *); 345 static int iwn_hw_init(struct iwn_softc *); 346 static void iwn_hw_stop(struct iwn_softc *); 347 static void iwn_radio_on(void *, int); 348 static void iwn_radio_off(void *, int); 349 static void iwn_panicked(void *, int); 350 static void iwn_init_locked(struct iwn_softc *); 351 static void iwn_init(struct iwn_softc *); 352 static void iwn_stop_locked(struct iwn_softc *); 353 static void iwn_stop(struct iwn_softc *); 354 static void iwn_scan_start(struct ieee80211com *); 355 static void iwn_scan_end(struct ieee80211com *); 356 static void iwn_set_channel(struct ieee80211com *); 357 static void 
iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long); 358 static void iwn_scan_mindwell(struct ieee80211_scan_state *); 359 #ifdef IWN_DEBUG 360 static char *iwn_get_csr_string(int); 361 static void iwn_debug_register(struct iwn_softc *); 362 #endif 363 364 static device_method_t iwn_methods[] = { 365 /* Device interface */ 366 DEVMETHOD(device_probe, iwn_probe), 367 DEVMETHOD(device_attach, iwn_attach), 368 DEVMETHOD(device_detach, iwn_detach), 369 DEVMETHOD(device_shutdown, iwn_shutdown), 370 DEVMETHOD(device_suspend, iwn_suspend), 371 DEVMETHOD(device_resume, iwn_resume), 372 373 DEVMETHOD_END 374 }; 375 376 static driver_t iwn_driver = { 377 "iwn", 378 iwn_methods, 379 sizeof(struct iwn_softc) 380 }; 381 static devclass_t iwn_devclass; 382 383 DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, NULL, NULL); 384 385 MODULE_VERSION(iwn, 1); 386 387 MODULE_DEPEND(iwn, firmware, 1, 1, 1); 388 MODULE_DEPEND(iwn, pci, 1, 1, 1); 389 MODULE_DEPEND(iwn, wlan, 1, 1, 1); 390 391 static d_ioctl_t iwn_cdev_ioctl; 392 static d_open_t iwn_cdev_open; 393 static d_close_t iwn_cdev_close; 394 395 static struct dev_ops iwn_cdevsw = { 396 #if defined(__DragonFly__) 397 /* none */ 398 { "iwn", 0, 0 }, 399 #else 400 .d_version = D_VERSION, 401 .d_flags = 0, 402 #endif 403 .d_open = iwn_cdev_open, 404 .d_close = iwn_cdev_close, 405 .d_ioctl = iwn_cdev_ioctl, 406 #if defined(__DragonFly__) 407 /* none */ 408 #else 409 .d_name = "iwn", 410 #endif 411 }; 412 413 static int 414 iwn_probe(device_t dev) 415 { 416 const struct iwn_ident *ident; 417 418 for (ident = iwn_ident_table; ident->name != NULL; ident++) { 419 if (pci_get_vendor(dev) == ident->vendor && 420 pci_get_device(dev) == ident->device) { 421 device_set_desc(dev, ident->name); 422 return (BUS_PROBE_DEFAULT); 423 } 424 } 425 return ENXIO; 426 } 427 428 static int 429 iwn_is_3stream_device(struct iwn_softc *sc) 430 { 431 /* XXX for now only 5300, until the 5350 can be tested */ 432 if (sc->hw_type == 
IWN_HW_REV_TYPE_5300) 433 return (1); 434 return (0); 435 } 436 437 static int 438 iwn_attach(device_t dev) 439 { 440 struct iwn_softc *sc = device_get_softc(dev); 441 struct ieee80211com *ic; 442 int i, error, rid; 443 #if defined(__DragonFly__) 444 int irq_flags; 445 #endif 446 447 sc->sc_dev = dev; 448 449 #ifdef IWN_DEBUG 450 error = resource_int_value(device_get_name(sc->sc_dev), 451 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 452 if (error != 0) 453 sc->sc_debug = 0; 454 #else 455 sc->sc_debug = 0; 456 #endif 457 458 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: begin\n",__func__); 459 460 /* 461 * Get the offset of the PCI Express Capability Structure in PCI 462 * Configuration Space. 463 */ 464 #if defined(__DragonFly__) 465 error = pci_find_extcap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 466 #else 467 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 468 #endif 469 if (error != 0) { 470 device_printf(dev, "PCIe capability structure not found!\n"); 471 return error; 472 } 473 474 /* Clear device-specific "PCI retry timeout" register (41h). */ 475 pci_write_config(dev, 0x41, 0, 1); 476 477 /* Enable bus-mastering. */ 478 pci_enable_busmaster(dev); 479 480 rid = PCIR_BAR(0); 481 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 482 RF_ACTIVE); 483 if (sc->mem == NULL) { 484 device_printf(dev, "can't map mem space\n"); 485 error = ENOMEM; 486 return error; 487 } 488 sc->sc_st = rman_get_bustag(sc->mem); 489 sc->sc_sh = rman_get_bushandle(sc->mem); 490 491 #if defined(__DragonFly__) 492 pci_alloc_1intr(dev, 1, &rid, &irq_flags); 493 /* Install interrupt handler. */ 494 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags); 495 #else 496 i = 1; 497 rid = 0; 498 if (pci_alloc_msi(dev, &i) == 0) 499 rid = 1; 500 /* Install interrupt handler. */ 501 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 502 (rid != 0 ? 
0 : RF_SHAREABLE)); 503 #endif 504 if (sc->irq == NULL) { 505 device_printf(dev, "can't map interrupt\n"); 506 error = ENOMEM; 507 goto fail; 508 } 509 510 IWN_LOCK_INIT(sc); 511 512 /* Read hardware revision and attach. */ 513 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> IWN_HW_REV_TYPE_SHIFT) 514 & IWN_HW_REV_TYPE_MASK; 515 sc->subdevice_id = pci_get_subdevice(dev); 516 517 /* 518 * 4965 versus 5000 and later have different methods. 519 * Let's set those up first. 520 */ 521 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 522 error = iwn4965_attach(sc, pci_get_device(dev)); 523 else 524 error = iwn5000_attach(sc, pci_get_device(dev)); 525 if (error != 0) { 526 device_printf(dev, "could not attach device, error %d\n", 527 error); 528 goto fail; 529 } 530 531 /* 532 * Next, let's setup the various parameters of each NIC. 533 */ 534 error = iwn_config_specific(sc, pci_get_device(dev)); 535 if (error != 0) { 536 device_printf(dev, "could not attach device, error %d\n", 537 error); 538 goto fail; 539 } 540 541 if ((error = iwn_hw_prepare(sc)) != 0) { 542 device_printf(dev, "hardware not ready, error %d\n", error); 543 goto fail; 544 } 545 546 /* Allocate DMA memory for firmware transfers. */ 547 if ((error = iwn_alloc_fwmem(sc)) != 0) { 548 device_printf(dev, 549 "could not allocate memory for firmware, error %d\n", 550 error); 551 goto fail; 552 } 553 554 /* Allocate "Keep Warm" page. */ 555 if ((error = iwn_alloc_kw(sc)) != 0) { 556 device_printf(dev, 557 "could not allocate keep warm page, error %d\n", error); 558 goto fail; 559 } 560 561 /* Allocate ICT table for 5000 Series. */ 562 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 563 (error = iwn_alloc_ict(sc)) != 0) { 564 device_printf(dev, "could not allocate ICT table, error %d\n", 565 error); 566 goto fail; 567 } 568 569 /* Allocate TX scheduler "rings". 
*/ 570 if ((error = iwn_alloc_sched(sc)) != 0) { 571 device_printf(dev, 572 "could not allocate TX scheduler rings, error %d\n", error); 573 goto fail; 574 } 575 576 /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */ 577 for (i = 0; i < sc->ntxqs; i++) { 578 if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 579 device_printf(dev, 580 "could not allocate TX ring %d, error %d\n", i, 581 error); 582 goto fail; 583 } 584 } 585 586 /* Allocate RX ring. */ 587 if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) { 588 device_printf(dev, "could not allocate RX ring, error %d\n", 589 error); 590 goto fail; 591 } 592 593 /* Clear pending interrupts. */ 594 IWN_WRITE(sc, IWN_INT, 0xffffffff); 595 596 ic = &sc->sc_ic; 597 ic->ic_softc = sc; 598 ic->ic_name = device_get_nameunit(dev); 599 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 600 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 601 602 /* Set device capabilities. */ 603 ic->ic_caps = 604 IEEE80211_C_STA /* station mode supported */ 605 | IEEE80211_C_MONITOR /* monitor mode supported */ 606 #if 0 607 | IEEE80211_C_BGSCAN /* background scanning */ 608 #endif 609 | IEEE80211_C_TXPMGT /* tx power management */ 610 | IEEE80211_C_SHSLOT /* short slot time supported */ 611 | IEEE80211_C_WPA 612 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 613 #if 0 614 | IEEE80211_C_IBSS /* ibss/adhoc mode */ 615 #endif 616 | IEEE80211_C_WME /* WME */ 617 | IEEE80211_C_PMGT /* Station-side power mgmt */ 618 ; 619 620 /* Read MAC address, channels, etc from EEPROM. */ 621 if ((error = iwn_read_eeprom(sc, ic->ic_macaddr)) != 0) { 622 device_printf(dev, "could not read EEPROM, error %d\n", 623 error); 624 goto fail; 625 } 626 627 /* Count the number of available chains. 
*/ 628 sc->ntxchains = 629 ((sc->txchainmask >> 2) & 1) + 630 ((sc->txchainmask >> 1) & 1) + 631 ((sc->txchainmask >> 0) & 1); 632 sc->nrxchains = 633 ((sc->rxchainmask >> 2) & 1) + 634 ((sc->rxchainmask >> 1) & 1) + 635 ((sc->rxchainmask >> 0) & 1); 636 if (bootverbose) { 637 #if defined(__DragonFly__) 638 char ethstr[ETHER_ADDRSTRLEN+1]; 639 device_printf(dev, "MIMO %dT%dR, %.4s, address %s\n", 640 sc->ntxchains, sc->nrxchains, sc->eeprom_domain, 641 kether_ntoa(ic->ic_macaddr, ethstr)); 642 #else 643 device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n", 644 sc->ntxchains, sc->nrxchains, sc->eeprom_domain, 645 ic->ic_macaddr, ":"); 646 #endif 647 } 648 649 if (sc->sc_flags & IWN_FLAG_HAS_11N) { 650 ic->ic_rxstream = sc->nrxchains; 651 ic->ic_txstream = sc->ntxchains; 652 653 /* 654 * Some of the 3 antenna devices (ie, the 4965) only supports 655 * 2x2 operation. So correct the number of streams if 656 * it's not a 3-stream device. 657 */ 658 if (! iwn_is_3stream_device(sc)) { 659 if (ic->ic_rxstream > 2) 660 ic->ic_rxstream = 2; 661 if (ic->ic_txstream > 2) 662 ic->ic_txstream = 2; 663 } 664 665 ic->ic_htcaps = 666 IEEE80211_HTCAP_SMPS_OFF /* SMPS mode disabled */ 667 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ 668 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/ 669 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ 670 #ifdef notyet 671 | IEEE80211_HTCAP_GREENFIELD 672 #if IWN_RBUF_SIZE == 8192 673 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */ 674 #else 675 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ 676 #endif 677 #endif 678 /* s/w capabilities */ 679 | IEEE80211_HTC_HT /* HT operation */ 680 | IEEE80211_HTC_AMPDU /* tx A-MPDU */ 681 #ifdef notyet 682 | IEEE80211_HTC_AMSDU /* tx A-MSDU */ 683 #endif 684 ; 685 } 686 687 ieee80211_ifattach(ic); 688 ic->ic_vap_create = iwn_vap_create; 689 ic->ic_ioctl = iwn_ioctl; 690 ic->ic_parent = iwn_parent; 691 ic->ic_vap_delete = iwn_vap_delete; 692 ic->ic_transmit = iwn_transmit; 693 
ic->ic_raw_xmit = iwn_raw_xmit; 694 ic->ic_node_alloc = iwn_node_alloc; 695 sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start; 696 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start; 697 sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop; 698 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop; 699 sc->sc_addba_request = ic->ic_addba_request; 700 ic->ic_addba_request = iwn_addba_request; 701 sc->sc_addba_response = ic->ic_addba_response; 702 ic->ic_addba_response = iwn_addba_response; 703 sc->sc_addba_stop = ic->ic_addba_stop; 704 ic->ic_addba_stop = iwn_ampdu_tx_stop; 705 ic->ic_newassoc = iwn_newassoc; 706 ic->ic_wme.wme_update = iwn_updateedca; 707 ic->ic_update_mcast = iwn_update_mcast; 708 ic->ic_scan_start = iwn_scan_start; 709 ic->ic_scan_end = iwn_scan_end; 710 ic->ic_set_channel = iwn_set_channel; 711 ic->ic_scan_curchan = iwn_scan_curchan; 712 ic->ic_scan_mindwell = iwn_scan_mindwell; 713 ic->ic_getradiocaps = iwn_getradiocaps; 714 ic->ic_setregdomain = iwn_setregdomain; 715 716 iwn_radiotap_attach(sc); 717 718 #if defined(__DragonFly__) 719 callout_init_lk(&sc->calib_to, &sc->sc_lk); 720 callout_init_lk(&sc->watchdog_to, &sc->sc_lk); 721 #else 722 callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0); 723 callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0); 724 #endif 725 TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc); 726 TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc); 727 TASK_INIT(&sc->sc_panic_task, 0, iwn_panicked, sc); 728 TASK_INIT(&sc->sc_xmit_task, 0, iwn_xmit_task, sc); 729 730 mbufq_init(&sc->sc_xmit_queue, 1024); 731 732 sc->sc_tq = taskqueue_create("iwn_taskq", M_WAITOK, 733 taskqueue_thread_enqueue, &sc->sc_tq); 734 #if defined(__DragonFly__) 735 error = taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON, 736 -1, "iwn_taskq"); 737 #else 738 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwn_taskq"); 739 #endif 740 if (error != 0) { 741 device_printf(dev, "can't start threads, error %d\n", error); 742 goto fail; 743 } 744 745 iwn_sysctlattach(sc); 746 
/*
 * Define specific configuration based on device id and subdevice id.
 *
 * pid: PCI device id; the PCI subdevice id is read from sc->subdevice_id.
 *
 * On a match this fills in sc->limits (RF sensitivity limits),
 * sc->base_params (chip base parameters) and sc->fwname (firmware image
 * name), and for some parts also overrides the antenna chain masks and
 * sets device flags.
 *
 * Returns 0 on success, or ENOTSUP when the device/subdevice combination
 * is unknown to this driver.
 */
static int
iwn_config_specific(struct iwn_softc *sc, uint16_t pid)
{

	switch (pid) {
/* 4965 series */
	case IWN_DID_4965_1:
	case IWN_DID_4965_2:
	case IWN_DID_4965_3:
	case IWN_DID_4965_4:
		sc->base_params = &iwn4965_base_params;
		sc->limits = &iwn4965_sensitivity_limits;
		sc->fwname = "iwn4965fw";
		/* Override chains masks, ROM is known to be broken. */
		sc->txchainmask = IWN_ANT_AB;
		sc->rxchainmask = IWN_ANT_ABC;
		/* Enable normal btcoex */
		sc->sc_flags |= IWN_FLAG_BTCOEX;
		break;
/* 1000 Series */
	case IWN_DID_1000_1:
	case IWN_DID_1000_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_1000_1:
			case IWN_SDID_1000_2:
			case IWN_SDID_1000_3:
			case IWN_SDID_1000_4:
			case IWN_SDID_1000_5:
			case IWN_SDID_1000_6:
			case IWN_SDID_1000_7:
			case IWN_SDID_1000_8:
			case IWN_SDID_1000_9:
			case IWN_SDID_1000_10:
			case IWN_SDID_1000_11:
			case IWN_SDID_1000_12:
				sc->limits = &iwn1000_sensitivity_limits;
				sc->base_params = &iwn1000_base_params;
				sc->fwname = "iwn1000fw";
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
/* 6x00 Series */
	case IWN_DID_6x00_2:
	case IWN_DID_6x00_4:
	case IWN_DID_6x00_1:
	case IWN_DID_6x00_3:
		sc->fwname = "iwn6000fw";
		sc->limits = &iwn6000_sensitivity_limits;
		switch(sc->subdevice_id) {
			case IWN_SDID_6x00_1:
			case IWN_SDID_6x00_2:
			case IWN_SDID_6x00_8:
				//iwl6000_3agn_cfg
				sc->base_params = &iwn_6000_base_params;
				break;
			case IWN_SDID_6x00_3:
			case IWN_SDID_6x00_6:
			case IWN_SDID_6x00_9:
				//iwl6000i_2agn
			case IWN_SDID_6x00_4:
			case IWN_SDID_6x00_7:
			case IWN_SDID_6x00_10:
				//iwl6000i_2abg_cfg
			case IWN_SDID_6x00_5:
				//iwl6000i_2bg_cfg
				sc->base_params = &iwn_6000i_base_params;
				sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
				sc->txchainmask = IWN_ANT_BC;
				sc->rxchainmask = IWN_ANT_BC;
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
/* 6x05 Series */
	case IWN_DID_6x05_1:
	case IWN_DID_6x05_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_6x05_1:
			case IWN_SDID_6x05_4:
			case IWN_SDID_6x05_6:
				//iwl6005_2agn_cfg
			case IWN_SDID_6x05_2:
			case IWN_SDID_6x05_5:
			case IWN_SDID_6x05_7:
				//iwl6005_2abg_cfg
			case IWN_SDID_6x05_3:
				//iwl6005_2bg_cfg
			case IWN_SDID_6x05_8:
			case IWN_SDID_6x05_9:
				//iwl6005_2agn_sff_cfg
			case IWN_SDID_6x05_10:
				//iwl6005_2agn_d_cfg
			case IWN_SDID_6x05_11:
				//iwl6005_2agn_mow1_cfg
			case IWN_SDID_6x05_12:
				//iwl6005_2agn_mow2_cfg
				sc->fwname = "iwn6000g2afw";
				sc->limits = &iwn6000_sensitivity_limits;
				sc->base_params = &iwn_6000g2_base_params;
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
/* 6x35 Series */
	case IWN_DID_6035_1:
	case IWN_DID_6035_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_6035_1:
			case IWN_SDID_6035_2:
			case IWN_SDID_6035_3:
			case IWN_SDID_6035_4:
			case IWN_SDID_6035_5:
				sc->fwname = "iwn6000g2bfw";
				sc->limits = &iwn6235_sensitivity_limits;
				sc->base_params = &iwn_6235_base_params;
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
/* 6x50 WiFi/WiMax Series */
	case IWN_DID_6050_1:
	case IWN_DID_6050_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_6050_1:
			case IWN_SDID_6050_3:
			case IWN_SDID_6050_5:
				//iwl6050_2agn_cfg
			case IWN_SDID_6050_2:
			case IWN_SDID_6050_4:
			case IWN_SDID_6050_6:
				//iwl6050_2abg_cfg
				sc->fwname = "iwn6050fw";
				sc->txchainmask = IWN_ANT_AB;
				sc->rxchainmask = IWN_ANT_AB;
				sc->limits = &iwn6000_sensitivity_limits;
				sc->base_params = &iwn_6050_base_params;
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
/* 6150 WiFi/WiMax Series */
	case IWN_DID_6150_1:
	case IWN_DID_6150_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_6150_1:
			case IWN_SDID_6150_3:
			case IWN_SDID_6150_5:
				// iwl6150_bgn_cfg
			case IWN_SDID_6150_2:
			case IWN_SDID_6150_4:
			case IWN_SDID_6150_6:
				//iwl6150_bg_cfg
				sc->fwname = "iwn6050fw";
				sc->limits = &iwn6000_sensitivity_limits;
				sc->base_params = &iwn_6150_base_params;
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
/* 6030 Series and 1030 Series */
	case IWN_DID_x030_1:
	case IWN_DID_x030_2:
	case IWN_DID_x030_3:
	case IWN_DID_x030_4:
		switch(sc->subdevice_id) {
			case IWN_SDID_x030_1:
			case IWN_SDID_x030_3:
			case IWN_SDID_x030_5:
				// iwl1030_bgn_cfg
			case IWN_SDID_x030_2:
			case IWN_SDID_x030_4:
			case IWN_SDID_x030_6:
				//iwl1030_bg_cfg
			case IWN_SDID_x030_7:
			case IWN_SDID_x030_10:
			case IWN_SDID_x030_14:
				//iwl6030_2agn_cfg
			case IWN_SDID_x030_8:
			case IWN_SDID_x030_11:
			case IWN_SDID_x030_15:
				// iwl6030_2bgn_cfg
			case IWN_SDID_x030_9:
			case IWN_SDID_x030_12:
			case IWN_SDID_x030_16:
				// iwl6030_2abg_cfg
			case IWN_SDID_x030_13:
				//iwl6030_2bg_cfg
				sc->fwname = "iwn6000g2bfw";
				sc->limits = &iwn6000_sensitivity_limits;
				sc->base_params = &iwn_6000g2b_base_params;
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
/* 130 Series WiFi */
/* XXX: This series will need adjustment for rate.
 * see rx_with_siso_diversity in linux kernel
 */
	case IWN_DID_130_1:
	case IWN_DID_130_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_130_1:
			case IWN_SDID_130_3:
			case IWN_SDID_130_5:
				//iwl130_bgn_cfg
			case IWN_SDID_130_2:
			case IWN_SDID_130_4:
			case IWN_SDID_130_6:
				//iwl130_bg_cfg
				sc->fwname = "iwn6000g2bfw";
				sc->limits = &iwn6000_sensitivity_limits;
				sc->base_params = &iwn_6000g2b_base_params;
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
/* 100 Series WiFi */
	case IWN_DID_100_1:
	case IWN_DID_100_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_100_1:
			case IWN_SDID_100_2:
			case IWN_SDID_100_3:
			case IWN_SDID_100_4:
			case IWN_SDID_100_5:
			case IWN_SDID_100_6:
				sc->limits = &iwn1000_sensitivity_limits;
				sc->base_params = &iwn1000_base_params;
				sc->fwname = "iwn100fw";
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;

/* 105 Series */
/* XXX: This series will need adjustment for rate.
 * see rx_with_siso_diversity in linux kernel
 */
	case IWN_DID_105_1:
	case IWN_DID_105_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_105_1:
			case IWN_SDID_105_2:
			case IWN_SDID_105_3:
				//iwl105_bgn_cfg
			case IWN_SDID_105_4:
				//iwl105_bgn_d_cfg
				sc->limits = &iwn2030_sensitivity_limits;
				sc->base_params = &iwn2000_base_params;
				sc->fwname = "iwn105fw";
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;

/* 135 Series */
/* XXX: This series will need adjustment for rate.
 * see rx_with_siso_diversity in linux kernel
 */
	case IWN_DID_135_1:
	case IWN_DID_135_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_135_1:
			case IWN_SDID_135_2:
			case IWN_SDID_135_3:
				sc->limits = &iwn2030_sensitivity_limits;
				sc->base_params = &iwn2030_base_params;
				sc->fwname = "iwn135fw";
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;

/* 2x00 Series */
	case IWN_DID_2x00_1:
	case IWN_DID_2x00_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_2x00_1:
			case IWN_SDID_2x00_2:
			case IWN_SDID_2x00_3:
				//iwl2000_2bgn_cfg
			case IWN_SDID_2x00_4:
				//iwl2000_2bgn_d_cfg
				sc->limits = &iwn2030_sensitivity_limits;
				sc->base_params = &iwn2000_base_params;
				sc->fwname = "iwn2000fw";
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice) \n",
				    pid, sc->subdevice_id, sc->hw_type);
				return ENOTSUP;
		}
		break;
/* 2x30 Series */
	case IWN_DID_2x30_1:
	case IWN_DID_2x30_2:
		switch(sc->subdevice_id) {
			case IWN_SDID_2x30_1:
			case IWN_SDID_2x30_3:
			case IWN_SDID_2x30_5:
				//iwl2030_2bgn_cfg
			case IWN_SDID_2x30_2:
			case IWN_SDID_2x30_4:
			case IWN_SDID_2x30_6:
				//iwl2030_2bg_cfg
				sc->limits = &iwn2030_sensitivity_limits;
				sc->base_params = &iwn2030_base_params;
				sc->fwname = "iwn2030fw";
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
/* 5x00 Series */
	case IWN_DID_5x00_1:
	case IWN_DID_5x00_2:
	case IWN_DID_5x00_3:
	case IWN_DID_5x00_4:
		sc->limits = &iwn5000_sensitivity_limits;
		sc->base_params = &iwn5000_base_params;
		sc->fwname = "iwn5000fw";
		switch(sc->subdevice_id) {
			case IWN_SDID_5x00_1:
			case IWN_SDID_5x00_2:
			case IWN_SDID_5x00_3:
			case IWN_SDID_5x00_4:
			case IWN_SDID_5x00_9:
			case IWN_SDID_5x00_10:
			case IWN_SDID_5x00_11:
			case IWN_SDID_5x00_12:
			case IWN_SDID_5x00_17:
			case IWN_SDID_5x00_18:
			case IWN_SDID_5x00_19:
			case IWN_SDID_5x00_20:
				//iwl5100_agn_cfg
				sc->txchainmask = IWN_ANT_B;
				sc->rxchainmask = IWN_ANT_AB;
				break;
			case IWN_SDID_5x00_5:
			case IWN_SDID_5x00_6:
			case IWN_SDID_5x00_13:
			case IWN_SDID_5x00_14:
			case IWN_SDID_5x00_21:
			case IWN_SDID_5x00_22:
				//iwl5100_bgn_cfg
				sc->txchainmask = IWN_ANT_B;
				sc->rxchainmask = IWN_ANT_AB;
				break;
			case IWN_SDID_5x00_7:
			case IWN_SDID_5x00_8:
			case IWN_SDID_5x00_15:
			case IWN_SDID_5x00_16:
			case IWN_SDID_5x00_23:
			case IWN_SDID_5x00_24:
				//iwl5100_abg_cfg
				sc->txchainmask = IWN_ANT_B;
				sc->rxchainmask = IWN_ANT_AB;
				break;
			case IWN_SDID_5x00_25:
			case IWN_SDID_5x00_26:
			case IWN_SDID_5x00_27:
			case IWN_SDID_5x00_28:
			case IWN_SDID_5x00_29:
			case IWN_SDID_5x00_30:
			case IWN_SDID_5x00_31:
			case IWN_SDID_5x00_32:
			case IWN_SDID_5x00_33:
			case IWN_SDID_5x00_34:
			case IWN_SDID_5x00_35:
			case IWN_SDID_5x00_36:
				//iwl5300_agn_cfg
				sc->txchainmask = IWN_ANT_ABC;
				sc->rxchainmask = IWN_ANT_ABC;
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
/* 5x50 Series */
	case IWN_DID_5x50_1:
	case IWN_DID_5x50_2:
	case IWN_DID_5x50_3:
	case IWN_DID_5x50_4:
		sc->limits = &iwn5000_sensitivity_limits;
		sc->base_params = &iwn5000_base_params;
		sc->fwname = "iwn5000fw";
		switch(sc->subdevice_id) {
			case IWN_SDID_5x50_1:
			case IWN_SDID_5x50_2:
			case IWN_SDID_5x50_3:
				//iwl5350_agn_cfg
				sc->limits = &iwn5000_sensitivity_limits;
				sc->base_params = &iwn5000_base_params;
				sc->fwname = "iwn5000fw";
				break;
			case IWN_SDID_5x50_4:
			case IWN_SDID_5x50_5:
			case IWN_SDID_5x50_8:
			case IWN_SDID_5x50_9:
			case IWN_SDID_5x50_10:
			case IWN_SDID_5x50_11:
				//iwl5150_agn_cfg
			case IWN_SDID_5x50_6:
			case IWN_SDID_5x50_7:
			case IWN_SDID_5x50_12:
			case IWN_SDID_5x50_13:
				//iwl5150_abg_cfg
				sc->limits = &iwn5000_sensitivity_limits;
				sc->fwname = "iwn5150fw";
				sc->base_params = &iwn_5x50_base_params;
				break;
			default:
				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
				    "0x%04x rev %d not supported (subdevice)\n", pid,
				    sc->subdevice_id,sc->hw_type);
				return ENOTSUP;
		}
		break;
	default:
		device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id : 0x%04x"
		    "rev 0x%08x not supported (device)\n", pid, sc->subdevice_id,
		    sc->hw_type);
		return ENOTSUP;
	}
	return 0;
}
/*
 * Set up the hardware-abstraction ops vector and chip constants for the
 * 4965 generation.  `pid' (the PCI device id) is currently unused here;
 * it is kept for symmetry with iwn5000_attach().
 *
 * Always returns 0.
 */
static int
iwn4965_attach(struct iwn_softc *sc, uint16_t pid)
{
	struct iwn_ops *ops = &sc->ops;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* 4965-specific implementations of the chip operations. */
	ops->load_firmware = iwn4965_load_firmware;
	ops->read_eeprom = iwn4965_read_eeprom;
	ops->post_alive = iwn4965_post_alive;
	ops->nic_config = iwn4965_nic_config;
	ops->update_sched = iwn4965_update_sched;
	ops->get_temperature = iwn4965_get_temperature;
	ops->get_rssi = iwn4965_get_rssi;
	ops->set_txpower = iwn4965_set_txpower;
	ops->init_gains = iwn4965_init_gains;
	ops->set_gains = iwn4965_set_gains;
	ops->add_node = iwn4965_add_node;
	ops->tx_done = iwn4965_tx_done;
	ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
	ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;

	/* Chip layout constants (queue counts, DMA sizes, register addrs). */
	sc->ntxqs = IWN4965_NTXQUEUES;
	sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE;
	sc->ndmachnls = IWN4965_NDMACHNLS;
	sc->broadcast_id = IWN4965_ID_BROADCAST;
	sc->rxonsz = IWN4965_RXONSZ;
	sc->schedsz = IWN4965_SCHEDSZ;
	sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
	sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
	sc->fwsz = IWN4965_FWSZ;
	sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
	sc->limits = &iwn4965_sensitivity_limits;
	sc->fwname = "iwn4965fw";
	/* Override chains masks, ROM is known to be broken. */
	sc->txchainmask = IWN_ANT_AB;
	sc->rxchainmask = IWN_ANT_ABC;
	/* Enable normal btcoex */
	sc->sc_flags |= IWN_FLAG_BTCOEX;

	DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__);

	return 0;
}
*/ 1306 sc->txchainmask = IWN_ANT_AB; 1307 sc->rxchainmask = IWN_ANT_ABC; 1308 /* Enable normal btcoex */ 1309 sc->sc_flags |= IWN_FLAG_BTCOEX; 1310 1311 DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__); 1312 1313 return 0; 1314 } 1315 1316 static int 1317 iwn5000_attach(struct iwn_softc *sc, uint16_t pid) 1318 { 1319 struct iwn_ops *ops = &sc->ops; 1320 1321 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1322 1323 ops->load_firmware = iwn5000_load_firmware; 1324 ops->read_eeprom = iwn5000_read_eeprom; 1325 ops->post_alive = iwn5000_post_alive; 1326 ops->nic_config = iwn5000_nic_config; 1327 ops->update_sched = iwn5000_update_sched; 1328 ops->get_temperature = iwn5000_get_temperature; 1329 ops->get_rssi = iwn5000_get_rssi; 1330 ops->set_txpower = iwn5000_set_txpower; 1331 ops->init_gains = iwn5000_init_gains; 1332 ops->set_gains = iwn5000_set_gains; 1333 ops->add_node = iwn5000_add_node; 1334 ops->tx_done = iwn5000_tx_done; 1335 ops->ampdu_tx_start = iwn5000_ampdu_tx_start; 1336 ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop; 1337 sc->ntxqs = IWN5000_NTXQUEUES; 1338 sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE; 1339 sc->ndmachnls = IWN5000_NDMACHNLS; 1340 sc->broadcast_id = IWN5000_ID_BROADCAST; 1341 sc->rxonsz = IWN5000_RXONSZ; 1342 sc->schedsz = IWN5000_SCHEDSZ; 1343 sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ; 1344 sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ; 1345 sc->fwsz = IWN5000_FWSZ; 1346 sc->sched_txfact_addr = IWN5000_SCHED_TXFACT; 1347 sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; 1348 sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN; 1349 1350 return 0; 1351 } 1352 1353 /* 1354 * Attach the interface to 802.11 radiotap. 
/*
 * Attach the interface to 802.11 radiotap.
 */
static void
iwn_radiotap_attach(struct iwn_softc *sc)
{

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
	/* Register both TX and RX radiotap headers with net80211. */
	ieee80211_radiotap_attach(&sc->sc_ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWN_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWN_RX_RADIOTAP_PRESENT);
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

/*
 * Register the per-device "debug" sysctl knob.  Compiled out entirely
 * unless the driver is built with IWN_DEBUG.
 */
static void
iwn_sysctlattach(struct iwn_softc *sc)
{
#ifdef IWN_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug,
	    "control debugging printfs");
#endif
}

/*
 * net80211 vap_create method.  This driver supports a single vap at a
 * time; a second creation attempt returns NULL.  Allocates the driver
 * vap wrapper, hooks the driver's newstate handler in front of the
 * net80211 one, and completes attachment.
 */
static struct ieee80211vap *
iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwn_softc *sc = ic->ic_softc;
	struct iwn_vap *ivp;
	struct ieee80211vap *vap;

	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
		return NULL;

	ivp = kmalloc(sizeof(struct iwn_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	ivp->ctx = IWN_RXON_BSS_CTX;
	vap->iv_bmissthreshold = 10;		/* override default */
	/* Override with driver methods; keep the original for chaining. */
	ivp->iv_newstate = vap->iv_newstate;
	vap->iv_newstate = iwn_newstate;
	sc->ivap[IWN_RXON_BSS_CTX] = vap;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status,
	    mac);
	ic->ic_opmode = opmode;
	return vap;
}
/*
 * net80211 vap_delete method: tear down rate control, detach the vap
 * and release the driver wrapper allocated in iwn_vap_create().
 */
static void
iwn_vap_delete(struct ieee80211vap *vap)
{
	struct iwn_vap *ivp = IWN_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	kfree(ivp, M_80211_VAP);
}

/*
 * Drop every mbuf still pending on the driver transmit queue, releasing
 * the node reference stashed in each packet header.  Caller must hold
 * the driver lock.
 */
static void
iwn_xmit_queue_drain(struct iwn_softc *sc)
{
	struct mbuf *m;
	struct ieee80211_node *ni;

	IWN_LOCK_ASSERT(sc);
	while ((m = mbufq_dequeue(&sc->sc_xmit_queue)) != NULL) {
		/* The node reference travels in m_pkthdr.rcvif. */
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		ieee80211_free_node(ni);
		m_freem(m);
	}
}

/*
 * Queue an mbuf for later transmission.  Caller must hold the driver
 * lock.  Returns the mbufq_enqueue() error code (0 on success).
 */
static int
iwn_xmit_queue_enqueue(struct iwn_softc *sc, struct mbuf *m)
{

	IWN_LOCK_ASSERT(sc);
	return (mbufq_enqueue(&sc->sc_xmit_queue, m));
}

/*
 * Device detach: stop the hardware, detach from net80211, tear down the
 * interrupt, and free all DMA resources and the debug character device.
 * Also used as the error-unwind path of iwn_attach(), so every step
 * tolerates partially-initialized state.
 */
static int
iwn_detach(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);
	int qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Only tear down net80211 state if it was attached. */
	if (sc->sc_ic.ic_softc != NULL) {
		/* Free the mbuf queue and node references */
		IWN_LOCK(sc);
		iwn_xmit_queue_drain(sc);
		IWN_UNLOCK(sc);

		ieee80211_draintask(&sc->sc_ic, &sc->sc_radioon_task);
		ieee80211_draintask(&sc->sc_ic, &sc->sc_radiooff_task);
		iwn_stop(sc);

#if defined(__DragonFly__)
		/* doesn't exist for DFly, DFly drains tasks on free */
#else
		taskqueue_drain_all(sc->sc_tq);
#endif
		taskqueue_free(sc->sc_tq);

		callout_drain(&sc->watchdog_to);
		callout_drain(&sc->calib_to);
		ieee80211_ifdetach(&sc->sc_ic);
	}

	/* Uninstall interrupt handler. */
	if (sc->irq != NULL) {
		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq),
		    sc->irq);
		pci_release_msi(dev);
	}

	/* Free DMA resources. */
	iwn_free_rx_ring(sc, &sc->rxq);
	for (qid = 0; qid < sc->ntxqs; qid++)
		iwn_free_tx_ring(sc, &sc->txq[qid]);
	iwn_free_sched(sc);
	iwn_free_kw(sc);
	if (sc->ict != NULL)
		iwn_free_ict(sc);
	iwn_free_fwmem(sc);

	if (sc->mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem), sc->mem);

	if (sc->sc_cdev) {
		destroy_dev(sc->sc_cdev);
		sc->sc_cdev = NULL;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__);
	IWN_LOCK_DESTROY(sc);
	return 0;
}
/*
 * Device shutdown method: just quiesce the hardware.
 */
static int
iwn_shutdown(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);

	iwn_stop(sc);
	return 0;
}

/*
 * Device suspend method: let net80211 bring all vaps down.
 */
static int
iwn_suspend(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);

	ieee80211_suspend_all(&sc->sc_ic);
	return 0;
}

/*
 * Device resume method: undo the PCI quirk and bring the vaps back up.
 */
static int
iwn_resume(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);

	/* Clear device-specific "PCI retry timeout" register (41h). */
	pci_write_config(dev, 0x41, 0, 1);

	ieee80211_resume_all(&sc->sc_ic);
	return 0;
}

/*
 * Acquire exclusive MAC access for direct register/PRPH operations.
 * Returns 0 on success or ETIMEDOUT if the NIC did not grant access
 * within ~10ms (1000 polls, 10us apart).
 */
static int
iwn_nic_lock(struct iwn_softc *sc)
{
	int ntries;

	/* Request exclusive access to NIC. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);

	/* Spin until we actually get the lock. */
	for (ntries = 0; ntries < 1000; ntries++) {
		/* Access is granted only when ENA is set and SLEEP clear. */
		if ((IWN_READ(sc, IWN_GP_CNTRL) &
		    (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}
/*
 * Release the exclusive MAC access taken by iwn_nic_lock().
 */
static __inline void
iwn_nic_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
}

/*
 * Read a 32-bit word from peripheral (PRPH) space via the indirect
 * address/data register pair.  Caller is expected to hold the NIC lock.
 */
static __inline uint32_t
iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
	/* Order the address write before the data read. */
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_PRPH_RDATA);
}

/*
 * Write a 32-bit word to peripheral (PRPH) space via the indirect
 * address/data register pair.
 */
static __inline void
iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
	/* Order the address write before the data write. */
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
}

/* Read-modify-write: set `mask' bits in a PRPH register. */
static __inline void
iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
}

/* Read-modify-write: clear `mask' bits in a PRPH register. */
static __inline void
iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
}

/* Write `count' consecutive 32-bit words to PRPH space. */
static __inline void
iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
    const uint32_t *data, int count)
{
	for (; count > 0; count--, data++, addr += 4)
		iwn_prph_write(sc, addr, *data);
}

/*
 * Read a 32-bit word from NIC internal memory via the indirect
 * address/data register pair.
 */
static __inline uint32_t
iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_MEM_RDATA);
}

/*
 * Write a 32-bit word to NIC internal memory via the indirect
 * address/data register pair.
 */
static __inline void
iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_MEM_WDATA, data);
}
/*
 * Write a 16-bit halfword into NIC internal memory by read-modify-write
 * of the containing 32-bit word.  NOTE(review): the test is on any low
 * address bit, so this assumes `addr' is halfword-aligned (addr & 3 is
 * 0 or 2) — confirm against callers.
 */
static __inline void
iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
{
	uint32_t tmp;

	tmp = iwn_mem_read(sc, addr & ~3);
	if (addr & 3)
		tmp = (tmp & 0x0000ffff) | data << 16;	/* upper halfword */
	else
		tmp = (tmp & 0xffff0000) | data;	/* lower halfword */
	iwn_mem_write(sc, addr & ~3, tmp);
}

/* Read `count' consecutive 32-bit words from NIC internal memory. */
static __inline void
iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
    int count)
{
	for (; count > 0; count--, addr += 4)
		*data++ = iwn_mem_read(sc, addr);
}

/* Fill `count' consecutive 32-bit words of NIC memory with `val'. */
static __inline void
iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
    int count)
{
	for (; count > 0; count--, addr += 4)
		iwn_mem_write(sc, addr, val);
}

/*
 * Acquire exclusive access to the EEPROM.  Retries the whole request up
 * to 100 times, polling 100 times (10us apart) per attempt.  Returns 0
 * on success or ETIMEDOUT.
 */
static int
iwn_eeprom_lock(struct iwn_softc *sc)
{
	int i, ntries;

	for (i = 0; i < 100; i++) {
		/* Request exclusive access to EEPROM. */
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);

		/* Spin until we actually get the lock. */
		for (ntries = 0; ntries < 100; ntries++) {
			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
				return 0;
			DELAY(10);
		}
	}
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__);
	return ETIMEDOUT;
}

/* Release the EEPROM access taken by iwn_eeprom_lock(). */
static __inline void
iwn_eeprom_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
}
/*
 * Initialize access by host to One Time Programmable ROM.
 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
 *
 * For parts without OTP shadow RAM, this also walks the OTP linked list
 * of blocks to locate the block before the last one, which holds the
 * EEPROM image, and records its start in sc->prom_base.
 *
 * Returns 0 on success, an errno from the clock/NIC-lock/ROM helpers,
 * or EIO if the linked-list walk fails.
 */
static int
iwn_init_otprom(struct iwn_softc *sc)
{
	uint16_t prev, base, next;
	int count, error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Wait for clock stabilization before accessing prph. */
	if ((error = iwn_clock_wait(sc)) != 0)
		return error;

	/* Pulse the APMG reset request around a short delay. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	DELAY(5);
	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	iwn_nic_unlock(sc);

	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
	if (sc->base_params->shadow_ram_support) {
		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
		    IWN_RESET_LINK_PWR_MGMT_DIS);
	}
	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
	/* Clear ECC status. */
	IWN_SETBITS(sc, IWN_OTP_GP,
	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);

	/*
	 * Find the block before last block (contains the EEPROM image)
	 * for HW without OTP shadow RAM.
	 */
	if (! sc->base_params->shadow_ram_support) {
		/* Switch to absolute addressing mode. */
		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
		base = prev = 0;
		for (count = 0; count < sc->base_params->max_ll_items;
		    count++) {
			/* First word of each block links to the next one. */
			error = iwn_read_prom_data(sc, base, &next, 2);
			if (error != 0)
				return error;
			if (next == 0)	/* End of linked-list. */
				break;
			prev = base;
			base = le16toh(next);
		}
		/* Fail on an empty list or one that never terminated. */
		if (count == 0 || count == sc->base_params->max_ll_items)
			return EIO;
		/* Skip "next" word. */
		sc->prom_base = prev + 1;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;
}
/*
 * Read `count' bytes at word offset `addr' (relative to sc->prom_base)
 * from the EEPROM/OTPROM into `data'.  Each poll cycle fetches one
 * 32-bit word and extracts up to two payload bytes from its high half.
 * On OTPROM parts, uncorrectable ECC errors abort the read with EIO and
 * correctable ones are acknowledged by clearing the status bit.
 *
 * Returns 0 on success, ETIMEDOUT if a word never became valid, or EIO
 * on an uncorrectable OTPROM ECC error.
 */
static int
iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
{
	uint8_t *out = data;
	uint32_t val, tmp;
	int ntries;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	addr += sc->prom_base;
	for (; count > 0; count -= 2, addr++) {
		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
		/* Poll (up to 10 x 5us) for the read-valid bit. */
		for (ntries = 0; ntries < 10; ntries++) {
			val = IWN_READ(sc, IWN_EEPROM);
			if (val & IWN_EEPROM_READ_VALID)
				break;
			DELAY(5);
		}
		if (ntries == 10) {
			device_printf(sc->sc_dev,
			    "timeout reading ROM at 0x%x\n", addr);
			return ETIMEDOUT;
		}
		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
			/* OTPROM, check for ECC errors. */
			tmp = IWN_READ(sc, IWN_OTP_GP);
			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
				device_printf(sc->sc_dev,
				    "OTPROM ECC error at 0x%x\n", addr);
				return EIO;
			}
			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
				/* Correctable ECC error, clear bit. */
				IWN_SETBITS(sc, IWN_OTP_GP,
				    IWN_OTP_GP_ECC_CORR_STTS);
			}
		}
		/* Payload bytes live in bits 16-31 of the register word. */
		*out++ = val >> 16;
		if (count > 1)
			*out++ = val >> 24;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;
}
/*
 * bus_dmamap_load() callback: store the (single) segment's bus address
 * into the bus_addr_t pointed to by `arg'.  On load error the target is
 * left untouched.
 */
static void
iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

/*
 * Allocate `size' bytes of zeroed, physically-contiguous, coherent DMA
 * memory with the given alignment.  Fills in dma->{tag,map,vaddr,paddr}
 * and, if `kvap' is non-NULL, returns the kernel virtual address through
 * it.  On failure everything is released via iwn_dma_contig_free().
 *
 * Returns 0 on success or a busdma errno.
 */
static int
iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
    void **kvap, bus_size_t size, bus_size_t alignment)
{
	int error;

	dma->tag = NULL;
	dma->size = size;

	/* DragonFly's bus_dma_tag_create() lacks the lockfunc arguments. */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, 0, &dma->tag);
#else
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, 0, NULL, NULL, &dma->tag);
#endif
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	if (kvap != NULL)
		*kvap = dma->vaddr;

	return 0;

fail:	iwn_dma_contig_free(dma);
	return error;
}

/*
 * Release everything allocated by iwn_dma_contig_alloc().  Safe to call
 * on a partially-initialized or already-freed descriptor.
 */
static void
iwn_dma_contig_free(struct iwn_dma_info *dma)
{
	if (dma->vaddr != NULL) {
		bus_dmamap_sync(dma->tag, dma->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->tag, dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}
/* Allocate the TX scheduler area (sc->sched / sc->sched_dma). */
static int
iwn_alloc_sched(struct iwn_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched,
	    sc->schedsz, 1024);
}

/* Free the TX scheduler area. */
static void
iwn_free_sched(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->sched_dma);
}

/* Allocate the "Keep Warm" page (no KVA mapping needed). */
static int
iwn_alloc_kw(struct iwn_softc *sc)
{
	/* "Keep Warm" page must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096);
}

/* Free the "Keep Warm" page. */
static void
iwn_free_kw(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->kw_dma);
}

/* Allocate the interrupt cause table (sc->ict / sc->ict_dma). */
static int
iwn_alloc_ict(struct iwn_softc *sc)
{
	/* ICT table must be aligned on a 4KB boundary. */
	return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict,
	    IWN_ICT_SIZE, 4096);
}

/* Free the interrupt cause table. */
static void
iwn_free_ict(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->ict_dma);
}

/* Allocate the firmware-transfer DMA area (no KVA mapping needed). */
static int
iwn_alloc_fwmem(struct iwn_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16);
}

/* Free the firmware-transfer DMA area. */
static void
iwn_free_fwmem(struct iwn_softc *sc)
{
	iwn_dma_contig_free(&sc->fw_dma);
}
/*
 * Allocate the RX ring: descriptor array, status area, per-slot DMA
 * maps and jumbo-cluster mbufs, and publish each buffer's physical
 * address in the descriptor table.  On any failure the partially-built
 * ring is released with iwn_free_rx_ring().
 *
 * Returns 0 on success or a busdma/ENOBUFS errno.
 */
static int
iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
	    size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate RX ring DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate RX status area (16-byte aligned). */
	error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat,
	    sizeof (struct iwn_rx_status), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate RX status DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, 0, &ring->data_dmat);
#else
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
		struct iwn_rx_data *data = &ring->data[i];
		bus_addr_t paddr;

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}

		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    IWN_RBUF_SIZE);
		if (data->m == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not allocate RX mbuf\n", __func__);
			error = ENOBUFS;
			goto fail;
		}

		/*
		 * NOTE(review): EFBIG is deliberately tolerated here, but in
		 * that case iwn_dma_map_addr() never stored into paddr, so
		 * the descriptor below would use an indeterminate address —
		 * confirm whether EFBIG can actually occur with a 1-segment
		 * tag of IWN_RBUF_SIZE.
		 */
		error = bus_dmamap_load(ring->data_dmat, data->map,
		    mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
		    &paddr, BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf, error %d\n", __func__,
			    error);
			goto fail;
		}

		/* Set physical address of RX buffer (256-byte aligned). */
		ring->desc[i] = htole32(paddr >> 8);
	}

	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;

fail:	iwn_free_rx_ring(sc, ring);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);

	return error;
}
/*
 * Quiesce the RX DMA engine and rewind the software ring state.  Waits
 * up to ~10ms for the FH RX engine to report idle; proceeds regardless.
 */
static void
iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	int ntries;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (iwn_nic_lock(sc) == 0) {
		/* Stop the RX FH channel, then poll for idle. */
		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
			    IWN_FH_RX_STATUS_IDLE)
				break;
			DELAY(10);
		}
		iwn_nic_unlock(sc);
	}
	ring->cur = 0;
	sc->last_rx_valid = 0;
}

/*
 * Release all RX ring resources: descriptor and status DMA areas, every
 * per-slot mbuf and DMA map, and the buffer DMA tag.  Safe to call on a
 * partially-built ring (used from the iwn_alloc_rx_ring() error path).
 */
static void
iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
		struct iwn_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL)
			bus_dmamap_destroy(ring->data_dmat, data->map);
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
	    "->Doing %s \n", __func__);

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
		struct iwn_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			/* Unload before freeing the mbuf backing the map. */
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL)
			bus_dmamap_destroy(ring->data_dmat, data->map);
	}
	/* Tag goes last: all maps created from it must be destroyed first. */
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * Allocate one TX ring (queue 'qid'): the hardware descriptor array,
 * the parallel TX command array, and a DMA map per ring slot for
 * frame payloads.  On failure, everything allocated so far is torn
 * down via iwn_free_tx_ring().
 */
static int
iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Allocate TX descriptors (256-byte aligned).
	 */
	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
	    size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate TX ring DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* TX command blocks, one per descriptor (4-byte aligned). */
	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
	error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
	    size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate TX cmd DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Per-frame buffer tag: up to IWN_MAX_SCATTER - 1 segments
	 * (one descriptor slot is reserved for the command block).
	 * The DragonFly bus_dma_tag_create() lacks the two lockfunc
	 * arguments of the FreeBSD version.
	 */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    IWN_MAX_SCATTER - 1, MCLBYTES, 0, &ring->data_dmat);
#else
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    IWN_MAX_SCATTER - 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create TX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		/*
		 * Scratch area lives 12 bytes into each command block —
		 * presumably matches the iwn_tx_cmd layout in
		 * if_iwnreg.h; TODO confirm.
		 */
		data->scratch_paddr = paddr + 12;
		paddr += sizeof (struct iwn_tx_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create TX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;

fail:	iwn_free_tx_ring(sc, ring);
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
	return error;
}

static void
iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__);

	/*
	 * Drop every queued frame and its node reference; the ring is
	 * being rewound, not freed, so maps and the tag are kept.
	 */
	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->ni != NULL) {
			ieee80211_free_node(data->ni);
			data->ni = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	/* This queue can no longer be "full". */
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}

/*
 * Release everything iwn_alloc_tx_ring() set up: descriptor/command
 * DMA memory, queued mbufs, per-slot maps, and the buffer DMA tag.
 * Safe on a partially-initialized ring (used as the fail path).
 */
static void
iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(ring->data_dmat, data->map);
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * Re-arm the 5000-series ICT (interrupt cause table) mechanism:
 * interrupts are masked, the table is zeroed, its physical address is
 * programmed, and interrupts are re-enabled in ICT mode.
 */
static void
iwn5000_ict_reset(struct iwn_softc *sc)
{
	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);

	/* Reset ICT table. */
	memset(sc->ict, 0, IWN_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned).
	 */
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
	/* Table address is shifted out by 12 bits (4KB granularity). */
	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);

	/* Enable periodic RX interrupt. */
	sc->int_mask |= IWN_INT_RX_PERIODIC;
	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWN_FLAG_USE_ICT;

	/* Re-enable interrupts: ack anything pending, then unmask. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
}

/*
 * Read the adapter's EEPROM/OTPROM: SKU capabilities, radio
 * configuration, MAC address, and chip-specific data (via
 * ops->read_eeprom).  The adapter is powered on for the duration
 * and powered off again before returning.
 */
static int
iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
{
	struct iwn_ops *ops = &sc->ops;
	uint16_t val;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Check whether adapter has an EEPROM or an OTPROM. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
	    (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");

	/* Adapter has to be powered on for EEPROM access to work.
	 */
	if ((error = iwn_apm_init(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not power ON adapter, error %d\n", __func__,
		    error);
		return error;
	}

	/*
	 * NOTE(review): low 3 bits of IWN_EEPROM_GP are treated as a
	 * ROM-present signature here — confirm the exact field meaning
	 * against if_iwnreg.h.
	 */
	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
		device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
		return EIO;
	}
	if ((error = iwn_eeprom_lock(sc)) != 0) {
		device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n",
		    __func__, error);
		return error;
	}
	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
		if ((error = iwn_init_otprom(sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not initialize OTPROM, error %d\n",
			    __func__, error);
			return error;
		}
	}

	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
	DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val));
	/* Check if HT support is bonded out. */
	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
		sc->sc_flags |= IWN_FLAG_HAS_11N;

	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
	sc->rfcfg = le16toh(val);
	DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
	/* Read Tx/Rx chains from ROM unless it's known to be broken. */
	if (sc->txchainmask == 0)
		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
	if (sc->rxchainmask == 0)
		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);

	/* Read MAC address. */
	iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);

	/* Read adapter-specific information from EEPROM. */
	ops->read_eeprom(sc);

	iwn_apm_stop(sc);	/* Power OFF adapter.
				 */

	iwn_eeprom_unlock(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;
}

/*
 * 4965-specific EEPROM parsing: regulatory domain, channel lists,
 * band TX power limits (with sane fallbacks), per-group power
 * calibration samples, and the sampling voltage.
 */
static void
iwn4965_read_eeprom(struct iwn_softc *sc)
{
	uint32_t addr;
	uint16_t val;
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz & 40MHz). */
	for (i = 0; i < IWN_NBANDS - 1; i++) {
		addr = iwn4965_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
	/* Low byte is 2GHz, high byte is 5GHz. */
	sc->maxpwr2GHz = val & 0xff;
	sc->maxpwr5GHz = val >> 8;
	/* Check that EEPROM values are within valid range. */
	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
		sc->maxpwr5GHz = 38;
	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
		sc->maxpwr2GHz = 38;
	DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
	    sc->maxpwr2GHz, sc->maxpwr5GHz);

	/* Read samples for each TX power group. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
	    sizeof sc->bands);

	/* Read voltage at which samples were taken. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
	sc->eeprom_voltage = (int16_t)le16toh(val);
	DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
	    sc->eeprom_voltage);

#ifdef IWN_DEBUG
	/* Print samples.
	 */
	if (sc->sc_debug & IWN_DEBUG_ANY) {
		for (i = 0; i < IWN_NBANDS - 1; i++)
			iwn4965_print_power_group(sc, i);
	}
#endif

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

#ifdef IWN_DEBUG
/*
 * Debug-only dump of one 4965 TX power calibration band: channel
 * range and the per-chain temperature/gain/power/PA-detector samples
 * for both measured channels.
 */
static void
iwn4965_print_power_group(struct iwn_softc *sc, int i)
{
	struct iwn4965_eeprom_band *band = &sc->bands[i];
	struct iwn4965_eeprom_chan_samples *chans = band->chans;
	int j, c;

	kprintf("===band %d===\n", i);
	kprintf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
	kprintf("chan1 num=%d\n", chans[0].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			kprintf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[0].samples[c][j].temp,
			    chans[0].samples[c][j].gain,
			    chans[0].samples[c][j].power,
			    chans[0].samples[c][j].pa_det);
		}
	}
	kprintf("chan2 num=%d\n", chans[1].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			kprintf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[1].samples[c][j].temp,
			    chans[1].samples[c][j].gain,
			    chans[1].samples[c][j].power,
			    chans[1].samples[c][j].pa_det);
		}
	}
}
#endif

/*
 * 5000-series (and later) EEPROM parsing: regulatory domain and
 * channel lists (addressed indirectly through a base pointer read
 * from the ROM), enhanced TX power (6000 series), calibration header,
 * and either the 5150 temperature offset or the crystal calibration.
 */
static void
iwn5000_read_eeprom(struct iwn_softc *sc)
{
	struct iwn5000_eeprom_calib_hdr hdr;
	int32_t volt;
	uint32_t base, addr;
	uint16_t val;
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
	    sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz & 40MHz).
	 */
	for (i = 0; i < IWN_NBANDS - 1; i++) {
		addr = base + sc->base_params->regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read enhanced TX power information for 6000 Series. */
	if (sc->base_params->enhanced_TX_power)
		iwn_read_eeprom_enhinfo(sc);

	/* Calibration section: header gives version/PA type/voltage. */
	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: calib version=%u pa type=%u voltage=%u\n", __func__,
	    hdr.version, hdr.pa_type, le16toh(hdr.volt));
	sc->calib_ver = hdr.version;

	if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
		sc->eeprom_voltage = le16toh(hdr.volt);
		/*
		 * NOTE(review): the TEMP word lands in eeprom_temp_high
		 * and the VOLT word in eeprom_temp — looks swapped, but
		 * matches the upstream driver; confirm before "fixing".
		 */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp_high=le16toh(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		sc->eeprom_temp = le16toh(val);
	}

	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		/* Compute temperature offset. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp = le16toh(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		volt = le16toh(val);
		sc->temp_off = sc->eeprom_temp - (volt / -5);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
		    sc->eeprom_temp, volt, sc->temp_off);
	} else {
		/* Read crystal calibration. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
		    &sc->eeprom_crystal, sizeof (uint32_t));
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
		    le32toh(sc->eeprom_crystal));
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

}

/*
 * Translate EEPROM flags to net80211.
 */
static uint32_t
iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
{
	uint32_t nflags;

	nflags = 0;
	/* No active scanning allowed -> passive channel. */
	if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
		nflags |= IEEE80211_CHAN_PASSIVE;
	if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
		nflags |= IEEE80211_CHAN_NOADHOC;
	if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
		nflags |= IEEE80211_CHAN_DFS;
		/* XXX apparently IBSS may still be marked */
		nflags |= IEEE80211_CHAN_NOADHOC;
	}

	return nflags;
}

/*
 * Add the valid 20MHz channels of EEPROM band 'n' to the net80211
 * channel list.  Band 0 is 2GHz (11b/g, plus 11ng if HT is present);
 * the other bands are 5GHz (11a, plus 11na if HT is present).
 */
static void
iwn_read_eeprom_band(struct iwn_softc *sc, int n, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
	const struct iwn_chan_band *band = &iwn_bands[n];
	uint8_t bands[IEEE80211_MODE_BYTES];
	uint8_t chan;
	int i, error, nflags;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	memset(bands, 0, sizeof(bands));
	if (n == 0) {
		setbit(bands, IEEE80211_MODE_11B);
		setbit(bands, IEEE80211_MODE_11G);
		if (sc->sc_flags & IWN_FLAG_HAS_11N)
			setbit(bands, IEEE80211_MODE_11NG);
	} else {
		setbit(bands, IEEE80211_MODE_11A);
		if (sc->sc_flags & IWN_FLAG_HAS_11N)
			setbit(bands, IEEE80211_MODE_11NA);
	}

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "skip chan %d flags 0x%x maxpwr %d\n",
			    band->chan[i], channels[i].flags,
			    channels[i].maxpwr);
			continue;
		}

		chan = band->chan[i];
		nflags = iwn_eeprom_channel_flags(&channels[i]);
		error = ieee80211_add_channel(chans, maxchans, nchans,
		    chan, 0, channels[i].maxpwr, nflags, bands);
		if (error != 0)
			break;

		/* Save maximum allowed TX power for this channel.
		 */
		/* XXX wrong */
		sc->maxpwr[chan] = channels[i].maxpwr;

		DPRINTF(sc, IWN_DEBUG_RESET,
		    "add chan %d flags 0x%x maxpwr %d\n", chan,
		    channels[i].flags, channels[i].maxpwr);
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

/*
 * Add the valid HT40 channels of EEPROM band 'n' to the net80211
 * channel list.  No-op when the SKU has HT bonded out.  Band 5 holds
 * the 2GHz HT40 pairs, later bands the 5GHz ones.
 */
static void
iwn_read_eeprom_ht40(struct iwn_softc *sc, int n, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
	const struct iwn_chan_band *band = &iwn_bands[n];
	uint8_t chan;
	int i, error, nflags;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__);

	if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) {
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__);
		return;
	}

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "skip chan %d flags 0x%x maxpwr %d\n",
			    band->chan[i], channels[i].flags,
			    channels[i].maxpwr);
			continue;
		}

		chan = band->chan[i];
		nflags = iwn_eeprom_channel_flags(&channels[i]);
		nflags |= (n == 5 ?
		    IEEE80211_CHAN_G : IEEE80211_CHAN_A);
		error = ieee80211_add_channel_ht40(chans, maxchans, nchans,
		    chan, channels[i].maxpwr, nflags);
		switch (error) {
		case EINVAL:
			device_printf(sc->sc_dev,
			    "%s: no entry for channel %d\n", __func__, chan);
			continue;
		case ENOENT:
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "%s: skip chan %d, extension channel not found\n",
			    __func__, chan);
			continue;
		case ENOBUFS:
			device_printf(sc->sc_dev,
			    "%s: channel table is full!\n", __func__);
			break;
		case 0:
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "add ht40 chan %d flags 0x%x maxpwr %d\n",
			    chan, channels[i].flags, channels[i].maxpwr);
			/* FALLTHROUGH */
		default:
			break;
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

/*
 * Read one EEPROM channel band into the softc cache, then publish it
 * to ic_channels: bands 0-4 are 20MHz lists, later bands are HT40
 * pairs.  The channel list is kept sorted for net80211.
 */
static void
iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
{
	struct ieee80211com *ic = &sc->sc_ic;

	iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
	    iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));

	if (n < 5) {
		iwn_read_eeprom_band(sc, n, IEEE80211_CHAN_MAX, &ic->ic_nchans,
		    ic->ic_channels);
	} else {
		iwn_read_eeprom_ht40(sc, n, IEEE80211_CHAN_MAX, &ic->ic_nchans,
		    ic->ic_channels);
	}
	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
}

/*
 * Map a net80211 channel back to its cached EEPROM entry, or NULL if
 * the channel is unknown to the adapter.
 */
static struct iwn_eeprom_chan *
iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
{
	int band, chan, i, j;

	if (IEEE80211_IS_CHAN_HT40(c)) {
		band = IEEE80211_IS_CHAN_5GHZ(c) ?
		    6 : 5;
		/* HT40D: the control channel is the extension IEEE number. */
		if (IEEE80211_IS_CHAN_HT40D(c))
			chan = c->ic_extieee;
		else
			chan = c->ic_ieee;
		for (i = 0; i < iwn_bands[band].nchan; i++) {
			if (iwn_bands[band].chan[i] == chan)
				return &sc->eeprom_channels[band][i];
		}
	} else {
		for (j = 0; j < 5; j++) {
			for (i = 0; i < iwn_bands[j].nchan; i++) {
				/*
				 * The XOR restricts band 0 to non-11a
				 * channels and bands 1-4 to 11a ones.
				 */
				if (iwn_bands[j].chan[i] == c->ic_ieee &&
				    ((j == 0) ^ IEEE80211_IS_CHAN_A(c)) == 1)
					return &sc->eeprom_channels[j][i];
			}
		}
	}
	return NULL;
}

/*
 * net80211 ic_getradiocaps method: rebuild the channel list from the
 * cached EEPROM bands (20MHz bands first, then HT40 pairs).
 */
static void
iwn_getradiocaps(struct ieee80211com *ic,
    int maxchans, int *nchans, struct ieee80211_channel chans[])
{
	struct iwn_softc *sc = ic->ic_softc;
	int i;

	/* Parse the list of authorized channels. */
	for (i = 0; i < 5 && *nchans < maxchans; i++)
		iwn_read_eeprom_band(sc, i, maxchans, nchans, chans);
	for (i = 5; i < IWN_NBANDS - 1 && *nchans < maxchans; i++)
		iwn_read_eeprom_ht40(sc, i, maxchans, nchans, chans);
}

/*
 * Enforce flags read from EEPROM.
 */
static int
iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
    int nchan, struct ieee80211_channel chans[])
{
	struct iwn_softc *sc = ic->ic_softc;
	int i;

	/*
	 * Reject any channel the EEPROM does not know about and re-apply
	 * the EEPROM restrictions on the rest.
	 */
	for (i = 0; i < nchan; i++) {
		struct ieee80211_channel *c = &chans[i];
		struct iwn_eeprom_chan *channel;

		channel = iwn_find_eeprom_channel(sc, c);
		if (channel == NULL) {
			ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n",
			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
			return EINVAL;
		}
		c->ic_flags |= iwn_eeprom_channel_flags(channel);
	}

	return 0;
}

/*
 * Parse the 6000-series enhanced TX power table and apply per-channel
 * regulatory power limits to the already-built channel list.
 */
static void
iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
{
	struct iwn_eeprom_enhinfo enhinfo[35];
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint16_t val, base;
	int8_t maxpwr;
	uint8_t flags;
	int i, j;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
	    enhinfo, sizeof enhinfo);

	for (i = 0; i < nitems(enhinfo); i++) {
		flags = enhinfo[i].flags;
		if (!(flags & IWN_ENHINFO_VALID))
			continue;	/* Skip invalid entries.
					 */

		/* Take the largest limit over the usable TX chains. */
		maxpwr = 0;
		if (sc->txchainmask & IWN_ANT_A)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
		if (sc->txchainmask & IWN_ANT_B)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
		if (sc->txchainmask & IWN_ANT_C)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
		if (sc->ntxchains == 2)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
		else if (sc->ntxchains == 3)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);

		/*
		 * Match this entry against every channel that has the
		 * same band (5GHz/OFDM/CCK), the same width (HT40 or
		 * not), and — when the entry names one — the same IEEE
		 * channel number.
		 */
		for (j = 0; j < ic->ic_nchans; j++) {
			c = &ic->ic_channels[j];
			if ((flags & IWN_ENHINFO_5GHZ)) {
				if (!IEEE80211_IS_CHAN_A(c))
					continue;
			} else if ((flags & IWN_ENHINFO_OFDM)) {
				if (!IEEE80211_IS_CHAN_G(c))
					continue;
			} else if (!IEEE80211_IS_CHAN_B(c))
				continue;
			if ((flags & IWN_ENHINFO_HT40)) {
				if (!IEEE80211_IS_CHAN_HT40(c))
					continue;
			} else {
				if (IEEE80211_IS_CHAN_HT40(c))
					continue;
			}
			if (enhinfo[i].chan != 0 &&
			    enhinfo[i].chan != c->ic_ieee)
				continue;

			/* Table values are in half-dBm units. */
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "channel %d(%x), maxpwr %d\n", c->ic_ieee,
			    c->ic_flags, maxpwr / 2);
			c->ic_maxregpower = maxpwr / 2;
			c->ic_maxpower = maxpwr;
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

/*
 * net80211 node allocator: returns a zeroed iwn_node, which embeds
 * the generic ieee80211_node.
 */
static struct ieee80211_node *
iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	return kmalloc(sizeof (struct iwn_node), M_80211_NODE,
	    M_INTWAIT | M_ZERO);
}

/*
 * Map a legacy net80211 rate (in 500kbps units) to the firmware PLCP
 * code: OFDM rates get their signal field value, CCK rates their rate
 * in 100kbps units.  Unknown rates map to 0.
 */
static __inline int
rate2plcp(int rate)
{
	switch (rate & 0xff) {
	case 12:	return 0xd;
	case 18:	return 0xf;
	case 24:	return 0x5;
	case 36:	return 0x7;
	case 48:	return 0x9;
	case 72:	return 0xb;
	case 96:	return 0x1;
	case 108:	return 0x3;
	case 2:		return 10;
	case 4:		return 20;
	case 11:	return 55;
	case 22:	return 110;
	}
	return 0;
}

/* Pick the lowest-numbered TX antenna for single-stream rates. */
static int
iwn_get_1stream_tx_antmask(struct iwn_softc
*sc)
{

	return IWN_LSB(sc->txchainmask);
}

static int
iwn_get_2stream_tx_antmask(struct iwn_softc *sc)
{
	int tx;

	/*
	 * The '2 stream' setup is a bit .. odd.
	 *
	 * For NICs that support only 1 antenna, default to IWN_ANT_AB or
	 * the firmware panics (eg Intel 5100.)
	 *
	 * For NICs that support two antennas, we use ANT_AB.
	 *
	 * For NICs that support three antennas, we use the two that
	 * wasn't the default one.
	 *
	 * XXX TODO: if bluetooth (full concurrent) is enabled, restrict
	 * this to only one antenna.
	 */

	/* Default - transmit on the other antennas */
	tx = (sc->txchainmask & ~IWN_LSB(sc->txchainmask));

	/* Now, if it's zero, set it to IWN_ANT_AB, so to not panic firmware */
	if (tx == 0)
		tx = IWN_ANT_AB;

	/*
	 * If the NIC is a two-stream TX NIC, configure the TX mask to
	 * the default chainmask
	 */
	else if (sc->ntxchains == 2)
		tx = sc->txchainmask;

	return (tx);
}



/*
 * Calculate the required PLCP value from the given rate,
 * to the given node.
 *
 * This will take the node configuration (eg 11n, rate table
 * setup, etc) into consideration.
 */
static uint32_t
iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni,
    uint8_t rate)
{
	struct ieee80211com *ic = ni->ni_ic;
	uint32_t plcp = 0;
	int ridx;

	/*
	 * If it's an MCS rate, let's set the plcp correctly
	 * and set the relevant flags based on the node config.
	 */
	if (rate & IEEE80211_RATE_MCS) {
		/*
		 * Set the initial PLCP value to be between 0->31 for
		 * MCS 0 -> MCS 31, then set the "I'm an MCS rate!"
		 * flag.
2816 */ 2817 plcp = IEEE80211_RV(rate) | IWN_RFLAG_MCS; 2818 2819 /* 2820 * XXX the following should only occur if both 2821 * the local configuration _and_ the remote node 2822 * advertise these capabilities. Thus this code 2823 * may need fixing! 2824 */ 2825 2826 /* 2827 * Set the channel width and guard interval. 2828 */ 2829 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { 2830 plcp |= IWN_RFLAG_HT40; 2831 if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) 2832 plcp |= IWN_RFLAG_SGI; 2833 } else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) { 2834 plcp |= IWN_RFLAG_SGI; 2835 } 2836 2837 /* 2838 * Ensure the selected rate matches the link quality 2839 * table entries being used. 2840 */ 2841 if (rate > 0x8f) 2842 plcp |= IWN_RFLAG_ANT(sc->txchainmask); 2843 else if (rate > 0x87) 2844 plcp |= IWN_RFLAG_ANT(iwn_get_2stream_tx_antmask(sc)); 2845 else 2846 plcp |= IWN_RFLAG_ANT(iwn_get_1stream_tx_antmask(sc)); 2847 } else { 2848 /* 2849 * Set the initial PLCP - fine for both 2850 * OFDM and CCK rates. 2851 */ 2852 plcp = rate2plcp(rate); 2853 2854 /* Set CCK flag if it's CCK */ 2855 2856 /* XXX It would be nice to have a method 2857 * to map the ridx -> phy table entry 2858 * so we could just query that, rather than 2859 * this hack to check against IWN_RIDX_OFDM6. 2860 */ 2861 ridx = ieee80211_legacy_rate_lookup(ic->ic_rt, 2862 rate & IEEE80211_RATE_VAL); 2863 if (ridx < IWN_RIDX_OFDM6 && 2864 IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 2865 plcp |= IWN_RFLAG_CCK; 2866 2867 /* Set antenna configuration */ 2868 /* XXX TODO: is this the right antenna to use for legacy? 
		 */
		plcp |= IWN_RFLAG_ANT(iwn_get_1stream_tx_antmask(sc));
	}

	DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n",
	    __func__,
	    rate,
	    plcp);

	/* The firmware expects the PLCP word in little-endian order. */
	return (htole32(plcp));
}

static void
iwn_newassoc(struct ieee80211_node *ni, int isnew)
{
	/* Doesn't do anything at the moment */
}

static int
iwn_media_change(struct ifnet *ifp)
{
	int error;

	error = ieee80211_media_change(ifp);
	/* NB: only the fixed rate can change and that doesn't need a reset */
	return (error == ENETRESET ? 0 : error);
}

/*
 * net80211 state-machine hook.  Drops the com lock for the firmware
 * work (iwn_auth/iwn_run), stops the calibration callout, and finally
 * chains to the net80211 default handler saved in the vap.
 */
static int
iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwn_vap *ivp = IWN_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwn_softc *sc = ic->ic_softc;
	int error = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
	    ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]);

	IEEE80211_UNLOCK(ic);
	IWN_LOCK(sc);
#if defined(__DragonFly__)
	callout_stop_sync(&sc->calib_to);
#else
	callout_stop(&sc->calib_to);
#endif

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];

	switch (nstate) {
	case IEEE80211_S_ASSOC:
		if (vap->iv_state != IEEE80211_S_RUN)
			break;
		/* FALLTHROUGH */
	case IEEE80211_S_AUTH:
		if (vap->iv_state == IEEE80211_S_AUTH)
			break;

		/*
		 * !AUTH -> AUTH transition requires state reset to handle
		 * reassociations correctly.
2931 */ 2932 sc->rxon->associd = 0; 2933 sc->rxon->filter &= ~htole32(IWN_FILTER_BSS); 2934 sc->calib.state = IWN_CALIB_STATE_INIT; 2935 2936 /* Wait until we hear a beacon before we transmit */ 2937 if (IEEE80211_IS_CHAN_PASSIVE(ic->ic_curchan)) 2938 sc->sc_beacon_wait = 1; 2939 2940 if ((error = iwn_auth(sc, vap)) != 0) { 2941 device_printf(sc->sc_dev, 2942 "%s: could not move to auth state\n", __func__); 2943 } 2944 break; 2945 2946 case IEEE80211_S_RUN: 2947 /* 2948 * RUN -> RUN transition; Just restart the timers. 2949 */ 2950 if (vap->iv_state == IEEE80211_S_RUN) { 2951 sc->calib_cnt = 0; 2952 break; 2953 } 2954 2955 /* Wait until we hear a beacon before we transmit */ 2956 if (IEEE80211_IS_CHAN_PASSIVE(ic->ic_curchan)) 2957 sc->sc_beacon_wait = 1; 2958 2959 /* 2960 * !RUN -> RUN requires setting the association id 2961 * which is done with a firmware cmd. We also defer 2962 * starting the timers until that work is done. 2963 */ 2964 if ((error = iwn_run(sc, vap)) != 0) { 2965 device_printf(sc->sc_dev, 2966 "%s: could not move to run state\n", __func__); 2967 } 2968 break; 2969 2970 case IEEE80211_S_INIT: 2971 sc->calib.state = IWN_CALIB_STATE_INIT; 2972 /* 2973 * Purge the xmit queue so we don't have old frames 2974 * during a new association attempt. 2975 */ 2976 sc->sc_beacon_wait = 0; 2977 iwn_xmit_queue_drain(sc); 2978 break; 2979 2980 default: 2981 break; 2982 } 2983 IWN_UNLOCK(sc); 2984 IEEE80211_LOCK(ic); 2985 if (error != 0){ 2986 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); 2987 return error; 2988 } 2989 2990 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 2991 2992 return ivp->iv_newstate(vap, nstate, arg); 2993 } 2994 2995 static void 2996 iwn_calib_timeout(void *arg) 2997 { 2998 struct iwn_softc *sc = arg; 2999 3000 IWN_LOCK_ASSERT(sc); 3001 3002 /* Force automatic TX power calibration every 60 secs. 
	 */
	/* 120 ticks of the 500ms callout below = 60 seconds. */
	if (++sc->calib_cnt >= 120) {
		uint32_t flags = 0;

		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
		    "sending request for statistics");
		(void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
		    sizeof flags, 1);
		sc->calib_cnt = 0;
	}
	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
	    sc);
}

/*
 * Process an RX_PHY firmware notification.  This is usually immediately
 * followed by an MPDU_RX_DONE notification.
 */
static void
iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Save RX statistics, they will be used on MPDU_RX_DONE. */
	memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
	sc->last_rx_valid = 1;
}

/*
 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
 */
static void
iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_ops *ops = &sc->ops;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwn_rx_ring *ring = &sc->rxq;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct mbuf *m, *m1;
	struct iwn_rx_stat *stat;
	caddr_t head;
	bus_addr_t paddr;
	uint32_t flags;
	int error, len, rssi, nf;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	if (desc->type == IWN_MPDU_RX_DONE) {
		/* Check for prior RX_PHY notification.
		 */
		if (!sc->last_rx_valid) {
			DPRINTF(sc, IWN_DEBUG_ANY,
			    "%s: missing RX_PHY\n", __func__);
			return;
		}
		/* MPDU_RX_DONE: PHY stats came in the earlier RX_PHY. */
		stat = &sc->last_rx_stat;
	} else
		stat = (struct iwn_rx_stat *)(desc + 1);

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
		device_printf(sc->sc_dev,
		    "%s: invalid RX statistic header, len %d\n", __func__,
		    stat->cfg_phy_len);
		return;
	}
	/* Locate the frame payload and its length within the buffer. */
	if (desc->type == IWN_MPDU_RX_DONE) {
		struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
		head = (caddr_t)(mpdu + 1);
		len = le16toh(mpdu->len);
	} else {
		head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
		len = le16toh(stat->len);
	}

	/* Firmware appends a 32-bit status word after the frame. */
	flags = le32toh(*(uint32_t *)(head + len));

	/* Discard frames with a bad FCS early. */
	if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
		DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n",
		    __func__, flags);
#if defined(__DragonFly__)
		++ic->ic_ierrors;
#else
		counter_u64_add(ic->ic_ierrors, 1);
#endif
		return;
	}
	/* Discard frames that are too short.
*/ 3098 if (len < sizeof (struct ieee80211_frame_ack)) { 3099 DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n", 3100 __func__, len); 3101 #if defined(__DragonFly__) 3102 ++ic->ic_ierrors; 3103 #else 3104 counter_u64_add(ic->ic_ierrors, 1); 3105 #endif 3106 return; 3107 } 3108 3109 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE); 3110 if (m1 == NULL) { 3111 DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n", 3112 __func__); 3113 #if defined(__DragonFly__) 3114 ++ic->ic_ierrors; 3115 #else 3116 counter_u64_add(ic->ic_ierrors, 1); 3117 #endif 3118 return; 3119 } 3120 bus_dmamap_unload(ring->data_dmat, data->map); 3121 3122 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 3123 IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3124 if (error != 0 && error != EFBIG) { 3125 device_printf(sc->sc_dev, 3126 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 3127 m_freem(m1); 3128 3129 /* Try to reload the old mbuf. */ 3130 error = bus_dmamap_load(ring->data_dmat, data->map, 3131 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, 3132 &paddr, BUS_DMA_NOWAIT); 3133 if (error != 0 && error != EFBIG) { 3134 panic("%s: could not load old RX mbuf", __func__); 3135 } 3136 /* Physical address may have changed. */ 3137 ring->desc[ring->cur] = htole32(paddr >> 8); 3138 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 3139 BUS_DMASYNC_PREWRITE); 3140 #if defined(__DragonFly__) 3141 ++ic->ic_ierrors; 3142 #else 3143 counter_u64_add(ic->ic_ierrors, 1); 3144 #endif 3145 return; 3146 } 3147 3148 m = data->m; 3149 data->m = m1; 3150 /* Update RX descriptor. */ 3151 ring->desc[ring->cur] = htole32(paddr >> 8); 3152 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3153 BUS_DMASYNC_PREWRITE); 3154 3155 /* Finalize mbuf. */ 3156 m->m_data = head; 3157 m->m_pkthdr.len = m->m_len = len; 3158 3159 /* Grab a reference to the source node. 
*/ 3160 wh = mtod(m, struct ieee80211_frame *); 3161 if (len >= sizeof(struct ieee80211_frame_min)) 3162 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 3163 else 3164 ni = NULL; 3165 nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN && 3166 (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95; 3167 3168 rssi = ops->get_rssi(sc, stat); 3169 3170 if (ieee80211_radiotap_active(ic)) { 3171 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 3172 3173 tap->wr_flags = 0; 3174 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) 3175 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 3176 tap->wr_dbm_antsignal = (int8_t)rssi; 3177 tap->wr_dbm_antnoise = (int8_t)nf; 3178 tap->wr_tsft = stat->tstamp; 3179 switch (stat->rate) { 3180 /* CCK rates. */ 3181 case 10: tap->wr_rate = 2; break; 3182 case 20: tap->wr_rate = 4; break; 3183 case 55: tap->wr_rate = 11; break; 3184 case 110: tap->wr_rate = 22; break; 3185 /* OFDM rates. */ 3186 case 0xd: tap->wr_rate = 12; break; 3187 case 0xf: tap->wr_rate = 18; break; 3188 case 0x5: tap->wr_rate = 24; break; 3189 case 0x7: tap->wr_rate = 36; break; 3190 case 0x9: tap->wr_rate = 48; break; 3191 case 0xb: tap->wr_rate = 72; break; 3192 case 0x1: tap->wr_rate = 96; break; 3193 case 0x3: tap->wr_rate = 108; break; 3194 /* Unknown rate: should not happen. */ 3195 default: tap->wr_rate = 0; 3196 } 3197 } 3198 3199 /* 3200 * If it's a beacon and we're waiting, then do the 3201 * wakeup. This should unblock raw_xmit/start. 3202 */ 3203 if (sc->sc_beacon_wait) { 3204 uint8_t type, subtype; 3205 /* NB: Re-assign wh */ 3206 wh = mtod(m, struct ieee80211_frame *); 3207 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3208 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3209 /* 3210 * This assumes at this point we've received our own 3211 * beacon. 
3212 */ 3213 DPRINTF(sc, IWN_DEBUG_TRACE, 3214 "%s: beacon_wait, type=%d, subtype=%d\n", 3215 __func__, type, subtype); 3216 if (type == IEEE80211_FC0_TYPE_MGT && 3217 subtype == IEEE80211_FC0_SUBTYPE_BEACON) { 3218 DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, 3219 "%s: waking things up\n", __func__); 3220 /* queue taskqueue to transmit! */ 3221 taskqueue_enqueue(sc->sc_tq, &sc->sc_xmit_task); 3222 } 3223 } 3224 3225 IWN_UNLOCK(sc); 3226 3227 /* Send the frame to the 802.11 layer. */ 3228 if (ni != NULL) { 3229 if (ni->ni_flags & IEEE80211_NODE_HT) 3230 m->m_flags |= M_AMPDU; 3231 (void)ieee80211_input(ni, m, rssi - nf, nf); 3232 /* Node is no longer needed. */ 3233 ieee80211_free_node(ni); 3234 } else 3235 (void)ieee80211_input_all(ic, m, rssi - nf, nf); 3236 3237 IWN_LOCK(sc); 3238 3239 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3240 3241 } 3242 3243 /* Process an incoming Compressed BlockAck. */ 3244 static void 3245 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3246 struct iwn_rx_data *data) 3247 { 3248 struct iwn_ops *ops = &sc->ops; 3249 struct iwn_node *wn; 3250 struct ieee80211_node *ni; 3251 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); 3252 struct iwn_tx_ring *txq; 3253 struct iwn_tx_data *txdata; 3254 struct ieee80211_tx_ampdu *tap; 3255 struct mbuf *m; 3256 uint64_t bitmap; 3257 uint16_t ssn; 3258 uint8_t tid; 3259 int ackfailcnt = 0, i, lastidx, qid, *res, shift; 3260 int tx_ok = 0, tx_err = 0; 3261 3262 DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, "->%s begin\n", __func__); 3263 3264 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 3265 3266 qid = le16toh(ba->qid); 3267 txq = &sc->txq[ba->qid]; 3268 tap = sc->qid2tap[ba->qid]; 3269 tid = tap->txa_tid; 3270 wn = (void *)tap->txa_ni; 3271 3272 res = NULL; 3273 ssn = 0; 3274 if (!IEEE80211_AMPDU_RUNNING(tap)) { 3275 res = tap->txa_private; 3276 ssn = tap->txa_start & 0xfff; 3277 } 3278 3279 for (lastidx = le16toh(ba->ssn) & 
0xff; txq->read != lastidx;) { 3280 txdata = &txq->data[txq->read]; 3281 3282 /* Unmap and free mbuf. */ 3283 bus_dmamap_sync(txq->data_dmat, txdata->map, 3284 BUS_DMASYNC_POSTWRITE); 3285 bus_dmamap_unload(txq->data_dmat, txdata->map); 3286 m = txdata->m, txdata->m = NULL; 3287 ni = txdata->ni, txdata->ni = NULL; 3288 3289 KASSERT(ni != NULL, ("no node")); 3290 KASSERT(m != NULL, ("no mbuf")); 3291 3292 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: freeing m=%p\n", __func__, m); 3293 ieee80211_tx_complete(ni, m, 1); 3294 3295 txq->queued--; 3296 txq->read = (txq->read + 1) % IWN_TX_RING_COUNT; 3297 } 3298 3299 if (txq->queued == 0 && res != NULL) { 3300 iwn_nic_lock(sc); 3301 ops->ampdu_tx_stop(sc, qid, tid, ssn); 3302 iwn_nic_unlock(sc); 3303 sc->qid2tap[qid] = NULL; 3304 kfree(res, M_DEVBUF); 3305 return; 3306 } 3307 3308 if (wn->agg[tid].bitmap == 0) 3309 return; 3310 3311 shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff); 3312 if (shift < 0) 3313 shift += 0x100; 3314 3315 if (wn->agg[tid].nframes > (64 - shift)) 3316 return; 3317 3318 /* 3319 * Walk the bitmap and calculate how many successful and failed 3320 * attempts are made. 3321 * 3322 * Yes, the rate control code doesn't know these are A-MPDU 3323 * subframes and that it's okay to fail some of these. 
3324 */ 3325 ni = tap->txa_ni; 3326 bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap; 3327 for (i = 0; bitmap; i++) { 3328 if ((bitmap & 1) == 0) { 3329 tx_err ++; 3330 ieee80211_ratectl_tx_complete(ni->ni_vap, ni, 3331 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 3332 } else { 3333 tx_ok ++; 3334 ieee80211_ratectl_tx_complete(ni->ni_vap, ni, 3335 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 3336 } 3337 bitmap >>= 1; 3338 } 3339 3340 DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, 3341 "->%s: end; %d ok; %d err\n",__func__, tx_ok, tx_err); 3342 3343 } 3344 3345 /* 3346 * Process a CALIBRATION_RESULT notification sent by the initialization 3347 * firmware on response to a CMD_CALIB_CONFIG command (5000 only). 3348 */ 3349 static void 3350 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3351 struct iwn_rx_data *data) 3352 { 3353 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); 3354 int len, idx = -1; 3355 3356 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3357 3358 /* Runtime firmware should not send such a notification. 
 */
	if (sc->sc_flags & IWN_FLAG_CALIB_DONE){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received after clib done\n",
		    __func__);
		return;
	}
	/* Notification payload length, minus the trailing status word. */
	len = (le32toh(desc->len) & 0x3fff) - 4;
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Map the calibration code to a save slot, if this chip needs it. */
	switch (calib->code) {
	case IWN5000_PHY_CALIB_DC:
		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_DC)
			idx = 0;
		break;
	case IWN5000_PHY_CALIB_LO:
		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_LO)
			idx = 1;
		break;
	case IWN5000_PHY_CALIB_TX_IQ:
		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ)
			idx = 2;
		break;
	case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ_PERIODIC)
			idx = 3;
		break;
	case IWN5000_PHY_CALIB_BASE_BAND:
		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_BASE_BAND)
			idx = 4;
		break;
	}
	if (idx == -1)	/* Ignore other results. */
		return;

	/* Save calibration result. */
	if (sc->calibcmd[idx].buf != NULL)
		kfree(sc->calibcmd[idx].buf, M_DEVBUF);
	sc->calibcmd[idx].buf = kmalloc(len, M_DEVBUF, M_INTWAIT);
	if (sc->calibcmd[idx].buf == NULL) {
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "not enough memory for calibration result %d\n",
		    calib->code);
		return;
	}
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "saving calibration result idx=%d, code=%d len=%d\n", idx, calib->code, len);
	sc->calibcmd[idx].len = len;
	memcpy(sc->calibcmd[idx].buf, calib, len);
}

/*
 * Normalize a firmware statistics notification into sc->last_stat.
 * Bluetooth-capable firmware sends a larger structure which is converted
 * to the legacy iwn_stats layout so the rest of the driver only deals
 * with one format.
 */
static void
iwn_stats_update(struct iwn_softc *sc, struct iwn_calib_state *calib,
    struct iwn_stats *stats, int len)
{
	struct iwn_stats_bt *stats_bt;
	struct iwn_stats *lstats;

	/*
	 * First - check whether the length is the bluetooth or normal.
	 *
	 * If it's normal - just copy it and bump out.
	 * Otherwise we have to convert things.
	 */

	if (len == sizeof(struct iwn_stats) + 4) {
		memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats));
		sc->last_stat_valid = 1;
		return;
	}

	/*
	 * If it's not the bluetooth size - log, then just copy.
	 */
	if (len != sizeof(struct iwn_stats_bt) + 4) {
		DPRINTF(sc, IWN_DEBUG_STATS,
		    "%s: size of rx statistics (%d) not an expected size!\n",
		    __func__,
		    len);
		memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats));
		sc->last_stat_valid = 1;
		return;
	}

	/*
	 * Ok. Time to copy.
	 */
	stats_bt = (struct iwn_stats_bt *) stats;
	lstats = &sc->last_stat;

	/* flags */
	lstats->flags = stats_bt->flags;
	/* rx_bt */
	memcpy(&lstats->rx.ofdm, &stats_bt->rx_bt.ofdm,
	    sizeof(struct iwn_rx_phy_stats));
	memcpy(&lstats->rx.cck, &stats_bt->rx_bt.cck,
	    sizeof(struct iwn_rx_phy_stats));
	memcpy(&lstats->rx.general, &stats_bt->rx_bt.general_bt.common,
	    sizeof(struct iwn_rx_general_stats));
	memcpy(&lstats->rx.ht, &stats_bt->rx_bt.ht,
	    sizeof(struct iwn_rx_ht_phy_stats));
	/* tx */
	memcpy(&lstats->tx, &stats_bt->tx,
	    sizeof(struct iwn_tx_stats));
	/* general */
	memcpy(&lstats->general, &stats_bt->general,
	    sizeof(struct iwn_general_stats));

	/* XXX TODO: Squirrel away the extra bluetooth stats somewhere */
	sc->last_stat_valid = 1;
}

/*
 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
 * The latter is sent by the firmware after each received beacon.
 */
static void
iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_ops *ops = &sc->ops;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
	struct iwn_stats *lstats;
	int temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Ignore statistics received during a scan. */
	if (vap->iv_state != IEEE80211_S_RUN ||
	    (ic->ic_flags & IEEE80211_F_SCAN)){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received during calib\n",
		    __func__);
		return;
	}

	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_STATS,
	    "%s: received statistics, cmd %d, len %d\n",
	    __func__, desc->type, le16toh(desc->len));
	sc->calib_cnt = 0;	/* Reset TX power calibration timeout. */

	/*
	 * Collect/track general statistics for reporting.
	 *
	 * This takes care of ensuring that the bluetooth sized message
	 * will be correctly converted to the legacy sized message.
	 */
	iwn_stats_update(sc, calib, stats, le16toh(desc->len));

	/*
	 * And now, let's take a reference of it to use!
	 */
	lstats = &sc->last_stat;

	/* Test if temperature has changed. */
	if (lstats->general.temp != sc->rawtemp) {
		/* Convert "raw" temperature to degC. */
		sc->rawtemp = stats->general.temp;
		temp = ops->get_temperature(sc);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
		    __func__, temp);

		/* Update TX power if need be (4965AGN only). */
		if (sc->hw_type == IWN_HW_REV_TYPE_4965)
			iwn4965_power_calibration(sc, temp);
	}

	if (desc->type != IWN_BEACON_STATISTICS)
		return;	/* Reply to a statistics request.
 */

	sc->noise = iwn_get_noise(&lstats->rx.general);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);

	/* Test that RSSI and noise are present in stats report. */
	if (le32toh(lstats->rx.general.flags) != 1) {
		DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
		    "received statistics without RSSI");
		return;
	}

	/* Feed the beacon statistics into the calibration state machine. */
	if (calib->state == IWN_CALIB_STATE_ASSOC)
		iwn_collect_noise(sc, &lstats->rx.general);
	else if (calib->state == IWN_CALIB_STATE_RUN) {
		iwn_tune_sensitivity(sc, &lstats->rx);
		/*
		 * XXX TODO: Only run the RX recovery if we're associated!
		 */
		iwn_check_rx_recovery(sc, lstats);
		iwn_save_stats_counters(sc, lstats);
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
}

/*
 * Save the relevant statistic counters for the next calibration
 * pass.
 */
static void
iwn_save_stats_counters(struct iwn_softc *sc, const struct iwn_stats *rs)
{
	struct iwn_calib_state *calib = &sc->calib;

	/* Save counters values for next call. */
	calib->bad_plcp_cck = le32toh(rs->rx.cck.bad_plcp);
	calib->fa_cck = le32toh(rs->rx.cck.fa);
	calib->bad_plcp_ht = le32toh(rs->rx.ht.bad_plcp);
	calib->bad_plcp_ofdm = le32toh(rs->rx.ofdm.bad_plcp);
	calib->fa_ofdm = le32toh(rs->rx.ofdm.fa);

	/* Last time we received these tick values */
	sc->last_calib_ticks = ticks;
}

/*
 * Process a TX_DONE firmware notification.  Unfortunately, the 4965AGN
 * and 5000 adapters have different incompatible TX status formats.
 */
static void
iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
	struct iwn_tx_ring *ring;
	int qid;

	qid = desc->qid & 0xf;
	ring = &sc->txq[qid];

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
	    "qid %d idx %d RTS retries %d ACK retries %d nkill %d rate %x duration %d status %x\n",
	    __func__, desc->qid, desc->idx,
	    stat->rtsfailcnt,
	    stat->ackfailcnt,
	    stat->btkillcnt,
	    stat->rate, le16toh(stat->duration),
	    le32toh(stat->status));

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
	/* Aggregation queues carry A-MPDU status; others use plain status. */
	if (qid >= sc->firstaggqueue) {
		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
		    stat->ackfailcnt, &stat->status);
	} else {
		iwn_tx_done(sc, desc, stat->ackfailcnt,
		    le32toh(stat->status) & 0xff);
	}
}

static void
iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
	struct iwn_tx_ring *ring;
	int qid;

	qid = desc->qid & 0xf;
	ring = &sc->txq[qid];

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
	    "qid %d idx %d RTS retries %d ACK retries %d nkill %d rate %x duration %d status %x\n",
	    __func__, desc->qid, desc->idx,
	    stat->rtsfailcnt,
	    stat->ackfailcnt,
	    stat->btkillcnt,
	    stat->rate, le16toh(stat->duration),
	    le32toh(stat->status));

#ifdef notyet
	/* Reset TX scheduler slot.
 */
	iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
#endif

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
	/* Aggregation queues carry A-MPDU status; others use plain status. */
	if (qid >= sc->firstaggqueue) {
		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
		    stat->ackfailcnt, &stat->status);
	} else {
		iwn_tx_done(sc, desc, stat->ackfailcnt,
		    le16toh(stat->status) & 0xff);
	}
}

/*
 * Adapter-independent backend for TX_DONE firmware notifications.
 */
static void
iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
    uint8_t status)
{
	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
	struct iwn_tx_data *data = &ring->data[desc->idx];
	struct mbuf *m;
	struct ieee80211_node *ni;
	struct ieee80211vap *vap;

	KASSERT(data->ni != NULL, ("no node"));

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, data->map);
	m = data->m, data->m = NULL;
	ni = data->ni, data->ni = NULL;
	vap = ni->ni_vap;

	/*
	 * Update rate control statistics for the node.
	 */
	if (status & IWN_TX_FAIL)
		ieee80211_ratectl_tx_complete(vap, ni,
		    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
	else
		ieee80211_ratectl_tx_complete(vap, ni,
		    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);

	/*
	 * Channels marked for "radar" require traffic to be received
	 * to unlock before we can transmit.  Until traffic is seen
	 * any attempt to transmit is returned immediately with status
	 * set to IWN_TX_FAIL_TX_LOCKED.  Unfortunately this can easily
	 * happen on first authenticate after scanning.  To workaround
	 * this we ignore a failure of this sort in AUTH state so the
	 * 802.11 layer will fall back to using a timeout to wait for
	 * the AUTH reply.
This allows the firmware time to see
	 * traffic so a subsequent retry of AUTH succeeds.  It's
	 * unclear why the firmware does not maintain state for
	 * channels recently visited as this would allow immediate
	 * use of the channel after a scan (where we see traffic).
	 */
	if (status == IWN_TX_FAIL_TX_LOCKED &&
	    ni->ni_vap->iv_state == IEEE80211_S_AUTH)
		ieee80211_tx_complete(ni, m, 0);
	else
		ieee80211_tx_complete(ni, m,
		    (status & IWN_TX_FAIL) != 0);

	/* Frame acknowledged by firmware: clear the watchdog. */
	sc->sc_tx_timer = 0;
	if (--ring->queued < IWN_TX_RING_LOMARK)
		sc->qfullmsk &= ~(1 << ring->qid);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
}

/*
 * Process a "command done" firmware notification.  This is where we wakeup
 * processes waiting for a synchronous command completion.
 */
static void
iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
{
	struct iwn_tx_ring *ring;
	struct iwn_tx_data *data;
	int cmd_queue_num;

	/* PAN-capable firmware uses a different command queue. */
	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
		cmd_queue_num = IWN_PAN_CMD_QUEUE;
	else
		cmd_queue_num = IWN_CMD_QUEUE_NUM;

	if ((desc->qid & IWN_RX_DESC_QID_MSK) != cmd_queue_num)
		return;	/* Not a command ack. */

	ring = &sc->txq[cmd_queue_num];
	data = &ring->data[desc->idx];

	/* If the command was mapped in an mbuf, free it.
 */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake up any thread sleeping on this command slot. */
	wakeup(&ring->desc[desc->idx]);
}

static void
iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes,
    int ackfailcnt, void *stat)
{
	struct iwn_ops *ops = &sc->ops;
	struct iwn_tx_ring *ring = &sc->txq[qid];
	struct iwn_tx_data *data;
	struct mbuf *m;
	struct iwn_node *wn;
	struct ieee80211_node *ni;
	struct ieee80211_tx_ampdu *tap;
	uint64_t bitmap;
	uint32_t *status = stat;
	uint16_t *aggstatus = stat;
	uint16_t ssn;
	uint8_t tid;
	int bit, i, lastidx, *res, seqno, shift, start;

	/* XXX TODO: status is le16 field! Grr */

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: nframes=%d, status=0x%08x\n",
	    __func__,
	    nframes,
	    *status);

	tap = sc->qid2tap[qid];
	tid = tap->txa_tid;
	wn = (void *)tap->txa_ni;
	ni = tap->txa_ni;

	/*
	 * XXX TODO: ACK and RTS failures would be nice here!
	 */

	/*
	 * A-MPDU single frame status - if we failed to transmit it
	 * in A-MPDU, then it may be a permanent failure.
	 *
	 * XXX TODO: check what the Linux iwlwifi driver does here;
	 * there's some permanent and temporary failures that may be
	 * handled differently.
	 */
	if (nframes == 1) {
		if ((*status & 0xff) != 1 && (*status & 0xff) != 2) {
#ifdef NOT_YET
			kprintf("ieee80211_send_bar()\n");
#endif
			/*
			 * If we completely fail a transmit, make sure a
			 * notification is pushed up to the rate control
			 * layer.
			 */
			ieee80211_ratectl_tx_complete(ni->ni_vap,
			    ni,
			    IEEE80211_RATECTL_TX_FAILURE,
			    &ackfailcnt,
			    NULL);
		} else {
			/*
			 * If nframes=1, then we won't be getting a BA for
			 * this frame.  Ensure that we correctly update the
			 * rate control code with how many retries were
			 * needed to send it.
			 */
			ieee80211_ratectl_tx_complete(ni->ni_vap,
			    ni,
			    IEEE80211_RATECTL_TX_SUCCESS,
			    &ackfailcnt,
			    NULL);
		}
	}

	/* Rebuild the ACK bitmap relative to the first attempted frame. */
	bitmap = 0;
	start = idx;
	for (i = 0; i < nframes; i++) {
		/* Skip entries the firmware flagged as failed attempts. */
		if (le16toh(aggstatus[i * 2]) & 0xc)
			continue;

		idx = le16toh(aggstatus[2*i + 1]) & 0xff;
		bit = idx - start;
		shift = 0;
		/* Handle sequence-number wrap-around within the 256 window. */
		if (bit >= 64) {
			shift = 0x100 - idx + start;
			bit = 0;
			start = idx;
		} else if (bit <= -64)
			bit = 0x100 - start + idx;
		else if (bit < 0) {
			shift = start - idx;
			start = idx;
			bit = 0;
		}
		bitmap = bitmap << shift;
		bitmap |= 1ULL << bit;
	}
	tap = sc->qid2tap[qid];
	tid = tap->txa_tid;
	wn = (void *)tap->txa_ni;
	wn->agg[tid].bitmap = bitmap;
	wn->agg[tid].startidx = start;
	wn->agg[tid].nframes = nframes;

	res = NULL;
	ssn = 0;
	if (!IEEE80211_AMPDU_RUNNING(tap)) {
		res = tap->txa_private;
		ssn = tap->txa_start & 0xfff;
	}

	/* This is going nframes DWORDS into the descriptor? */
	seqno = le32toh(*(status + nframes)) & 0xfff;
	for (lastidx = (seqno & 0xff); ring->read != lastidx;) {
		data = &ring->data[ring->read];

		/* Unmap and free mbuf.
 */
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m = data->m, data->m = NULL;
		ni = data->ni, data->ni = NULL;

		KASSERT(ni != NULL, ("no node"));
		KASSERT(m != NULL, ("no mbuf"));
		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: freeing m=%p\n", __func__, m);
		ieee80211_tx_complete(ni, m, 1);

		ring->queued--;
		ring->read = (ring->read + 1) % IWN_TX_RING_COUNT;
	}

	/* Ring drained while the session was stopping: finish teardown. */
	if (ring->queued == 0 && res != NULL) {
		iwn_nic_lock(sc);
		ops->ampdu_tx_stop(sc, qid, tid, ssn);
		iwn_nic_unlock(sc);
		sc->qid2tap[qid] = NULL;
		kfree(res, M_DEVBUF);
		return;
	}

	sc->sc_tx_timer = 0;
	if (ring->queued < IWN_TX_RING_LOMARK)
		sc->qfullmsk &= ~(1 << ring->qid);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
}

/*
 * Process an INT_FH_RX or INT_SW_RX interrupt.
 */
static void
iwn_notif_intr(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* Drain every notification up to the firmware's write pointer. */
	hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwn_rx_desc *desc;

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);
		desc = mtod(data->m, struct iwn_rx_desc *);

		DPRINTF(sc, IWN_DEBUG_RECV,
		    "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n",
		    __func__, sc->rxq.cur, desc->qid & 0xf, desc->idx, desc->flags,
		    desc->type, iwn_intr_str(desc->type),
		    le16toh(desc->len));

		if (!(desc->qid & IWN_UNSOLICITED_RX_NOTIF))	/* Reply to a command.
 */
			iwn_cmd_done(sc, desc);

		/* Dispatch on the notification type. */
		switch (desc->type) {
		case IWN_RX_PHY:
			iwn_rx_phy(sc, desc, data);
			break;

		case IWN_RX_DONE:		/* 4965AGN only. */
		case IWN_MPDU_RX_DONE:
			/* An 802.11 frame has been received. */
			iwn_rx_done(sc, desc, data);
			break;

		case IWN_RX_COMPRESSED_BA:
			/* A Compressed BlockAck has been received. */
			iwn_rx_compressed_ba(sc, desc, data);
			break;

		case IWN_TX_DONE:
			/* An 802.11 frame has been transmitted. */
			ops->tx_done(sc, desc, data);
			break;

		case IWN_RX_STATISTICS:
		case IWN_BEACON_STATISTICS:
			iwn_rx_statistics(sc, desc, data);
			break;

		case IWN_BEACON_MISSED:
		{
			struct iwn_beacon_missed *miss =
			    (struct iwn_beacon_missed *)(desc + 1);
			int misses;

			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			misses = le32toh(miss->consecutive);

			DPRINTF(sc, IWN_DEBUG_STATE,
			    "%s: beacons missed %d/%d\n", __func__,
			    misses, le32toh(miss->total));
			/*
			 * If more than 5 consecutive beacons are missed,
			 * reinitialize the sensitivity state machine.
			 */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (misses > 5)
					(void)iwn_init_sensitivity(sc);
				if (misses >= vap->iv_bmissthreshold) {
					IWN_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWN_LOCK(sc);
				}
			}
			break;
		}
		case IWN_UC_READY:
		{
			struct iwn_ucode_info *uc =
			    (struct iwn_ucode_info *)(desc + 1);

			/* The microcontroller is ready.
 */
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "microcode alive notification version=%d.%d "
			    "subtype=%x alive=%x\n", uc->major, uc->minor,
			    uc->subtype, le32toh(uc->valid));

			if (le32toh(uc->valid) != 1) {
				/* NOTE(review): message lacks a trailing \n. */
				device_printf(sc->sc_dev,
				    "microcontroller initialization failed");
				break;
			}
			if (uc->subtype == IWN_UCODE_INIT) {
				/* Save microcontroller report. */
				memcpy(&sc->ucode_info, uc, sizeof (*uc));
			}
			/* Save the address of the error log in SRAM. */
			sc->errptr = le32toh(uc->errptr);
			break;
		}
		case IWN_STATE_CHANGED:
		{
			/*
			 * State change allows hardware switch change to be
			 * noted. However, we handle this in iwn_intr as we
			 * get both the enable/disble intr.
			 */
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
#ifdef IWN_DEBUG
			uint32_t *status = (uint32_t *)(desc + 1);
			DPRINTF(sc, IWN_DEBUG_INTR | IWN_DEBUG_STATE,
			    "state changed to %x\n",
			    le32toh(*status));
#endif
			break;
		}
		case IWN_START_SCAN:
		{
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
#ifdef IWN_DEBUG
			struct iwn_start_scan *scan =
			    (struct iwn_start_scan *)(desc + 1);
			DPRINTF(sc, IWN_DEBUG_ANY,
			    "%s: scanning channel %d status %x\n",
			    __func__, scan->chan, le32toh(scan->status));
#endif
			break;
		}
		case IWN_STOP_SCAN:
		{
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
#ifdef IWN_DEBUG
			struct iwn_stop_scan *scan =
			    (struct iwn_stop_scan *)(desc + 1);
			DPRINTF(sc, IWN_DEBUG_STATE | IWN_DEBUG_SCAN,
			    "scan finished nchan=%d status=%d chan=%d\n",
			    scan->nchan, scan->status, scan->chan);
#endif
			sc->sc_is_scanning = 0;
			IWN_UNLOCK(sc);
			ieee80211_scan_next(vap);
			IWN_LOCK(sc);
			break;
		}
		case IWN5000_CALIBRATION_RESULT:
			iwn5000_rx_calib_results(sc, desc, data);
			break;

		case IWN5000_CALIBRATION_DONE:
			sc->sc_flags |= IWN_FLAG_CALIB_DONE;
			wakeup(sc);
			break;
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
	}

	/* Tell the firmware what we have processed. */
	hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
	IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
}

/*
 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
 * from power-down sleep mode.
 */
static void
iwn_wakeup_intr(struct iwn_softc *sc)
{
	int qid;

	DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
	    __func__);

	/* Wakeup RX and TX rings. */
	IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
	for (qid = 0; qid < sc->ntxqs; qid++) {
		struct iwn_tx_ring *ring = &sc->txq[qid];
		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
	}
}

/* Handle an RF-kill switch toggle: schedule radio on/off accordingly. */
static void
iwn_rftoggle_intr(struct iwn_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);

	IWN_LOCK_ASSERT(sc);

	device_printf(sc->sc_dev, "RF switch: radio %s\n",
	    (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
	if (tmp & IWN_GP_CNTRL_RFKILL)
		ieee80211_runtask(ic, &sc->sc_radioon_task);
	else
		ieee80211_runtask(ic, &sc->sc_radiooff_task);
}

/*
 * Dump the error log of the firmware when a firmware panic occurs.  Although
 * we can't debug the firmware because it is neither open source nor free, it
 * can help us to identify certain classes of problems.
 */
static void
iwn_fatal_intr(struct iwn_softc *sc)
{
	struct iwn_fw_dump dump;
	int i;

	IWN_LOCK_ASSERT(sc);

	/* Force a complete recalibration on next init.
 */
	sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;

	/* Check that the error log address is valid. */
	if (sc->errptr < IWN_FW_DATA_BASE ||
	    sc->errptr + sizeof (dump) >
	    IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
		kprintf("%s: bad firmware error log address 0x%08x\n", __func__,
		    sc->errptr);
		return;
	}
	if (iwn_nic_lock(sc) != 0) {
		kprintf("%s: could not read firmware error log\n", __func__);
		return;
	}
	/* Read firmware error log from SRAM. */
	iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
	    sizeof (dump) / sizeof (uint32_t));
	iwn_nic_unlock(sc);

	if (dump.valid == 0) {
		kprintf("%s: firmware error log is empty\n", __func__);
		return;
	}
	kprintf("firmware error log:\n");
	kprintf("  error type      = \"%s\" (0x%08X)\n",
	    (dump.id < nitems(iwn_fw_errmsg)) ?
		iwn_fw_errmsg[dump.id] : "UNKNOWN",
	    dump.id);
	kprintf("  program counter = 0x%08X\n", dump.pc);
	kprintf("  source line     = 0x%08X\n", dump.src_line);
	kprintf("  error data      = 0x%08X%08X\n",
	    dump.error_data[0], dump.error_data[1]);
	kprintf("  branch link     = 0x%08X%08X\n",
	    dump.branch_link[0], dump.branch_link[1]);
	kprintf("  interrupt link  = 0x%08X%08X\n",
	    dump.interrupt_link[0], dump.interrupt_link[1]);
	kprintf("  time            = %u\n", dump.time[0]);

	/* Dump driver status (TX and RX rings) while we're here. */
	kprintf("driver status:\n");
	for (i = 0; i < sc->ntxqs; i++) {
		struct iwn_tx_ring *ring = &sc->txq[i];
		kprintf("  tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
		    i, ring->qid, ring->cur, ring->queued);
	}
	kprintf("  rx ring: cur=%d\n", sc->rxq.cur);
}

/* Main interrupt handler: decode and dispatch all pending interrupt causes. */
static void
iwn_intr(void *arg)
{
	struct iwn_softc *sc = arg;
	uint32_t r1, r2, tmp;

	IWN_LOCK(sc);

	/* Disable interrupts.
 */
	IWN_WRITE(sc, IWN_INT_MASK, 0);

	/* Read interrupts from ICT (fast) or from registers (slow). */
	if (sc->sc_flags & IWN_FLAG_USE_ICT) {
		tmp = 0;
		while (sc->ict[sc->ict_cur] != 0) {
			tmp |= sc->ict[sc->ict_cur];
			sc->ict[sc->ict_cur] = 0;	/* Acknowledge. */
			sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
		}
		tmp = le32toh(tmp);
		if (tmp == 0xffffffff)	/* Shouldn't happen. */
			tmp = 0;
		else if (tmp & 0xc0000)	/* Workaround a HW bug. */
			tmp |= 0x8000;
		/* Reassemble the INT register layout from the ICT word. */
		r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
		r2 = 0;	/* Unused. */
	} else {
		r1 = IWN_READ(sc, IWN_INT);
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) {
			IWN_UNLOCK(sc);
			return;	/* Hardware gone! */
		}
		r2 = IWN_READ(sc, IWN_FH_INT);
	}

	DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n"
	    , r1, r2);

	if (r1 == 0 && r2 == 0)
		goto done;	/* Interrupt not for us. */

	/* Acknowledge interrupts. */
	IWN_WRITE(sc, IWN_INT, r1);
	if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
		IWN_WRITE(sc, IWN_FH_INT, r2);

	if (r1 & IWN_INT_RF_TOGGLED) {
		iwn_rftoggle_intr(sc);
		goto done;
	}
	if (r1 & IWN_INT_CT_REACHED) {
		device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
		    __func__);
	}
	if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
		device_printf(sc->sc_dev, "%s: fatal firmware error\n",
		    __func__);
#ifdef IWN_DEBUG
		iwn_debug_register(sc);
#endif
		/* Dump firmware error log and stop.
 */
		iwn_fatal_intr(sc);

		taskqueue_enqueue(sc->sc_tq, &sc->sc_panic_task);
		goto done;
	}
	if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
	    (r2 & IWN_FH_INT_RX)) {
		if (sc->sc_flags & IWN_FLAG_USE_ICT) {
			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
				IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
			/* Disable periodic RX while draining notifications. */
			IWN_WRITE_1(sc, IWN_INT_PERIODIC,
			    IWN_INT_PERIODIC_DIS);
			iwn_notif_intr(sc);
			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
				IWN_WRITE_1(sc, IWN_INT_PERIODIC,
				    IWN_INT_PERIODIC_ENA);
			}
		} else
			iwn_notif_intr(sc);
	}

	if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
		if (sc->sc_flags & IWN_FLAG_USE_ICT)
			IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
		wakeup(sc);	/* FH DMA transfer completed. */
	}

	if (r1 & IWN_INT_ALIVE)
		wakeup(sc);	/* Firmware is alive. */

	if (r1 & IWN_INT_WAKEUP)
		iwn_wakeup_intr(sc);

done:
	/* Re-enable interrupts. */
	if (sc->sc_flags & IWN_FLAG_RUNNING)
		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);

	IWN_UNLOCK(sc);
}

/*
 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
 * 5000 adapters use a slightly different format).
4267 */ 4268 static void 4269 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 4270 uint16_t len) 4271 { 4272 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 4273 4274 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4275 4276 *w = htole16(len + 8); 4277 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 4278 BUS_DMASYNC_PREWRITE); 4279 if (idx < IWN_SCHED_WINSZ) { 4280 *(w + IWN_TX_RING_COUNT) = *w; 4281 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 4282 BUS_DMASYNC_PREWRITE); 4283 } 4284 } 4285 4286 static void 4287 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 4288 uint16_t len) 4289 { 4290 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 4291 4292 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4293 4294 *w = htole16(id << 12 | (len + 8)); 4295 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 4296 BUS_DMASYNC_PREWRITE); 4297 if (idx < IWN_SCHED_WINSZ) { 4298 *(w + IWN_TX_RING_COUNT) = *w; 4299 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 4300 BUS_DMASYNC_PREWRITE); 4301 } 4302 } 4303 4304 #ifdef notyet 4305 static void 4306 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 4307 { 4308 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 4309 4310 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 4311 4312 *w = (*w & htole16(0xf000)) | htole16(1); 4313 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 4314 BUS_DMASYNC_PREWRITE); 4315 if (idx < IWN_SCHED_WINSZ) { 4316 *(w + IWN_TX_RING_COUNT) = *w; 4317 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 4318 BUS_DMASYNC_PREWRITE); 4319 } 4320 } 4321 #endif 4322 4323 /* 4324 * Check whether OFDM 11g protection will be enabled for the given rate. 4325 * 4326 * The original driver code only enabled protection for OFDM rates. 4327 * It didn't check to see whether it was operating in 11a or 11bg mode. 
4328 */ 4329 static int 4330 iwn_check_rate_needs_protection(struct iwn_softc *sc, 4331 struct ieee80211vap *vap, uint8_t rate) 4332 { 4333 struct ieee80211com *ic = vap->iv_ic; 4334 4335 /* 4336 * Not in 2GHz mode? Then there's no need to enable OFDM 4337 * 11bg protection. 4338 */ 4339 if (! IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { 4340 return (0); 4341 } 4342 4343 /* 4344 * 11bg protection not enabled? Then don't use it. 4345 */ 4346 if ((ic->ic_flags & IEEE80211_F_USEPROT) == 0) 4347 return (0); 4348 4349 /* 4350 * If it's an 11n rate - no protection. 4351 * We'll do it via a specific 11n check. 4352 */ 4353 if (rate & IEEE80211_RATE_MCS) { 4354 return (0); 4355 } 4356 4357 /* 4358 * Do a rate table lookup. If the PHY is CCK, 4359 * don't do protection. 4360 */ 4361 if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_CCK) 4362 return (0); 4363 4364 /* 4365 * Yup, enable protection. 4366 */ 4367 return (1); 4368 } 4369 4370 /* 4371 * return a value between 0 and IWN_MAX_TX_RETRIES-1 as an index into 4372 * the link quality table that reflects this particular entry. 4373 */ 4374 static int 4375 iwn_tx_rate_to_linkq_offset(struct iwn_softc *sc, struct ieee80211_node *ni, 4376 uint8_t rate) 4377 { 4378 struct ieee80211_rateset *rs; 4379 int is_11n; 4380 int nr; 4381 int i; 4382 uint8_t cmp_rate; 4383 4384 /* 4385 * Figure out if we're using 11n or not here. 4386 */ 4387 if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) 4388 is_11n = 1; 4389 else 4390 is_11n = 0; 4391 4392 /* 4393 * Use the correct rate table. 4394 */ 4395 if (is_11n) { 4396 rs = (struct ieee80211_rateset *) &ni->ni_htrates; 4397 nr = ni->ni_htrates.rs_nrates; 4398 } else { 4399 rs = &ni->ni_rates; 4400 nr = rs->rs_nrates; 4401 } 4402 4403 /* 4404 * Find the relevant link quality entry in the table. 
4405 */ 4406 for (i = 0; i < nr && i < IWN_MAX_TX_RETRIES - 1 ; i++) { 4407 /* 4408 * The link quality table index starts at 0 == highest 4409 * rate, so we walk the rate table backwards. 4410 */ 4411 cmp_rate = rs->rs_rates[(nr - 1) - i]; 4412 if (rate & IEEE80211_RATE_MCS) 4413 cmp_rate |= IEEE80211_RATE_MCS; 4414 4415 #if 0 4416 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: idx %d: nr=%d, rate=0x%02x, rateentry=0x%02x\n", 4417 __func__, 4418 i, 4419 nr, 4420 rate, 4421 cmp_rate); 4422 #endif 4423 4424 if (cmp_rate == rate) 4425 return (i); 4426 } 4427 4428 /* Failed? Start at the end */ 4429 return (IWN_MAX_TX_RETRIES - 1); 4430 } 4431 4432 static int 4433 iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 4434 { 4435 struct iwn_ops *ops = &sc->ops; 4436 const struct ieee80211_txparam *tp; 4437 struct ieee80211vap *vap = ni->ni_vap; 4438 struct ieee80211com *ic = ni->ni_ic; 4439 struct iwn_node *wn = (void *)ni; 4440 struct iwn_tx_ring *ring; 4441 struct iwn_tx_desc *desc; 4442 struct iwn_tx_data *data; 4443 struct iwn_tx_cmd *cmd; 4444 struct iwn_cmd_data *tx; 4445 struct ieee80211_frame *wh; 4446 struct ieee80211_key *k = NULL; 4447 struct mbuf *m1; 4448 uint32_t flags; 4449 uint16_t qos; 4450 u_int hdrlen; 4451 bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER]; 4452 uint8_t tid, type; 4453 int ac, i, totlen, error, pad, nsegs = 0, rate; 4454 4455 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 4456 4457 IWN_LOCK_ASSERT(sc); 4458 4459 wh = mtod(m, struct ieee80211_frame *); 4460 hdrlen = ieee80211_anyhdrsize(wh); 4461 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 4462 4463 /* Select EDCA Access Category and TX ring for this frame. 
 */
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
		tid = qos & IEEE80211_QOS_TID;
	} else {
		qos = 0;
		tid = 0;
	}
	ac = M_WME_GETAC(m);
	if (m->m_flags & M_AMPDU_MPDU) {
		uint16_t seqno;
		struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];

		if (!IEEE80211_AMPDU_RUNNING(tap)) {
			return EINVAL;
		}

		/*
		 * Queue this frame to the hardware ring that we've
		 * negotiated AMPDU TX on.
		 *
		 * Note that the sequence number must match the TX slot
		 * being used!
		 */
		ac = *(int *)tap->txa_private;
		seqno = ni->ni_txseqs[tid];
		*(uint16_t *)wh->i_seq =
		    htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
		ring = &sc->txq[ac];
		if ((seqno % 256) != ring->cur) {
			device_printf(sc->sc_dev,
			    "%s: m=%p: seqno (%d) (%d) != ring index (%d) !\n",
			    __func__,
			    m,
			    seqno,
			    seqno % 256,
			    ring->cur);
		}
		ni->ni_txseqs[tid]++;
	}
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];

	/* Choose a TX rate index. */
	tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
	if (type == IEEE80211_FC0_TYPE_MGT)
		rate = tp->mgmtrate;
	else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
		rate = tp->mcastrate;
	else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
		rate = tp->ucastrate;
	else if (m->m_flags & M_EAPOL)
		rate = tp->mgmtrate;
	else {
		/* XXX pass pktlen */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		rate = ni->ni_txrate;
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			return ENOBUFS;
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_rate = rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		ieee80211_radiotap_tx(vap, m);
	}

	/* Prepare TX firmware command. */
	cmd = &ring->cmd[ring->cur];
	cmd->code = IWN_CMD_TX_DATA;
	cmd->flags = 0;
	cmd->qid = ring->qid;
	cmd->idx = ring->cur;

	tx = (struct iwn_cmd_data *)cmd->data;
	/* NB: No need to clear tx, all fields are reinitialized here. */
	tx->scratch = 0;	/* clear "scratch" area */

	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		/* Unicast frame, check if an ACK is expected. */
		if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
		    IEEE80211_QOS_ACKPOLICY_NOACK)
			flags |= IWN_TX_NEED_ACK;
	}
	if ((wh->i_fc[0] &
	    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
	    (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
		flags |= IWN_TX_IMM_BA;		/* Cannot happen yet. */

	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
		flags |= IWN_TX_MORE_FRAG;	/* Cannot happen yet. */

	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		/* NB: Group frames are sent using CCK in 802.11b/g. */
		if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
			flags |= IWN_TX_NEED_RTS;
		} else if (iwn_check_rate_needs_protection(sc, vap, rate)) {
			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
				flags |= IWN_TX_NEED_CTS;
			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
				flags |= IWN_TX_NEED_RTS;
		} else if ((rate & IEEE80211_RATE_MCS) &&
			(ic->ic_htprotmode == IEEE80211_PROT_RTSCTS)) {
			flags |= IWN_TX_NEED_RTS;
		}

		/* XXX HT protection? */

		if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
			if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
				/* 5000 autoselects RTS/CTS or CTS-to-self. */
				flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
				flags |= IWN_TX_NEED_PROTECTION;
			} else
				flags |= IWN_TX_FULL_TXOP;
		}
	}

	/* Non-data and group frames go to the broadcast station entry. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->id = sc->broadcast_id;
	else
		tx->id = wn->id;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Tell HW to set timestamp in probe responses. */
		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			flags |= IWN_TX_INSERT_TSTAMP;
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->timeout = htole16(3);
		else
			tx->timeout = htole16(2);
	} else
		tx->timeout = htole16(0);

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWN_TX_NEED_PADDING;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->len = htole16(totlen);
	tx->tid = tid;
	tx->rts_ntries = 60;
	tx->data_ntries = 15;
	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
	tx->rate = iwn_rate_to_plcp(sc, ni, rate);
	if (tx->id == sc->broadcast_id) {
		/* Group or management frame. */
		tx->linkq = 0;
	} else {
		tx->linkq = iwn_tx_rate_to_linkq_offset(sc, ni, rate);
		flags |= IWN_TX_LINKQ;	/* enable MRR */
	}

	/* Set physical address of "scratch area". */
	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy((uint8_t *)(tx + 1), wh, hdrlen);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	tx->security = 0;
	tx->flags = htole32(flags);

#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(ring->data_dmat,
	    data->map, m,
	    segs, IWN_MAX_SCATTER - 1,
	    &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
#if defined(__DragonFly__)
		m1 = m_defrag(m, M_NOWAIT);
#else
		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER - 1);
#endif
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			return ENOBUFS;
		}
		m = m1;

#if defined(__DragonFly__)
		error = bus_dmamap_load_mbuf_segment(ring->data_dmat,
		    data->map, m,
		    segs, IWN_MAX_SCATTER - 1,
		    &nsegs, BUS_DMA_NOWAIT);
#else
		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			return error;
		}
	}

	data->m = m;
	data->ni = ni;

	DPRINTF(sc, IWN_DEBUG_XMIT,
	    "%s: qid %d idx %d len %d nsegs %d flags 0x%08x rate 0x%04x plcp 0x%08x\n",
	    __func__,
	    ring->qid,
	    ring->cur,
	    m->m_pkthdr.len,
	    nsegs,
	    flags,
	    rate,
	    tx->rate);

	/* Fill TX descriptor. */
	desc->nsegs = 1;
	if (m->m_len != 0)
		desc->nsegs += nsegs;
	/* First DMA segment is used by the TX command. */
	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
	desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
	/* Other DMA segments are for data payload. */
	seg = &segs[0];
	for (i = 1; i <= nsegs; i++) {
		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
		desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) |
		    seg->ds_len << 4);
		seg++;
	}

	/* Sync payload, TX command and descriptor before the doorbell. */
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Update TX scheduler. */
	if (ring->qid >= sc->firstaggqueue)
		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold.
 */
	if (++ring->queued > IWN_TX_RING_HIMARK)
		sc->qfullmsk |= 1 << ring->qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * Transmit a frame with caller-supplied BPF parameters (raw xmit path).
 *
 * Like iwn_tx_data() but rate, retry counts, AC and protection flags
 * come from "params" instead of net80211 state.  Always uses the
 * broadcast station entry.  Returns 0 or an errno; the mbuf is NOT
 * freed on error.  Called with the softc lock held.
 */
static int
iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
    struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
{
	struct iwn_ops *ops = &sc->ops;
	struct ieee80211vap *vap = ni->ni_vap;
	struct iwn_tx_cmd *cmd;
	struct iwn_cmd_data *tx;
	struct ieee80211_frame *wh;
	struct iwn_tx_ring *ring;
	struct iwn_tx_desc *desc;
	struct iwn_tx_data *data;
	struct mbuf *m1;
	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
	uint32_t flags;
	u_int hdrlen;
	int ac, totlen, error, pad, nsegs = 0, i, rate;
	uint8_t type;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	IWN_LOCK_ASSERT(sc);

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	ac = params->ibp_pri & 3;

	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];

	/* Choose a TX rate. */
	rate = params->ibp_rate0;
	totlen = m->m_pkthdr.len;

	/* Prepare TX firmware command. */
	cmd = &ring->cmd[ring->cur];
	cmd->code = IWN_CMD_TX_DATA;
	cmd->flags = 0;
	cmd->qid = ring->qid;
	cmd->idx = ring->cur;

	tx = (struct iwn_cmd_data *)cmd->data;
	/* NB: No need to clear tx, all fields are reinitialized here. */
	tx->scratch = 0;	/* clear "scratch" area */

	flags = 0;
	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
		flags |= IWN_TX_NEED_ACK;
	if (params->ibp_flags & IEEE80211_BPF_RTS) {
		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
			/* 5000 autoselects RTS/CTS or CTS-to-self. */
			flags &= ~IWN_TX_NEED_RTS;
			flags |= IWN_TX_NEED_PROTECTION;
		} else
			flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
	}
	if (params->ibp_flags & IEEE80211_BPF_CTS) {
		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
			/* 5000 autoselects RTS/CTS or CTS-to-self. */
			flags &= ~IWN_TX_NEED_CTS;
			flags |= IWN_TX_NEED_PROTECTION;
		} else
			flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
	}
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Tell HW to set timestamp in probe responses. */
		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			flags |= IWN_TX_INSERT_TSTAMP;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->timeout = htole16(3);
		else
			tx->timeout = htole16(2);
	} else
		tx->timeout = htole16(0);

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWN_TX_NEED_PADDING;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_rate = rate;

		ieee80211_radiotap_tx(vap, m);
	}

	tx->len = htole16(totlen);
	tx->tid = 0;
	tx->id = sc->broadcast_id;
	tx->rts_ntries = params->ibp_try1;
	tx->data_ntries = params->ibp_try0;
	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
	tx->rate = iwn_rate_to_plcp(sc, ni, rate);

	/* Group or management frame. */
	tx->linkq = 0;

	/* Set physical address of "scratch area". */
	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy((uint8_t *)(tx + 1), wh, hdrlen);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	tx->security = 0;
	tx->flags = htole32(flags);

#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map,
	    m, segs, IWN_MAX_SCATTER - 1,
	    &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
#if defined(__DragonFly__)
		m1 = m_defrag(m, M_NOWAIT);
#else
		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER - 1);
#endif
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			return ENOBUFS;
		}
		m = m1;

#if defined(__DragonFly__)
		error = bus_dmamap_load_mbuf_segment(ring->data_dmat,
		    data->map, m,
		    segs, IWN_MAX_SCATTER - 1,
		    &nsegs, BUS_DMA_NOWAIT);
#else
		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			return error;
		}
	}

	data->m = m;
	data->ni = ni;

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);

	/* Fill TX descriptor. */
	desc->nsegs = 1;
	if (m->m_len != 0)
		desc->nsegs += nsegs;
	/* First DMA segment is used by the TX command. */
	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
	desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
	/* Other DMA segments are for data payload. */
	seg = &segs[0];
	for (i = 1; i <= nsegs; i++) {
		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
		desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) |
		    seg->ds_len << 4);
		seg++;
	}

	/* Sync payload, TX command and descriptor before the doorbell. */
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Update TX scheduler. */
	if (ring->qid >= sc->firstaggqueue)
		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWN_TX_RING_HIMARK)
		sc->qfullmsk |= 1 << ring->qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * Deferred transmit task: drains frames queued while waiting for a
 * beacon (sc_beacon_wait) and clears the wait flag when done.
 */
static void
iwn_xmit_task(void *arg0, int pending)
{
	struct iwn_softc *sc = arg0;
	struct ieee80211_node *ni;
	struct mbuf *m;
	int error;
	struct ieee80211_bpf_params p;
	int have_p;

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: called\n", __func__);

	IWN_LOCK(sc);
	/*
	 * Dequeue frames, attempt to transmit,
	 * then disable beaconwait when we're done.
4976 */ 4977 while ((m = mbufq_dequeue(&sc->sc_xmit_queue)) != NULL) { 4978 have_p = 0; 4979 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 4980 4981 /* Get xmit params if appropriate */ 4982 if (ieee80211_get_xmit_params(m, &p) == 0) 4983 have_p = 1; 4984 4985 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: m=%p, have_p=%d\n", 4986 __func__, m, have_p); 4987 4988 /* If we have xmit params, use them */ 4989 if (have_p) 4990 error = iwn_tx_data_raw(sc, m, ni, &p); 4991 else 4992 error = iwn_tx_data(sc, m, ni); 4993 4994 if (error != 0) { 4995 if_inc_counter(ni->ni_vap->iv_ifp, 4996 IFCOUNTER_OERRORS, 1); 4997 ieee80211_free_node(ni); 4998 m_freem(m); 4999 } 5000 } 5001 5002 sc->sc_beacon_wait = 0; 5003 IWN_UNLOCK(sc); 5004 } 5005 5006 /* 5007 * raw frame xmit - free node/reference if failed. 5008 */ 5009 static int 5010 iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 5011 const struct ieee80211_bpf_params *params) 5012 { 5013 struct ieee80211com *ic = ni->ni_ic; 5014 struct iwn_softc *sc = ic->ic_softc; 5015 int error = 0; 5016 5017 DPRINTF(sc, IWN_DEBUG_XMIT | IWN_DEBUG_TRACE, "->%s begin\n", __func__); 5018 5019 IWN_LOCK(sc); 5020 if ((sc->sc_flags & IWN_FLAG_RUNNING) == 0) { 5021 m_freem(m); 5022 IWN_UNLOCK(sc); 5023 return (ENETDOWN); 5024 } 5025 5026 /* queue frame if we have to */ 5027 if (sc->sc_beacon_wait) { 5028 if (iwn_xmit_queue_enqueue(sc, m) != 0) { 5029 m_freem(m); 5030 IWN_UNLOCK(sc); 5031 return (ENOBUFS); 5032 } 5033 /* Queued, so just return OK */ 5034 IWN_UNLOCK(sc); 5035 return (0); 5036 } 5037 5038 if (params == NULL) { 5039 /* 5040 * Legacy path; interpret frame contents to decide 5041 * precisely how to send the frame. 5042 */ 5043 error = iwn_tx_data(sc, m, ni); 5044 } else { 5045 /* 5046 * Caller supplied explicit parameters to use in 5047 * sending the frame. 
5048 */ 5049 error = iwn_tx_data_raw(sc, m, ni, params); 5050 } 5051 if (error == 0) 5052 sc->sc_tx_timer = 5; 5053 else 5054 m_freem(m); 5055 5056 IWN_UNLOCK(sc); 5057 5058 DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, "->%s: end\n",__func__); 5059 5060 return (error); 5061 } 5062 5063 /* 5064 * transmit - don't free mbuf if failed; don't free node ref if failed. 5065 */ 5066 static int 5067 iwn_transmit(struct ieee80211com *ic, struct mbuf *m) 5068 { 5069 struct iwn_softc *sc = ic->ic_softc; 5070 struct ieee80211_node *ni; 5071 int error; 5072 5073 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 5074 5075 IWN_LOCK(sc); 5076 if ((sc->sc_flags & IWN_FLAG_RUNNING) == 0 || sc->sc_beacon_wait) { 5077 IWN_UNLOCK(sc); 5078 return (ENXIO); 5079 } 5080 5081 if (sc->qfullmsk) { 5082 IWN_UNLOCK(sc); 5083 return (ENOBUFS); 5084 } 5085 5086 error = iwn_tx_data(sc, m, ni); 5087 if (!error) 5088 sc->sc_tx_timer = 5; 5089 IWN_UNLOCK(sc); 5090 return (error); 5091 } 5092 5093 static void 5094 iwn_watchdog(void *arg) 5095 { 5096 struct iwn_softc *sc = arg; 5097 struct ieee80211com *ic = &sc->sc_ic; 5098 5099 IWN_LOCK_ASSERT(sc); 5100 5101 KASSERT(sc->sc_flags & IWN_FLAG_RUNNING, ("not running")); 5102 5103 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5104 5105 if (sc->sc_tx_timer > 0) { 5106 if (--sc->sc_tx_timer == 0) { 5107 ic_printf(ic, "device timeout\n"); 5108 ieee80211_restart_all(ic); 5109 return; 5110 } 5111 } 5112 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc); 5113 } 5114 5115 #if defined(__DragonFly__) 5116 static int 5117 iwn_cdev_open(struct dev_open_args *ap) 5118 #else 5119 static int 5120 iwn_cdev_open(struct cdev *dev, int flags, int type, struct thread *td) 5121 #endif 5122 { 5123 5124 return (0); 5125 } 5126 5127 #if defined(__DragonFly__) 5128 static int 5129 iwn_cdev_close(struct dev_close_args *ap) 5130 #else 5131 static int 5132 iwn_cdev_close(struct cdev *dev, int flags, int type, struct thread *td) 5133 #endif 5134 { 5135 5136 return 
(0); 5137 } 5138 5139 #if defined(__DragonFly__) 5140 static int 5141 iwn_cdev_ioctl(struct dev_ioctl_args *ap) 5142 { 5143 cdev_t dev = ap->a_head.a_dev; 5144 unsigned long cmd = ap->a_cmd; 5145 caddr_t data = ap->a_data; 5146 struct thread *td = curthread; 5147 #else 5148 static int 5149 iwn_cdev_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, 5150 struct thread *td) 5151 { 5152 #endif 5153 int rc; 5154 struct iwn_softc *sc = dev->si_drv1; 5155 struct iwn_ioctl_data *d; 5156 5157 rc = priv_check(td, PRIV_DRIVER); 5158 if (rc != 0) 5159 return (0); 5160 5161 switch (cmd) { 5162 case SIOCGIWNSTATS: 5163 d = (struct iwn_ioctl_data *) data; 5164 IWN_LOCK(sc); 5165 /* XXX validate permissions/memory/etc? */ 5166 rc = copyout(&sc->last_stat, d->dst_addr, sizeof(struct iwn_stats)); 5167 IWN_UNLOCK(sc); 5168 break; 5169 case SIOCZIWNSTATS: 5170 IWN_LOCK(sc); 5171 memset(&sc->last_stat, 0, sizeof(struct iwn_stats)); 5172 IWN_UNLOCK(sc); 5173 break; 5174 default: 5175 rc = EINVAL; 5176 break; 5177 } 5178 return (rc); 5179 } 5180 5181 static int 5182 iwn_ioctl(struct ieee80211com *ic, u_long cmd, void *data) 5183 { 5184 5185 return (ENOTTY); 5186 } 5187 5188 static void 5189 iwn_parent(struct ieee80211com *ic) 5190 { 5191 struct iwn_softc *sc = ic->ic_softc; 5192 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5193 int startall = 0, stop = 0; 5194 5195 IWN_LOCK(sc); 5196 if (ic->ic_nrunning > 0) { 5197 if (!(sc->sc_flags & IWN_FLAG_RUNNING)) { 5198 iwn_init_locked(sc); 5199 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL) 5200 startall = 1; 5201 else 5202 stop = 1; 5203 } 5204 } else if (sc->sc_flags & IWN_FLAG_RUNNING) 5205 iwn_stop_locked(sc); 5206 IWN_UNLOCK(sc); 5207 if (startall) 5208 ieee80211_start_all(ic); 5209 else if (vap != NULL && stop) 5210 ieee80211_stop(vap); 5211 } 5212 5213 /* 5214 * Send a command to the firmware. 
5215 */ 5216 static int 5217 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) 5218 { 5219 struct iwn_tx_ring *ring; 5220 struct iwn_tx_desc *desc; 5221 struct iwn_tx_data *data; 5222 struct iwn_tx_cmd *cmd; 5223 struct mbuf *m; 5224 bus_addr_t paddr; 5225 int totlen, error; 5226 int cmd_queue_num; 5227 5228 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 5229 5230 if (async == 0) 5231 IWN_LOCK_ASSERT(sc); 5232 5233 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) 5234 cmd_queue_num = IWN_PAN_CMD_QUEUE; 5235 else 5236 cmd_queue_num = IWN_CMD_QUEUE_NUM; 5237 5238 ring = &sc->txq[cmd_queue_num]; 5239 desc = &ring->desc[ring->cur]; 5240 data = &ring->data[ring->cur]; 5241 totlen = 4 + size; 5242 5243 if (size > sizeof cmd->data) { 5244 /* Command is too large to fit in a descriptor. */ 5245 if (totlen > MCLBYTES) 5246 return EINVAL; 5247 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 5248 if (m == NULL) 5249 return ENOMEM; 5250 cmd = mtod(m, struct iwn_tx_cmd *); 5251 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 5252 totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 5253 if (error != 0) { 5254 m_freem(m); 5255 return error; 5256 } 5257 data->m = m; 5258 } else { 5259 cmd = &ring->cmd[ring->cur]; 5260 paddr = data->cmd_paddr; 5261 } 5262 5263 cmd->code = code; 5264 cmd->flags = 0; 5265 cmd->qid = ring->qid; 5266 cmd->idx = ring->cur; 5267 memcpy(cmd->data, buf, size); 5268 5269 desc->nsegs = 1; 5270 desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); 5271 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4); 5272 5273 DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n", 5274 __func__, iwn_intr_str(cmd->code), cmd->code, 5275 cmd->flags, cmd->qid, cmd->idx); 5276 5277 if (size > sizeof cmd->data) { 5278 bus_dmamap_sync(ring->data_dmat, data->map, 5279 BUS_DMASYNC_PREWRITE); 5280 } else { 5281 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 5282 BUS_DMASYNC_PREWRITE); 5283 } 5284 
bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 5285 BUS_DMASYNC_PREWRITE); 5286 5287 /* Kick command ring. */ 5288 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 5289 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 5290 5291 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 5292 5293 #if defined(__DragonFly__) 5294 return async ? 0 : lksleep(desc, &sc->sc_lk, PCATCH, "iwncmd", hz); 5295 #else 5296 return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz); 5297 #endif 5298 } 5299 5300 static int 5301 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 5302 { 5303 struct iwn4965_node_info hnode; 5304 caddr_t src, dst; 5305 5306 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5307 5308 /* 5309 * We use the node structure for 5000 Series internally (it is 5310 * a superset of the one for 4965AGN). We thus copy the common 5311 * fields before sending the command. 5312 */ 5313 src = (caddr_t)node; 5314 dst = (caddr_t)&hnode; 5315 memcpy(dst, src, 48); 5316 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */ 5317 memcpy(dst + 48, src + 72, 20); 5318 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async); 5319 } 5320 5321 static int 5322 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 5323 { 5324 5325 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5326 5327 /* Direct mapping. 
 */
	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
}

/*
 * Program the firmware's link quality (multi-rate-retry) table for a
 * station: a descending ladder of IWN_MAX_TX_RETRIES rates starting at
 * the node's highest available (HT or legacy) rate.  Sent async.
 */
static int
iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
{
	struct iwn_node *wn = (void *)ni;
	struct ieee80211_rateset *rs;
	struct iwn_cmd_link_quality linkq;
	int i, rate, txrate;
	int is_11n;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	memset(&linkq, 0, sizeof linkq);
	linkq.id = wn->id;
	linkq.antmsk_1stream = iwn_get_1stream_tx_antmask(sc);
	linkq.antmsk_2stream = iwn_get_2stream_tx_antmask(sc);

	linkq.ampdu_max = 32;		/* XXX negotiated? */
	linkq.ampdu_threshold = 3;
	linkq.ampdu_limit = htole16(4000);	/* 4ms */

	DPRINTF(sc, IWN_DEBUG_XMIT,
	    "%s: 1stream antenna=0x%02x, 2stream antenna=0x%02x, ntxstreams=%d\n",
	    __func__,
	    linkq.antmsk_1stream,
	    linkq.antmsk_2stream,
	    sc->ntxchains);

	/*
	 * Are we using 11n rates? Ensure the channel is
	 * 11n _and_ we have some 11n rates, or don't
	 * try.
	 */
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) {
		rs = (struct ieee80211_rateset *) &ni->ni_htrates;
		is_11n = 1;
	} else {
		rs = &ni->ni_rates;
		is_11n = 0;
	}

	/* Start at highest available bit-rate. */
	/*
	 * XXX this is all very dirty!
	 */
	if (is_11n)
		txrate = ni->ni_htrates.rs_nrates - 1;
	else
		txrate = rs->rs_nrates - 1;
	for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
		uint32_t plcp;

		/*
		 * XXX TODO: ensure the last two slots are the two lowest
		 * rate entries, just for now.
		 */
		if (i == 14 || i == 15)
			txrate = 0;

		if (is_11n)
			rate = IEEE80211_RATE_MCS | rs->rs_rates[txrate];
		else
			rate = IEEE80211_RV(rs->rs_rates[txrate]);

		/* Do rate -> PLCP config mapping */
		plcp = iwn_rate_to_plcp(sc, ni, rate);
		linkq.retry[i] = plcp;
		DPRINTF(sc, IWN_DEBUG_XMIT,
		    "%s: i=%d, txrate=%d, rate=0x%02x, plcp=0x%08x\n",
		    __func__,
		    i,
		    txrate,
		    rate,
		    le32toh(plcp));

		/*
		 * The mimo field is an index into the table which
		 * indicates the first index where it and subsequent entries
		 * will not be using MIMO.
		 *
		 * Since we're filling linkq from 0..15 and we're filling
		 * from the highest MCS rates to the lowest rates, if we
		 * _are_ doing a dual-stream rate, set mimo to idx+1 (ie,
		 * the next entry.) That way if the next entry is a non-MIMO
		 * entry, we're already pointing at it.
		 */
		if ((le32toh(plcp) & IWN_RFLAG_MCS) &&
		    IEEE80211_RV(le32toh(plcp)) > 7)
			linkq.mimo = i + 1;

		/* Next retry at immediate lower bit-rate. */
		if (txrate > 0)
			txrate--;
	}
	/*
	 * If we reached the end of the list and indeed we hit
	 * all MIMO rates (eg 5300 doing MCS23-15) then yes,
	 * set mimo to 15. Setting it to 16 panics the firmware.
	 */
	if (linkq.mimo > 15)
		linkq.mimo = 15;

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: mimo = %d\n", __func__, linkq.mimo);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
}

/*
 * Broadcast node is used to send group-addressed and management frames.
 */
static int
iwn_add_broadcast_node(struct iwn_softc *sc, int async)
{
	struct iwn_ops *ops = &sc->ops;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwn_node_info node;
	struct iwn_cmd_link_quality linkq;
	uint8_t txant;
	int i, error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];

	/* Install the broadcast node entry in the firmware's node table. */
	memset(&node, 0, sizeof node);
	IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr);
	node.id = sc->broadcast_id;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
	if ((error = ops->add_node(sc, &node, async)) != 0)
		return error;

	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);

	memset(&linkq, 0, sizeof linkq);
	linkq.id = sc->broadcast_id;
	linkq.antmsk_1stream = iwn_get_1stream_tx_antmask(sc);
	linkq.antmsk_2stream = iwn_get_2stream_tx_antmask(sc);
	linkq.ampdu_max = 64;
	linkq.ampdu_threshold = 3;
	linkq.ampdu_limit = htole16(4000);	/* 4ms */

	/* Use lowest mandatory bit-rate. */
	/* XXX rate table lookup? */
	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
		linkq.retry[0] = htole32(0xd);
	else
		linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK);
	linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant));
	/* Use same bit-rate for all TX retries. */
	for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
		linkq.retry[i] = linkq.retry[0];
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
}

/*
 * Push the current WME/EDCA parameters (AIFSN, CWmin/CWmax, TXOP per
 * access category) to the firmware.  net80211 ic_updateedca callback.
 */
static int
iwn_updateedca(struct ieee80211com *ic)
{
#define IWN_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct iwn_softc *sc = ic->ic_softc;
	struct iwn_edca_params cmd;
	int aci;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	cmd.flags = htole32(IWN_EDCA_UPDATE);

	/* Snapshot the channel WME parameters under the com lock. */
	IEEE80211_LOCK(ic);
	for (aci = 0; aci < WME_NUM_AC; aci++) {
		const struct wmeParams *ac =
		    &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
		cmd.ac[aci].aifsn = ac->wmep_aifsn;
		cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin));
		cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax));
		cmd.ac[aci].txoplimit =
		    htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
	}
	IEEE80211_UNLOCK(ic);

	IWN_LOCK(sc);
	(void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
	IWN_UNLOCK(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
#undef IWN_EXP2
}

/* net80211 ic_update_mcast callback: hardware needs no multicast setup. */
static void
iwn_update_mcast(struct ieee80211com *ic)
{
	/* Ignore */
}

/*
 * Program an LED blink pattern: `which' LED, with on/off durations given
 * in units of the cmd.unit period.
 */
static void
iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
{
	/*
	 * NOTE(review): `led' is not zeroed before use; only which/unit/
	 * off/on are assigned below.  Confirm no other fields (or padding)
	 * in struct iwn_cmd_led are interpreted by the firmware.
	 */
	struct iwn_cmd_led led;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

#if 0
	/* XXX don't set LEDs during scan? */
	if (sc->sc_is_scanning)
		return;
#endif

	/* Clear microcode LED ownership.
 */
	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);

	led.which = which;
	led.unit = htole32(10000);	/* on/off in unit of 100ms */
	led.off = off;
	led.on = on;
	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
}

/*
 * Set the critical temperature at which the firmware will stop the radio
 * and notify us.
 */
static int
iwn_set_critical_temp(struct iwn_softc *sc)
{
	struct iwn_critical_temp crit;
	int32_t temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);

	/*
	 * Threshold is 110 degC, expressed in the per-chip temperature
	 * unit: raw sensor units on 5150, Kelvin on 4965, Celsius
	 * otherwise.
	 */
	if (sc->hw_type == IWN_HW_REV_TYPE_5150)
		temp = (IWN_CTOK(110) - sc->temp_off) * -5;
	else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
		temp = IWN_CTOK(110);
	else
		temp = 110;
	memset(&crit, 0, sizeof crit);
	crit.tempR = htole32(temp);
	DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp);
	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
}

/*
 * Send the RXON timing command: beacon/listen intervals and the delay to
 * the next beacon, derived from the node's TSF timestamp.
 */
static int
iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
{
	struct iwn_cmd_timing cmd;
	uint64_t val, mod;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
	cmd.bintval = htole16(ni->ni_intval);
	cmd.lintval = htole16(10);

	/* Compute remaining time until next beacon. */
	val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
	mod = le64toh(cmd.tstamp) % val;
	cmd.binitval = htole32((uint32_t)(val - mod));

	DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
	    ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));

	return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
}

/*
 * 4965AGN: re-run TX power calibration when the temperature has drifted
 * at least 3 degC since the last calibration.
 */
static void
iwn4965_power_calibration(struct iwn_softc *sc, int temp)
{
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Adjust TX power if need be (delta >= 3 degC). */
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
	    __func__, sc->temp, temp);
	if (abs(temp - sc->temp) >= 3) {
		/* Record temperature of last calibration. */
		sc->temp = temp;
		(void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
	}
}

/*
 * Set TX power for current channel (each rate has its own power settings).
 * This function takes into account the regulatory information from EEPROM,
 * the current temperature and the current voltage.
 */
static int
iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
    int async)
{
/* Fixed-point arithmetic division using a n-bit fractional part. */
#define fdivround(a, b, n)	\
	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
/* Linear interpolation.
 */
#define interpolate(x, x1, y1, x2, y2, n)	\
	((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))

	static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
	struct iwn_ucode_info *uc = &sc->ucode_info;
	struct iwn4965_cmd_txpower cmd;
	struct iwn4965_eeprom_chan_samples *chans;
	const uint8_t *rf_gain, *dsp_gain;
	int32_t vdiff, tdiff;
	int i, c, grp, maxpwr;
	uint8_t chan;

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	/* Retrieve current channel from last RXON. */
	chan = sc->rxon->chan;
	DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
	    chan);

	memset(&cmd, 0, sizeof cmd);
	cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
	cmd.chan = chan;

	if (IEEE80211_IS_CHAN_5GHZ(ch)) {
		maxpwr   = sc->maxpwr5GHz;
		rf_gain  = iwn4965_rf_gain_5ghz;
		dsp_gain = iwn4965_dsp_gain_5ghz;
	} else {
		maxpwr   = sc->maxpwr2GHz;
		rf_gain  = iwn4965_rf_gain_2ghz;
		dsp_gain = iwn4965_dsp_gain_2ghz;
	}

	/* Compute voltage compensation. */
	vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
	if (vdiff > 0)
		vdiff *= 2;
	if (abs(vdiff) > 2)
		vdiff = 0;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
	    __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);

	/* Get channel attenuation group. */
	if (chan <= 20)		/* 1-20 */
		grp = 4;
	else if (chan <= 43)	/* 34-43 */
		grp = 0;
	else if (chan <= 70)	/* 44-70 */
		grp = 1;
	else if (chan <= 124)	/* 71-124 */
		grp = 2;
	else			/* 125-200 */
		grp = 3;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);

	/* Get channel sub-band. */
	for (i = 0; i < IWN_NBANDS; i++)
		if (sc->bands[i].lo != 0 &&
		    sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
			break;
	if (i == IWN_NBANDS)	/* Can't happen in real-life. */
		return EINVAL;
	chans = sc->bands[i].chans;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: chan %d sub-band=%d\n", __func__, chan, i);

	/* Interpolate EEPROM calibration samples for each TX chain. */
	for (c = 0; c < 2; c++) {
		uint8_t power, gain, temp;
		int maxchpwr, pwr, ridx, idx;

		power = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].power,
		    chans[1].num, chans[1].samples[c][1].power, 1);
		gain  = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].gain,
		    chans[1].num, chans[1].samples[c][1].gain, 1);
		temp  = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].temp,
		    chans[1].num, chans[1].samples[c][1].temp, 1);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
		    "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
		    __func__, c, power, gain, temp);

		/* Compute temperature compensation. */
		tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
		    "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
		    __func__, tdiff, sc->temp, temp);

		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
			/* Convert dBm to half-dBm. */
			maxchpwr = sc->maxpwr[chan] * 2;
			if ((ridx / 8) & 1)
				maxchpwr -= 6;	/* MIMO 2T: -3dB */

			pwr = maxpwr;

			/* Adjust TX power based on rate. */
			if ((ridx % 8) == 5)
				pwr -= 15;	/* OFDM48: -7.5dB */
			else if ((ridx % 8) == 6)
				pwr -= 17;	/* OFDM54: -8.5dB */
			else if ((ridx % 8) == 7)
				pwr -= 20;	/* OFDM60: -10dB */
			else
				pwr -= 10;	/* Others: -5dB */

			/* Do not exceed channel max TX power. */
			if (pwr > maxchpwr)
				pwr = maxchpwr;

			idx = gain - (pwr - power) - tdiff - vdiff;
			if ((ridx / 8) & 1)	/* MIMO */
				idx += (int32_t)le32toh(uc->atten[grp][c]);

			if (cmd.band == 0)
				idx += 9;	/* 5GHz */
			if (ridx == IWN_RIDX_MAX)
				idx += 5;	/* CCK */

			/* Make sure idx stays in a valid range. */
			if (idx < 0)
				idx = 0;
			else if (idx > IWN4965_MAX_PWR_INDEX)
				idx = IWN4965_MAX_PWR_INDEX;

			DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
			    "%s: Tx chain %d, rate idx %d: power=%d\n",
			    __func__, c, ridx, idx);
			cmd.power[ridx].rf_gain[c] = rf_gain[idx];
			cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
		}
	}

	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: set tx power for chan %d\n", __func__, chan);
	return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);

#undef interpolate
#undef fdivround
}

/*
 * 5000 Series and newer: the firmware calibrates TX power itself; we
 * only send the global limit and let it do the rest.
 */
static int
iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
    int async)
{
	struct iwn5000_cmd_txpower cmd;
	int cmdid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/*
	 * TX power calibration is handled automatically by the firmware
	 * for 5000 Series.
	 */
	memset(&cmd, 0, sizeof cmd);
	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT,
	    "%s: setting TX power; rev=%d\n",
	    __func__,
	    IWN_UCODE_API(sc->ucode_rev));
	/* Older (API v1) firmware uses a different command id. */
	if (IWN_UCODE_API(sc->ucode_rev) == 1)
		cmdid = IWN_CMD_TXPOWER_DBM_V1;
	else
		cmdid = IWN_CMD_TXPOWER_DBM;
	return iwn_cmd(sc, cmdid, &cmd, sizeof cmd, async);
}

/*
 * Retrieve the maximum RSSI (in dBm) among receivers.
 */
static int
iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
{
	struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
	uint8_t mask, agc;
	int rssi;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
	agc  = (le16toh(phy->agc) >> 7) & 0x7f;

	/* Take the strongest of the antennas flagged in the mask. */
	rssi = 0;
	if (mask & IWN_ANT_A)
		rssi = MAX(rssi, phy->rssi[0]);
	if (mask & IWN_ANT_B)
		rssi = MAX(rssi, phy->rssi[2]);
	if (mask & IWN_ANT_C)
		rssi = MAX(rssi, phy->rssi[4]);

	DPRINTF(sc, IWN_DEBUG_RECV,
	    "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc,
	    mask, phy->rssi[0], phy->rssi[2], phy->rssi[4],
	    rssi - agc - IWN_RSSI_TO_DBM);
	return rssi - agc - IWN_RSSI_TO_DBM;
}

/* 5000 Series variant of the above: different PHY stats layout. */
static int
iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
{
	struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
	uint8_t agc;
	int rssi;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	agc = (le32toh(phy->agc) >> 9) & 0x7f;

	rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
		   le16toh(phy->rssi[1]) & 0xff);
	rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);

	DPRINTF(sc, IWN_DEBUG_RECV,
	    "%s: agc %d rssi %d %d %d result %d\n", __func__, agc,
	    phy->rssi[0], phy->rssi[1], phy->rssi[2],
	    rssi - agc - IWN_RSSI_TO_DBM);
	return rssi - agc - IWN_RSSI_TO_DBM;
}

/*
 * Retrieve the average noise (in dBm) among receivers.
 */
static int
iwn_get_noise(const struct iwn_rx_general_stats *stats)
{
	int i, total, nbant, noise;

	/* Average over antennas reporting a non-zero noise figure. */
	total = nbant = 0;
	for (i = 0; i < 3; i++) {
		if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
			continue;
		total += noise;
		nbant++;
	}
	/* There should be at least one antenna but check anyway. */
	return (nbant == 0) ? -127 : (total / nbant) - 107;
}

/*
 * Compute temperature (in degC) from last received statistics.
 */
static int
iwn4965_get_temperature(struct iwn_softc *sc)
{
	struct iwn_ucode_info *uc = &sc->ucode_info;
	int32_t r1, r2, r3, r4, temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	r1 = le32toh(uc->temp[0].chan20MHz);
	r2 = le32toh(uc->temp[1].chan20MHz);
	r3 = le32toh(uc->temp[2].chan20MHz);
	r4 = le32toh(sc->rawtemp);

	if (r1 == r3)	/* Prevents division by 0 (should not happen). */
		return 0;

	/* Sign-extend 23-bit R4 value to 32-bit. */
	r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
	/* Compute temperature in Kelvin. */
	temp = (259 * (r4 - r2)) / (r3 - r1);
	temp = (temp * 97) / 100 + 8;

	DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
	    IWN_KTOC(temp));
	return IWN_KTOC(temp);
}

static int
iwn5000_get_temperature(struct iwn_softc *sc)
{
	int32_t temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/*
	 * Temperature is not used by the driver for 5000 Series because
	 * TX power calibration is handled by firmware.
	 */
	temp = le32toh(sc->rawtemp);
	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		temp = (temp / -5) + sc->temp_off;
		temp = IWN_KTOC(temp);
	}
	return temp;
}

/*
 * Initialize sensitivity calibration state machine.
 */
static int
iwn_init_sensitivity(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct iwn_calib_state *calib = &sc->calib;
	uint32_t flags;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Reset calibration state machine. */
	memset(calib, 0, sizeof (*calib));
	calib->state = IWN_CALIB_STATE_INIT;
	calib->cck_state = IWN_CCK_STATE_HIFA;
	/* Set initial correlation values.
 */
	calib->ofdm_x1     = sc->limits->min_ofdm_x1;
	calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
	calib->ofdm_x4     = sc->limits->min_ofdm_x4;
	calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
	/* NB: cck_x4 starts from a fixed 125, not from sc->limits. */
	calib->cck_x4      = 125;
	calib->cck_mrc_x4  = sc->limits->min_cck_mrc_x4;
	calib->energy_cck  = sc->limits->energy_cck;

	/* Write initial sensitivity. */
	if ((error = iwn_send_sensitivity(sc)) != 0)
		return error;

	/* Write initial gains. */
	if ((error = ops->init_gains(sc)) != 0)
		return error;

	/* Request statistics at each beacon interval. */
	flags = 0;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n",
	    __func__);
	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
}

/*
 * Collect noise and RSSI statistics for the first 20 beacons received
 * after association and use them to determine connected antennas and
 * to set differential gains.
 */
static void
iwn_collect_noise(struct iwn_softc *sc,
    const struct iwn_rx_general_stats *stats)
{
	struct iwn_ops *ops = &sc->ops;
	struct iwn_calib_state *calib = &sc->calib;
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t val;
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Accumulate RSSI and noise for all 3 antennas. */
	for (i = 0; i < 3; i++) {
		calib->rssi[i]  += le32toh(stats->rssi[i]) & 0xff;
		calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
	}
	/* NB: We update differential gains only once after 20 beacons. */
	if (++calib->nbeacons < 20)
		return;

	/* Determine highest average RSSI. */
	val = MAX(calib->rssi[0], calib->rssi[1]);
	val = MAX(calib->rssi[2], val);

	/*
	 * Determine which antennas are connected: an antenna averaging
	 * more than 15 units below the best one (15 * 20 accumulated
	 * samples) is considered disconnected.
	 */
	sc->chainmask = sc->rxchainmask;
	for (i = 0; i < 3; i++)
		if (val - calib->rssi[i] > 15 * 20)
			sc->chainmask &= ~(1 << i);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT,
	    "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
	    __func__, sc->rxchainmask, sc->chainmask);

	/* If none of the TX antennas are connected, keep at least one. */
	if ((sc->chainmask & sc->txchainmask) == 0)
		sc->chainmask |= IWN_LSB(sc->txchainmask);

	(void)ops->set_gains(sc);
	calib->state = IWN_CALIB_STATE_RUN;

#ifdef notyet
	/* XXX Disable RX chains with no antennas connected. */
	sc->rxon->rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
	if (sc->sc_is_scanning)
		device_printf(sc->sc_dev,
		    "%s: is_scanning set, before RXON\n",
		    __func__);
	(void)iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
#endif

	/* Enable power-saving mode if requested by user. */
	if (ic->ic_flags & IEEE80211_F_PMGTON)
		(void)iwn_set_pslevel(sc, 0, 3, 1);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

}

/*
 * 4965AGN: send the initial (zeroed) differential-gain calibration.
 */
static int
iwn4965_init_gains(struct iwn_softc *sc)
{
	struct iwn_phy_calib_gain cmd;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
	/* Differential gains initially set to 0 for all 3 antennas.
 */
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: setting initial differential gains\n", __func__);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * 5000 Series and newer: ask firmware to reset the noise-gain
 * calibration to its defaults.
 */
static int
iwn5000_init_gains(struct iwn_softc *sc)
{
	struct iwn_phy_calib cmd;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = sc->reset_noise_gain;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: setting initial differential gains\n", __func__);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * 4965AGN: compute per-antenna differential gains relative to the
 * quietest connected antenna, from the noise accumulated by
 * iwn_collect_noise().
 */
static int
iwn4965_set_gains(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_phy_calib_gain cmd;
	int i, delta, noise;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Get minimal noise among connected antennas. */
	noise = INT_MAX;	/* NB: There's at least one antenna. */
	for (i = 0; i < 3; i++)
		if (sc->chainmask & (1 << i))
			noise = MIN(calib->noise[i], noise);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
	/* Set differential gains for connected antennas. */
	for (i = 0; i < 3; i++) {
		if (sc->chainmask & (1 << i)) {
			/* Compute attenuation (in unit of 1.5dB). */
			delta = (noise - (int32_t)calib->noise[i]) / 30;
			/* NB: delta <= 0 */
			/* Limit to [-4.5dB,0]. */
			cmd.gain[i] = MIN(abs(delta), 3);
			if (delta < 0)
				cmd.gain[i] |= 1 << 2;	/* sign bit */
		}
	}
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
	    cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * 5000 Series variant: gains are relative to the first available RX
 * antenna rather than the quietest one.
 */
static int
iwn5000_set_gains(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_phy_calib_gain cmd;
	int i, ant, div, delta;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* We collected 20 beacons and !=6050 need a 1.5 factor. */
	div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = sc->noise_gain;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	/* Get first available RX antenna as referential. */
	ant = IWN_LSB(sc->rxchainmask);
	/* Set differential gains for other antennas. */
	for (i = ant + 1; i < 3; i++) {
		if (sc->chainmask & (1 << i)) {
			/* The delta is relative to antenna "ant". */
			delta = ((int32_t)calib->noise[ant] -
			    (int32_t)calib->noise[i]) / div;
			/* Limit to [-4.5dB,+4.5dB]. */
			cmd.gain[i - 1] = MIN(abs(delta), 3);
			if (delta < 0)
				cmd.gain[i - 1] |= 1 << 2;	/* sign bit */
		}
	}
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT,
	    "setting differential gains Ant B/C: %x/%x (%x)\n",
	    cmd.gain[0], cmd.gain[1], sc->chainmask);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * Tune RF RX sensitivity based on the number of false alarms detected
 * during the last beacon period.
 */
static void
iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
{
/* Saturating add: bump `val' toward `max'; flag an update if changed. */
#define inc(val, inc, max)			\
	if ((val) < (max)) {			\
		if ((val) < (max) - (inc))	\
			(val) += (inc);		\
		else				\
			(val) = (max);		\
		needs_update = 1;		\
	}
/* Saturating subtract: drop `val' toward `min'; flag an update if changed. */
#define dec(val, dec, min)			\
	if ((val) > (min)) {			\
		if ((val) > (min) + (dec))	\
			(val) -= (dec);		\
		else				\
			(val) = (min);		\
		needs_update = 1;		\
	}

	const struct iwn_sensitivity_limits *limits = sc->limits;
	struct iwn_calib_state *calib = &sc->calib;
	uint32_t val, rxena, fa;
	uint32_t energy[3], energy_min;
	uint8_t noise[3], noise_ref;
	int i, needs_update = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Check that we've been enabled long enough. */
	if ((rxena = le32toh(stats->general.load)) == 0){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end not so long\n", __func__);
		return;
	}

	/* Compute number of false alarms since last call for OFDM. */
	fa  = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
	fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */

	if (fa > 50 * rxena) {
		/* High false alarm count, decrease sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: OFDM high false alarm count: %u\n", __func__, fa);
		inc(calib->ofdm_x1,     1, limits->max_ofdm_x1);
		inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
		inc(calib->ofdm_x4,     1, limits->max_ofdm_x4);
		inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);

	} else if (fa < 5 * rxena) {
		/* Low false alarm count, increase sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: OFDM low false alarm count: %u\n", __func__, fa);
		dec(calib->ofdm_x1,     1, limits->min_ofdm_x1);
		dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
		dec(calib->ofdm_x4,     1, limits->min_ofdm_x4);
		dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
	}

	/* Compute maximum noise among 3 receivers. */
	for (i = 0; i < 3; i++)
		noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
	val = MAX(noise[0], noise[1]);
	val = MAX(noise[2], val);
	/* Insert it into our samples table. */
	calib->noise_samples[calib->cur_noise_sample] = val;
	calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;

	/* Compute maximum noise among last 20 samples. */
	noise_ref = calib->noise_samples[0];
	for (i = 1; i < 20; i++)
		noise_ref = MAX(noise_ref, calib->noise_samples[i]);

	/* Compute maximum energy among 3 receivers. */
	for (i = 0; i < 3; i++)
		energy[i] = le32toh(stats->general.energy[i]);
	val = MIN(energy[0], energy[1]);
	val = MIN(energy[2], val);
	/* Insert it into our samples table. */
	calib->energy_samples[calib->cur_energy_sample] = val;
	calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;

	/*
	 * Compute minimum energy among last 10 samples.
	 * NOTE(review): this uses MAX although the comment says minimum —
	 * presumably the hardware energy metric is inverted (larger raw
	 * value = weaker signal), as samples are stored via MIN above;
	 * confirm before "fixing".
	 */
	energy_min = calib->energy_samples[0];
	for (i = 1; i < 10; i++)
		energy_min = MAX(energy_min, calib->energy_samples[i]);
	energy_min += 6;

	/* Compute number of false alarms since last call for CCK. */
	fa  = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
	fa += le32toh(stats->cck.fa) - calib->fa_cck;
	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */

	if (fa > 50 * rxena) {
		/* High false alarm count, decrease sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: CCK high false alarm count: %u\n", __func__, fa);
		calib->cck_state = IWN_CCK_STATE_HIFA;
		calib->low_fa = 0;

		if (calib->cck_x4 > 160) {
			calib->noise_ref = noise_ref;
			if (calib->energy_cck > 2)
				dec(calib->energy_cck, 2, energy_min);
		}
		if (calib->cck_x4 < 160) {
			calib->cck_x4 = 161;
			needs_update = 1;
		} else
			inc(calib->cck_x4, 3, limits->max_cck_x4);

		inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);

	} else if (fa < 5 * rxena) {
		/* Low false alarm count, increase sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: CCK low false alarm count: %u\n", __func__, fa);
		calib->cck_state = IWN_CCK_STATE_LOFA;
		calib->low_fa++;

		if (calib->cck_state != IWN_CCK_STATE_INIT &&
		    (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
		     calib->low_fa > 100)) {
			inc(calib->energy_cck, 2, limits->min_energy_cck);
			dec(calib->cck_x4,     3, limits->min_cck_x4);
			dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
		}
	} else {
		/* Not worth to increase or decrease sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: CCK normal false alarm count: %u\n", __func__, fa);
		calib->low_fa = 0;
		calib->noise_ref = noise_ref;

		if (calib->cck_state == IWN_CCK_STATE_HIFA) {
			/* Previous interval had many false alarms. */
			dec(calib->energy_cck, 8, energy_min);
		}
		calib->cck_state = IWN_CCK_STATE_INIT;
	}

	if (needs_update)
		(void)iwn_send_sensitivity(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

#undef dec
#undef inc
}

/*
 * Push the current sensitivity thresholds to the firmware; sends the
 * enhanced-sensitivity variant when the hardware supports it.
 */
static int
iwn_send_sensitivity(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_enhanced_sensitivity_cmd cmd;
	int len;

	memset(&cmd, 0, sizeof cmd);
	/* Basic command length; extended below for enhanced sensitivity. */
	len = sizeof (struct iwn_sensitivity_cmd);
	cmd.which = IWN_SENSITIVITY_WORKTBL;
	/* OFDM modulation. */
	cmd.corr_ofdm_x1       = htole16(calib->ofdm_x1);
	cmd.corr_ofdm_mrc_x1   = htole16(calib->ofdm_mrc_x1);
	cmd.corr_ofdm_x4       = htole16(calib->ofdm_x4);
	cmd.corr_ofdm_mrc_x4   = htole16(calib->ofdm_mrc_x4);
	cmd.energy_ofdm        = htole16(sc->limits->energy_ofdm);
	cmd.energy_ofdm_th     = htole16(62);
	/* CCK modulation. */
	cmd.corr_cck_x4        = htole16(calib->cck_x4);
	cmd.corr_cck_mrc_x4    = htole16(calib->cck_mrc_x4);
	cmd.energy_cck         = htole16(calib->energy_cck);
	/* Barker modulation: use default values. */
	cmd.corr_barker        = htole16(190);
	cmd.corr_barker_mrc    = htole16(sc->limits->barker_mrc);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
	    calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
	    calib->ofdm_mrc_x4, calib->cck_x4,
	    calib->cck_mrc_x4, calib->energy_cck);

	if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
		goto send;
	/* Enhanced sensitivity settings.
*/ 6321 len = sizeof (struct iwn_enhanced_sensitivity_cmd); 6322 cmd.ofdm_det_slope_mrc = htole16(668); 6323 cmd.ofdm_det_icept_mrc = htole16(4); 6324 cmd.ofdm_det_slope = htole16(486); 6325 cmd.ofdm_det_icept = htole16(37); 6326 cmd.cck_det_slope_mrc = htole16(853); 6327 cmd.cck_det_icept_mrc = htole16(4); 6328 cmd.cck_det_slope = htole16(476); 6329 cmd.cck_det_icept = htole16(99); 6330 send: 6331 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1); 6332 } 6333 6334 /* 6335 * Look at the increase of PLCP errors over time; if it exceeds 6336 * a programmed threshold then trigger an RF retune. 6337 */ 6338 static void 6339 iwn_check_rx_recovery(struct iwn_softc *sc, struct iwn_stats *rs) 6340 { 6341 int32_t delta_ofdm, delta_ht, delta_cck; 6342 struct iwn_calib_state *calib = &sc->calib; 6343 int delta_ticks, cur_ticks; 6344 int delta_msec; 6345 int thresh; 6346 6347 /* 6348 * Calculate the difference between the current and 6349 * previous statistics. 6350 */ 6351 delta_cck = le32toh(rs->rx.cck.bad_plcp) - calib->bad_plcp_cck; 6352 delta_ofdm = le32toh(rs->rx.ofdm.bad_plcp) - calib->bad_plcp_ofdm; 6353 delta_ht = le32toh(rs->rx.ht.bad_plcp) - calib->bad_plcp_ht; 6354 6355 /* 6356 * Calculate the delta in time between successive statistics 6357 * messages. Yes, it can roll over; so we make sure that 6358 * this doesn't happen. 6359 * 6360 * XXX go figure out what to do about rollover 6361 * XXX go figure out what to do if ticks rolls over to -ve instead! 6362 * XXX go stab signed integer overflow undefined-ness in the face. 6363 */ 6364 cur_ticks = ticks; 6365 delta_ticks = cur_ticks - sc->last_calib_ticks; 6366 6367 /* 6368 * If any are negative, then the firmware likely reset; so just 6369 * bail. We'll pick this up next time. 6370 */ 6371 if (delta_cck < 0 || delta_ofdm < 0 || delta_ht < 0 || delta_ticks < 0) 6372 return; 6373 6374 /* 6375 * delta_ticks is in ticks; we need to convert it up to milliseconds 6376 * so we can do some useful math with it. 
	 */
	delta_msec = ticks_to_msecs(delta_ticks);

	/*
	 * Calculate what our threshold is given the current delta_msec.
	 */
	thresh = sc->base_params->plcp_err_threshold * delta_msec;

	DPRINTF(sc, IWN_DEBUG_STATE,
	    "%s: time delta: %d; cck=%d, ofdm=%d, ht=%d, total=%d, thresh=%d\n",
	    __func__,
	    delta_msec,
	    delta_cck,
	    delta_ofdm,
	    delta_ht,
	    (delta_msec + delta_cck + delta_ofdm + delta_ht),
	    thresh);

	/*
	 * If we need a retune, then schedule a single channel scan
	 * to a channel that isn't the currently active one!
	 *
	 * The math from linux iwlwifi:
	 *
	 * if ((delta * 100 / msecs) > threshold)
	 */
	if (thresh > 0 && (delta_cck + delta_ofdm + delta_ht) * 100 > thresh) {
		/* NOTE(review): only logs; the retune itself is not implemented here. */
		DPRINTF(sc, IWN_DEBUG_ANY,
		    "%s: PLCP error threshold raw (%d) comparison (%d) "
		    "over limit (%d); retune!\n",
		    __func__,
		    (delta_cck + delta_ofdm + delta_ht),
		    (delta_cck + delta_ofdm + delta_ht) * 100,
		    thresh);
	}
}

/*
 * Set STA mode power saving level (between 0 and 5).
 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
 */
static int
iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
{
	struct iwn_pmgt_cmd cmd;
	const struct iwn_pmgt *pmgt;
	uint32_t max, skip_dtim;
	uint32_t reg;
	int i;

	DPRINTF(sc, IWN_DEBUG_PWRSAVE,
	    "%s: dtim=%d, level=%d, async=%d\n",
	    __func__,
	    dtim,
	    level,
	    async);

	/* Select which PS parameters to use, keyed by the DTIM period. */
	if (dtim <= 2)
		pmgt = &iwn_pmgt[0][level];
	else if (dtim <= 10)
		pmgt = &iwn_pmgt[1][level];
	else
		pmgt = &iwn_pmgt[2][level];

	memset(&cmd, 0, sizeof cmd);
	if (level != 0)	/* not CAM */
		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
	if (level == 5)
		cmd.flags |= htole16(IWN_PS_FAST_PD);
	/* Retrieve PCIe Active State Power Management (ASPM). */
#if defined(__DragonFly__)
	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINKCTRL, 4);
	if (!(reg & PCIEM_LNKCTL_ASPM_L0S))	/* L0s Entry disabled. */
		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
#else
	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 4);
	if (!(reg & PCIEM_LINK_CTL_ASPMC_L0S))	/* L0s Entry disabled. */
		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
#endif
	/* Timeouts are stored in units of 1024 usec (~1 ms). */
	cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
	cmd.txtimeout = htole32(pmgt->txtimeout * 1024);

	if (dtim == 0) {
		dtim = 1;
		skip_dtim = 0;
	} else
		skip_dtim = pmgt->skip_dtim;
	if (skip_dtim != 0) {
		cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
		max = pmgt->intval[4];
		if (max == (uint32_t)-1)
			max = dtim * (skip_dtim + 1);
		else if (max > dtim)
			max = rounddown(max, dtim);
	} else
		max = dtim;
	for (i = 0; i < 5; i++)
		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));

	DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
	    level);
	return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
}

/*
 * Configure basic (legacy) bluetooth coexistence.
 */
static int
iwn_send_btcoex(struct iwn_softc *sc)
{
	struct iwn_bluetooth cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
	cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
	cmd.max_kill = IWN_BT_MAX_KILL_DEF;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
	    __func__);
	return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd,
	    sizeof(cmd), 0);
}

/*
 * Configure advanced bluetooth coexistence (6000/2000 series parts):
 * send the main coexistence configuration (session-2 layout for the
 * 2000 series), then the priority table and protection commands.
 */
static int
iwn_send_advanced_btcoex(struct iwn_softc *sc)
{
	/* 3-wire coexistence lookup table, as used by Linux iwlwifi. */
	static const uint32_t btcoex_3wire[12] = {
		0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
		0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
		0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
	};
	struct iwn6000_btcoex_config btconfig;
	struct iwn2000_btcoex_config btconfig2k;
	struct iwn_btcoex_priotable btprio;
	struct iwn_btcoex_prot btprot;
	int error, i;
	uint8_t flags;

	memset(&btconfig, 0, sizeof btconfig);
	memset(&btconfig2k, 0, sizeof btconfig2k);

	flags = IWN_BT_FLAG_COEX6000_MODE_3W <<
	    IWN_BT_FLAG_COEX6000_MODE_SHIFT;	// Done as is in linux kernel 3.2

	if (sc->base_params->bt_sco_disable)
		flags &= ~IWN_BT_FLAG_SYNC_2_BT_DISABLE;
	else
		flags |= IWN_BT_FLAG_SYNC_2_BT_DISABLE;

	flags |= IWN_BT_FLAG_COEX6000_CHAN_INHIBITION;

	/* Default flags result is 145 as old value */

	/*
	 * Flags value has to be reviewed.  Values must change if we
	 * wish to disable it
	 */
	if (sc->base_params->bt_session_2) {
		/* 2000-series parts use the session-2 config layout. */
		btconfig2k.flags = flags;
		btconfig2k.max_kill = 5;
		btconfig2k.bt3_t7_timer = 1;
		btconfig2k.kill_ack = htole32(0xffff0000);
		btconfig2k.kill_cts = htole32(0xffff0000);
		btconfig2k.sample_time = 2;
		btconfig2k.bt3_t2_timer = 0xc;

		for (i = 0; i < 12; i++)
			btconfig2k.lookup_table[i] = htole32(btcoex_3wire[i]);
		btconfig2k.valid = htole16(0xff);
		btconfig2k.prio_boost = htole32(0xf0);
		DPRINTF(sc, IWN_DEBUG_RESET,
		    "%s: configuring advanced bluetooth coexistence"
		    " session 2, flags : 0x%x\n",
		    __func__,
		    flags);
		error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig2k,
		    sizeof(btconfig2k), 1);
	} else {
		btconfig.flags = flags;
		btconfig.max_kill = 5;
		btconfig.bt3_t7_timer = 1;
		btconfig.kill_ack = htole32(0xffff0000);
		btconfig.kill_cts = htole32(0xffff0000);
		btconfig.sample_time = 2;
		btconfig.bt3_t2_timer = 0xc;

		for (i = 0; i < 12; i++)
			btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
		btconfig.valid = htole16(0xff);
		btconfig.prio_boost = 0xf0;
		DPRINTF(sc, IWN_DEBUG_RESET,
		    "%s: configuring advanced bluetooth coexistence,"
		    " flags : 0x%x\n",
		    __func__,
		    flags);
		error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig,
		    sizeof(btconfig), 1);
	}

	if (error != 0)
		return error;

	/* Send the priority table used for calibration/DTIM/scan slots. */
	memset(&btprio, 0, sizeof btprio);
	btprio.calib_init1 = 0x6;
	btprio.calib_init2 = 0x7;
	btprio.calib_periodic_low1 = 0x2;
	btprio.calib_periodic_low2 = 0x3;
	btprio.calib_periodic_high1 = 0x4;
	btprio.calib_periodic_high2 = 0x5;
	btprio.dtim = 0x6;
	btprio.scan52 = 0x8;
	btprio.scan24 = 0xa;
	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio),
	    1);
	if (error != 0)
		return error;

	/* Force BT state machine change.
	 */
	memset(&btprot, 0, sizeof btprot);
	btprot.open = 1;
	btprot.type = 1;
	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
	if (error != 0)
		return error;
	/* Open, then immediately close, the protection window. */
	btprot.open = 0;
	return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
}

/*
 * Enable runtime DC calibration (used on the 6050 parts).
 */
static int
iwn5000_runtime_calib(struct iwn_softc *sc)
{
	struct iwn5000_calib_config cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.ucode.once.enable = 0xffffffff;
	cmd.ucode.once.start = IWN5000_CALIB_DC;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: configuring runtime calibration\n", __func__);
	return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
}

/*
 * Compute the RXON HT flags for the given channel: HT protection
 * mode, pure-40/mixed operation and HT40 secondary channel placement.
 * Returns 0 for non-HT channels.
 */
static uint32_t
iwn_get_rxon_ht_flags(struct iwn_softc *sc, struct ieee80211_channel *c)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t htflags = 0;

	if (! IEEE80211_IS_CHAN_HT(c))
		return (0);

	htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode);

	if (IEEE80211_IS_CHAN_HT40(c)) {
		switch (ic->ic_curhtprotmode) {
		case IEEE80211_HTINFO_OPMODE_HT20PR:
			htflags |= IWN_RXON_HT_MODEPURE40;
			break;
		default:
			htflags |= IWN_RXON_HT_MODEMIXED;
			break;
		}
	}
	if (IEEE80211_IS_CHAN_HT40D(c))
		htflags |= IWN_RXON_HT_HT40MINUS;

	return (htflags);
}

/*
 * Bring the adapter to an operational state: temperature offset
 * calibration, TX chain setup, bluetooth coexistence, the initial
 * RXON, broadcast node, TX power, critical temperature and the
 * power-save level.
 */
static int
iwn_config(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	const uint8_t *macaddr;
	uint32_t txmask;
	uint16_t rxchain;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* The two temperature-offset calibration flavors are exclusive. */
	if ((sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET)
	    && (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)) {
		device_printf(sc->sc_dev,"%s: temp_offset and temp_offsetv2 are"
		    " exclusive each together. Review NIC config file. Conf"
		    " : 0x%08x Flags : 0x%08x \n", __func__,
		    sc->base_params->calib_need,
		    (IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET |
		    IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2));
		return (EINVAL);
	}

	/* Compute temperature calib if needed. Will be send by send calib */
	if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) {
		error = iwn5000_temp_offset_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not set temperature offset\n", __func__);
			return (error);
		}
	} else if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
		error = iwn5000_temp_offset_calibv2(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not compute temperature offset v2\n",
			    __func__);
			return (error);
		}
	}

	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
		/* Configure runtime DC calibration. */
		error = iwn5000_runtime_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not configure runtime calibration\n",
			    __func__);
			return error;
		}
	}

	/* Configure valid TX chains for >=5000 Series. */
	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
	    IWN_UCODE_API(sc->ucode_rev) > 1) {
		txmask = htole32(sc->txchainmask);
		DPRINTF(sc, IWN_DEBUG_RESET | IWN_DEBUG_XMIT,
		    "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
		    sizeof txmask, 0);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not configure valid TX chains, "
			    "error %d\n", __func__, error);
			return error;
		}
	}

	/* Configure bluetooth coexistence. */
	error = 0;

	/* Configure bluetooth coexistence if needed.
	 */
	if (sc->base_params->bt_mode == IWN_BT_ADVANCED)
		error = iwn_send_advanced_btcoex(sc);
	if (sc->base_params->bt_mode == IWN_BT_SIMPLE)
		error = iwn_send_btcoex(sc);

	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not configure bluetooth coexistence, error %d\n",
		    __func__, error);
		return error;
	}

	/* Set mode, channel, RX filter and enable RX. */
	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	memset(sc->rxon, 0, sizeof (struct iwn_rxon));
	/* Use the vap address when one exists, else the device address. */
	macaddr = vap ? vap->iv_myaddr : ic->ic_macaddr;
	IEEE80211_ADDR_COPY(sc->rxon->myaddr, macaddr);
	IEEE80211_ADDR_COPY(sc->rxon->wlap, macaddr);
	sc->rxon->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		sc->rxon->mode = IWN_MODE_STA;
		sc->rxon->filter = htole32(IWN_FILTER_MULTICAST);
		break;
	case IEEE80211_M_MONITOR:
		sc->rxon->mode = IWN_MODE_MONITOR;
		sc->rxon->filter = htole32(IWN_FILTER_MULTICAST |
		    IWN_FILTER_CTL | IWN_FILTER_PROMISC);
		break;
	default:
		/* Should not get there. */
		break;
	}
	sc->rxon->cck_mask = 0x0f;	/* not yet negotiated */
	sc->rxon->ofdm_mask = 0xff;	/* not yet negotiated */
	sc->rxon->ht_single_mask = 0xff;
	sc->rxon->ht_dual_mask = 0xff;
	sc->rxon->ht_triple_mask = 0xff;
	/*
	 * In active association mode, ensure that
	 * all the receive chains are enabled.
	 *
	 * Since we're not yet doing SMPS, don't allow the
	 * number of idle RX chains to be less than the active
	 * number.
	 */
	rxchain =
	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
	    IWN_RXCHAIN_MIMO_COUNT(sc->nrxchains) |
	    IWN_RXCHAIN_IDLE_COUNT(sc->nrxchains);
	sc->rxon->rxchain = htole16(rxchain);
	DPRINTF(sc, IWN_DEBUG_RESET | IWN_DEBUG_XMIT,
	    "%s: rxchainmask=0x%x, nrxchains=%d\n",
	    __func__,
	    sc->rxchainmask,
	    sc->nrxchains);

	sc->rxon->flags |= htole32(iwn_get_rxon_ht_flags(sc, ic->ic_curchan));

	DPRINTF(sc, IWN_DEBUG_RESET,
	    "%s: setting configuration; flags=0x%08x\n",
	    __func__, le32toh(sc->rxon->flags));
	if (sc->sc_is_scanning)
		device_printf(sc->sc_dev,
		    "%s: is_scanning set, before RXON\n",
		    __func__);
	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 0);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: RXON command failed\n",
		    __func__);
		return error;
	}

	if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
		device_printf(sc->sc_dev, "%s: could not add broadcast node\n",
		    __func__);
		return error;
	}

	/* Configuration has changed, set TX power accordingly. */
	if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) {
		device_printf(sc->sc_dev, "%s: could not set TX power\n",
		    __func__);
		return error;
	}

	if ((error = iwn_set_critical_temp(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set critical temperature\n", __func__);
		return error;
	}

	/* Set power saving level to CAM during initialization. */
	if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set power saving level\n", __func__);
		return error;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * Return the active-scan dwell time (in TU) for the given channel,
 * scaled by the number of probe requests to transmit.
 */
static uint16_t
iwn_get_active_dwell_time(struct iwn_softc *sc,
    struct ieee80211_channel *c, uint8_t n_probes)
{
	/* No channel?
	   Default to 2GHz settings */
	if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
		return (IWN_ACTIVE_DWELL_TIME_2GHZ +
		    IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1));
	}

	/* 5GHz dwell time */
	return (IWN_ACTIVE_DWELL_TIME_5GHZ +
	    IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1));
}

/*
 * Limit the total dwell time to 85% of the beacon interval.
 *
 * Returns the dwell time in milliseconds.
 */
static uint16_t
iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = NULL;
	int bintval = 0;

	/* bintval is in TU (1.024mS) */
	if (! TAILQ_EMPTY(&ic->ic_vaps)) {
		vap = TAILQ_FIRST(&ic->ic_vaps);
		bintval = vap->iv_bss->ni_intval;
	}

	/*
	 * If it's non-zero, we should calculate the minimum of
	 * it and the DWELL_BASE.
	 *
	 * XXX Yes, the math should take into account that bintval
	 * is 1.024mS, not 1mS..
	 */
	if (bintval > 0) {
		DPRINTF(sc, IWN_DEBUG_SCAN,
		    "%s: bintval=%d\n",
		    __func__,
		    bintval);
		return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100)));
	}

	/* No association context? Default */
	return (IWN_PASSIVE_DWELL_BASE);
}

/*
 * Return the passive-scan dwell time for the channel's band,
 * clamped against the beacon interval by iwn_limit_dwell().
 */
static uint16_t
iwn_get_passive_dwell_time(struct iwn_softc *sc, struct ieee80211_channel *c)
{
	uint16_t passive;

	if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ;
	} else {
		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ;
	}

	/* Clamp to the beacon interval if we're associated */
	return (iwn_limit_dwell(sc, passive));
}

/*
 * Build and send a single-channel scan command to the firmware.
 * Only one scan may be in flight at a time; EAGAIN is returned if
 * a scan is already pending.
 */
static int
iwn_scan(struct iwn_softc *sc, struct ieee80211vap *vap,
    struct ieee80211_scan_state *ss, struct ieee80211_channel *c)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = vap->iv_bss;
	struct iwn_scan_hdr *hdr;
	struct iwn_cmd_data *tx;
	struct iwn_scan_essid *essid;
	struct iwn_scan_chan *chan;
	struct ieee80211_frame *wh;
	struct ieee80211_rateset *rs;
	uint8_t *buf, *frm;
	uint16_t rxchain;
	uint8_t txant;
	int buflen, error;
	int is_active;
	uint16_t dwell_active, dwell_passive;
	uint32_t extra, scan_service_time;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/*
	 * We are absolutely not allowed to send a scan command when another
	 * scan command is pending.
	 */
	if (sc->sc_is_scanning) {
		device_printf(sc->sc_dev, "%s: called whilst scanning!\n",
		    __func__);
		return (EAGAIN);
	}

	/* Assign the scan channel */
	c = ic->ic_curchan;

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	buf = kmalloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_INTWAIT | M_ZERO);
	if (buf == NULL) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate buffer for scan command\n",
		    __func__);
		return ENOMEM;
	}
	hdr = (struct iwn_scan_hdr *)buf;
	/*
	 * Move to the next channel if no frames are received within 10ms
	 * after sending the probe request.
	 */
	hdr->quiet_time = htole16(10);		/* timeout in milliseconds */
	hdr->quiet_threshold = htole16(1);	/* min # of packets */
	/*
	 * Max needs to be greater than active and passive and quiet!
	 * It's also in microseconds!
	 */
	hdr->max_svc = htole32(250 * 1024);

	/*
	 * Reset scan: interval=100
	 * Normal scan: interval=beacon interval
	 * suspend_time: 100 (TU)
	 *
	 */
	extra = (100 /* suspend_time */ / 100 /* beacon interval */) << 22;
	//scan_service_time = extra | ((100 /* susp */ % 100 /* int */) * 1024);
	scan_service_time = (4 << 22) | (100 * 1024);	/* Hardcode for now! */
	hdr->pause_svc = htole32(scan_service_time);

	/* Select antennas for scanning. */
	rxchain =
	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
	    IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
	    IWN_RXCHAIN_DRIVER_FORCE;
	if (IEEE80211_IS_CHAN_A(c) &&
	    sc->hw_type == IWN_HW_REV_TYPE_4965) {
		/* Ant A must be avoided in 5GHz because of an HW bug. */
		rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
	} else	/* Use all available RX antennas. */
		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
	hdr->rxchain = htole16(rxchain);
	hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);

	/* The TX command for the probe request follows the header. */
	tx = (struct iwn_cmd_data *)(hdr + 1);
	tx->flags = htole32(IWN_TX_AUTO_SEQ);
	tx->id = sc->broadcast_id;
	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);

	if (IEEE80211_IS_CHAN_5GHZ(c)) {
		/* Send probe requests at 6Mbps. */
		tx->rate = htole32(0xd);
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
	} else {
		hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
		if (sc->hw_type == IWN_HW_REV_TYPE_4965 &&
		    sc->rxon->associd && sc->rxon->chan > 14)
			tx->rate = htole32(0xd);
		else {
			/* Send probe requests at 1Mbps. */
			tx->rate = htole32(10 | IWN_RFLAG_CCK);
		}
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	}
	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);
	tx->rate |= htole32(IWN_RFLAG_ANT(txant));

	/*
	 * Only do active scanning if we're announcing a probe request
	 * for a given SSID (or more, if we ever add it to the driver.)
	 */
	is_active = 0;

	/*
	 * If we're scanning for a specific SSID, add it to the command.
	 *
	 * XXX maybe look at adding support for scanning multiple SSIDs?
	 */
	essid = (struct iwn_scan_essid *)(tx + 1);
	if (ss != NULL) {
		if (ss->ss_ssid[0].len != 0) {
			essid[0].id = IEEE80211_ELEMID_SSID;
			essid[0].len = ss->ss_ssid[0].len;
			memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
		}

		DPRINTF(sc, IWN_DEBUG_SCAN, "%s: ssid_len=%d, ssid=%*s\n",
		    __func__,
		    ss->ss_ssid[0].len,
		    ss->ss_ssid[0].len,
		    ss->ss_ssid[0].ssid);

		if (ss->ss_nssid > 0)
			is_active = 1;
	}

	/*
	 * Build a probe request frame.  Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	/* NOTE(review): 20 looks like the ESSID slot count in the scan command — confirm against if_iwnreg.h. */
	wh = (struct ieee80211_frame *)(essid + 20);
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, vap->iv_ifp->if_broadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(vap->iv_ifp));
	IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_ifp->if_broadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	/* Append SSID, rates, extended rates and HT cap elements. */
	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, NULL, 0);
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	if (ic->ic_htcaps & IEEE80211_HTC_HT)
		frm = ieee80211_add_htcap(frm, ni);

	/* Set length of probe request. */
	tx->len = htole16(frm - (uint8_t *)wh);

	/*
	 * If active scanning is requested but a certain channel is
	 * marked passive, we can do active scanning if we detect
	 * transmissions.
	 *
	 * There is an issue with some firmware versions that triggers
	 * a sysassert on a "good CRC threshold" of zero (== disabled),
	 * on a radar channel even though this means that we should NOT
	 * send probes.
	 *
	 * The "good CRC threshold" is the number of frames that we
	 * need to receive during our dwell time on a channel before
	 * sending out probes -- setting this to a huge value will
	 * mean we never reach it, but at the same time work around
	 * the aforementioned issue.  Thus use IWL_GOOD_CRC_TH_NEVER
	 * here instead of IWL_GOOD_CRC_TH_DISABLED.
	 *
	 * This was fixed in later versions along with some other
	 * scan changes, and the threshold behaves as a flag in those
	 * versions.
	 */

	/*
	 * If we're doing active scanning, set the crc_threshold
	 * to a suitable value.  This is different to active versus
	 * passive scanning depending upon the channel flags; the
	 * firmware will obey that particular check for us.
	 */
	if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN)
		hdr->crc_threshold = is_active ?
		    IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED;
	else
		hdr->crc_threshold = is_active ?
		    IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER;

	/* The (single) channel entry follows the probe request frame. */
	chan = (struct iwn_scan_chan *)frm;
	chan->chan = htole16(ieee80211_chan2ieee(ic, c));
	chan->flags = 0;
	if (ss->ss_nssid > 0)
		chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
	chan->dsp_gain = 0x6e;

	/*
	 * Set the passive/active flag depending upon the channel mode.
	 * XXX TODO: take the is_active flag into account as well?
	 */
	if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
		chan->flags |= htole32(IWN_CHAN_PASSIVE);
	else
		chan->flags |= htole32(IWN_CHAN_ACTIVE);

	/*
	 * Calculate the active/passive dwell times.
	 */
	dwell_active = iwn_get_active_dwell_time(sc, c, ss->ss_nssid);
	dwell_passive = iwn_get_passive_dwell_time(sc, c);

	/* Make sure they're valid */
	if (dwell_passive <= dwell_active)
		dwell_passive = dwell_active + 1;

	chan->active = htole16(dwell_active);
	chan->passive = htole16(dwell_passive);

	if (IEEE80211_IS_CHAN_5GHZ(c))
		chan->rf_gain = 0x3b;
	else
		chan->rf_gain = 0x28;

	DPRINTF(sc, IWN_DEBUG_STATE,
	    "%s: chan %u flags 0x%x rf_gain 0x%x "
	    "dsp_gain 0x%x active %d passive %d scan_svc_time %d crc 0x%x "
	    "isactive=%d numssid=%d\n", __func__,
	    chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
	    dwell_active, dwell_passive, scan_service_time,
	    hdr->crc_threshold, is_active, ss->ss_nssid);

	hdr->nchan++;
	chan++;
	buflen = (uint8_t *)chan - buf;
	hdr->len = htole16(buflen);

	if (sc->sc_is_scanning) {
		device_printf(sc->sc_dev,
		    "%s: called with is_scanning set!\n",
		    __func__);
	}
	sc->sc_is_scanning = 1;

	DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
	    hdr->nchan);
	error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
	kfree(buf, M_DEVBUF);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return error;
}

/*
 * Prepare for authentication: point RXON at the target BSS, set the
 * channel/band/rate parameters, push TX power and re-add the
 * broadcast node (cleared by the RXON reconfiguration).
 */
static int
iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
{
	struct iwn_ops *ops = &sc->ops;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = vap->iv_bss;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	/* Update adapter configuration. */
	IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
	sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
		sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
		sc->rxon->cck_mask = 0;
		sc->rxon->ofdm_mask = 0x15;
	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
		sc->rxon->cck_mask = 0x03;
		sc->rxon->ofdm_mask = 0;
	} else {
		/* Assume 802.11b/g. */
		sc->rxon->cck_mask = 0x03;
		sc->rxon->ofdm_mask = 0x15;
	}

	/* try HT */
	sc->rxon->flags |= htole32(iwn_get_rxon_ht_flags(sc, ic->ic_curchan));

	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
	    sc->rxon->chan, sc->rxon->flags, sc->rxon->cck_mask,
	    sc->rxon->ofdm_mask);
	if (sc->sc_is_scanning)
		device_printf(sc->sc_dev,
		    "%s: is_scanning set, before RXON\n",
		    __func__);
	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n",
		    __func__, error);
		return error;
	}

	/* Configuration has changed, set TX power accordingly. */
	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set TX power, error %d\n", __func__, error);
		return error;
	}
	/*
	 * Reconfiguring RXON clears the firmware nodes table so we must
	 * add the broadcast node again.
	 */
	if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not add broadcast node, error %d\n", __func__,
		    error);
		return error;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * Move to RUN state: program timing, push the associated RXON and TX
 * power, add the BSS node and start the periodic calibration timer.
 */
static int
iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
{
	struct iwn_ops *ops = &sc->ops;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = vap->iv_bss;
	struct iwn_node_info node;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* Link LED blinks while monitoring. */
		iwn_set_led(sc, IWN_LED_LINK, 5, 5);
		return 0;
	}
	if ((error = iwn_set_timing(sc, ni)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set timing, error %d\n", __func__, error);
		return error;
	}

	/* Update adapter configuration. */
	IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
	sc->rxon->associd = htole16(IEEE80211_AID(ni->ni_associd));
	sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
		sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
		sc->rxon->cck_mask = 0;
		sc->rxon->ofdm_mask = 0x15;
	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
		sc->rxon->cck_mask = 0x03;
		sc->rxon->ofdm_mask = 0;
	} else {
		/* Assume 802.11b/g. */
		sc->rxon->cck_mask = 0x0f;
		sc->rxon->ofdm_mask = 0x15;
	}
	/* try HT */
	sc->rxon->flags |= htole32(iwn_get_rxon_ht_flags(sc, ni->ni_chan));
	sc->rxon->filter |= htole32(IWN_FILTER_BSS);
	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x, curhtprotmode=%d\n",
	    sc->rxon->chan, le32toh(sc->rxon->flags), ic->ic_curhtprotmode);
	if (sc->sc_is_scanning)
		device_printf(sc->sc_dev,
		    "%s: is_scanning set, before RXON\n",
		    __func__);
	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not update configuration, error %d\n", __func__,
		    error);
		return error;
	}

	/* Configuration has changed, set TX power accordingly. */
	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set TX power, error %d\n", __func__, error);
		return error;
	}

	/* Fake a join to initialize the TX rate. */
	((struct iwn_node *)ni)->id = IWN_ID_BSS;
	iwn_newassoc(ni, 1);

	/* Add BSS node.
 */
	memset(&node, 0, sizeof node);
	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
	node.id = IWN_ID_BSS;
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
		/* Mirror the peer's SMPS mode into the firmware node flags. */
		switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) {
		case IEEE80211_HTCAP_SMPS_ENA:
			node.htflags |= htole32(IWN_SMPS_MIMO_DIS);
			break;
		case IEEE80211_HTCAP_SMPS_DYNAMIC:
			node.htflags |= htole32(IWN_SMPS_MIMO_PROT);
			break;
		}
		node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) |
		    IWN_AMDPU_DENSITY(5));	/* 4us */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
			node.htflags |= htole32(IWN_NODE_HT40);
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__);
	error = ops->add_node(sc, &node, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not add BSS node, error %d\n", __func__, error);
		return error;
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n",
	    __func__, node.id);
	if ((error = iwn_set_link_quality(sc, ni)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not setup link quality for node %d, error %d\n",
		    __func__, node.id, error);
		return error;
	}

	if ((error = iwn_init_sensitivity(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set sensitivity, error %d\n", __func__,
		    error);
		return error;
	}
	/* Start periodic calibration timer. */
	sc->calib.state = IWN_CALIB_STATE_ASSOC;
	sc->calib_cnt = 0;
	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
	    sc);

	/* Link LED always on while associated. */
	iwn_set_led(sc, IWN_LED_LINK, 0, 1);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * This function is called by upper layer when an ADDBA request is received
 * from another STA and before the ADDBA response is sent.
 */
static int
iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
    int baparamset, int batimeout, int baseqctl)
{
#define MS(_v, _f)	(((_v) & _f) >> _f##_S)
	struct iwn_softc *sc = ni->ni_ic->ic_softc;
	struct iwn_ops *ops = &sc->ops;
	struct iwn_node *wn = (void *)ni;
	struct iwn_node_info node;
	uint16_t ssn;
	uint8_t tid;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Extract the TID and starting sequence number from the request. */
	tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID);
	ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START);

	/* Tell the firmware node about the new RX BA agreement. */
	memset(&node, 0, sizeof node);
	node.id = wn->id;
	node.control = IWN_NODE_UPDATE;
	node.flags = IWN_FLAG_SET_ADDBA;
	node.addba_tid = tid;
	node.addba_ssn = htole16(ssn);
	DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n",
	    wn->id, tid, ssn);
	error = ops->add_node(sc, &node, 1);
	if (error != 0)
		return error;
	/* Chain to the saved net80211 handler. */
	return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
#undef MS
}

/*
 * This function is called by upper layer on teardown of an HT-immediate
 * Block Ack agreement (eg. upon receipt of a DELBA frame).
 */
static void
iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct iwn_softc *sc = ic->ic_softc;
	struct iwn_ops *ops = &sc->ops;
	struct iwn_node *wn = (void *)ni;
	struct iwn_node_info node;
	uint8_t tid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* XXX: tid as an argument */
	for (tid = 0; tid < WME_NUM_TID; tid++) {
		if (&ni->ni_rx_ampdu[tid] == rap)
			break;
	}
	/*
	 * NOTE(review): if rap is not found above, tid falls through equal
	 * to WME_NUM_TID and is passed to the firmware as-is — confirm the
	 * caller always hands us one of ni->ni_rx_ampdu[].
	 */

	/* Tell the firmware to tear down the RX BA session for this TID. */
	memset(&node, 0, sizeof node);
	node.id = wn->id;
	node.control = IWN_NODE_UPDATE;
	node.flags = IWN_FLAG_SET_DELBA;
	node.delba_tid = tid;
	DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid);
	(void)ops->add_node(sc, &node, 1);
	sc->sc_ampdu_rx_stop(ni, rap);
}

/*
 * net80211 ADDBA request hook (TX side): reserve a free aggregation TX
 * queue for this session and stash its qid in tap->txa_private before
 * chaining to the default handler.
 */
static int
iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int dialogtoken, int baparamset, int batimeout)
{
	struct iwn_softc *sc = ni->ni_ic->ic_softc;
	int qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Find a free aggregation queue. */
	for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) {
		if (sc->qid2tap[qid] == NULL)
			break;
	}
	if (qid == sc->ntxqs) {
		/* No free queue: refuse the ADDBA request (return 0). */
		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n",
		    __func__);
		return 0;
	}
	tap->txa_private = kmalloc(sizeof(int), M_DEVBUF, M_INTWAIT);
	if (tap->txa_private == NULL) {
		device_printf(sc->sc_dev,
		    "%s: failed to alloc TX aggregation structure\n", __func__);
		return 0;
	}
	sc->qid2tap[qid] = tap;
	*(int *)tap->txa_private = qid;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
	    batimeout);
}

/*
 * net80211 ADDBA response hook (TX side): on success start the hardware
 * aggregation session; on failure release the queue reserved by
 * iwn_addba_request().
 */
static int
iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int code, int baparamset, int batimeout)
{
	struct iwn_softc *sc = ni->ni_ic->ic_softc;
	/*
	 * NOTE(review): txa_private is dereferenced unconditionally here;
	 * assumes iwn_addba_request() succeeded — confirm net80211 never
	 * calls this hook otherwise.
	 */
	int qid = *(int *)tap->txa_private;
	uint8_t tid = tap->txa_tid;
	int ret;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (code == IEEE80211_STATUS_SUCCESS) {
		ni->ni_txseqs[tid] = tap->txa_start & 0xfff;
		ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid);
		if (ret != 1)
			return ret;
	} else {
		/* Peer refused: give the reserved queue back. */
		sc->qid2tap[qid] = NULL;
		kfree(tap->txa_private, M_DEVBUF);
		tap->txa_private = NULL;
	}
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}

/*
 * This function is called by upper layer when an ADDBA response is received
 * from another STA.
 */
static int
iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
	struct iwn_softc *sc = ni->ni_ic->ic_softc;
	struct iwn_ops *ops = &sc->ops;
	struct iwn_node *wn = (void *)ni;
	struct iwn_node_info node;
	int error, qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Enable TX for the specified RA/TID. */
	wn->disable_tid &= ~(1 << tid);
	memset(&node, 0, sizeof node);
	node.id = wn->id;
	node.control = IWN_NODE_UPDATE;
	node.flags = IWN_FLAG_SET_DISABLE_TID;
	node.disable_tid = htole16(wn->disable_tid);
	error = ops->add_node(sc, &node, 1);
	if (error != 0)
		return 0;

	if ((error = iwn_nic_lock(sc)) != 0)
		return 0;
	qid = *(int *)tap->txa_private;
	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n",
	    __func__, wn->id, tid, tap->txa_start, qid);
	/* Program the chip-specific scheduler for this aggregation queue. */
	ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff);
	iwn_nic_unlock(sc);

	iwn_set_link_quality(sc, ni);
	return 1;
}

/*
 * Tear down a TX aggregation session: notify net80211, then stop the
 * hardware queue and release it once it has drained.
 */
static void
iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
	struct iwn_softc *sc = ni->ni_ic->ic_softc;
	struct iwn_ops *ops = &sc->ops;
	uint8_t tid = tap->txa_tid;
	int qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	sc->sc_addba_stop(ni, tap);

	if (tap->txa_private == NULL)
		return;

	qid = *(int *)tap->txa_private;
	/* Queue not yet drained: leave it allocated for now. */
	if (sc->txq[qid].queued != 0)
		return;
	if (iwn_nic_lock(sc) != 0)
		return;
	ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff);
	iwn_nic_unlock(sc);
	sc->qid2tap[qid] = NULL;
	kfree(tap->txa_private, M_DEVBUF);
	tap->txa_private = NULL;
}

/*
 * 4965-specific: configure the TX scheduler to run queue `qid' as an
 * aggregation queue for station wn->id / TID `tid' starting at `ssn'.
 * Caller holds the NIC lock.
 */
static void
iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
    int qid, uint8_t tid, uint16_t ssn)
{
	struct iwn_node *wn = (void *)ni;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_CHGACT);

	/* Assign RA/TID translation to the queue. */
	iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
	    wn->id << 4 | tid);

	/* Enable chain-building mode for the queue. */
	iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);

	/* Set starting sequence number from the ADDBA request. */
	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Set scheduler window size. */
	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
	    IWN_SCHED_WINSZ);
	/* Set scheduler frame limit. */
	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
	    IWN_SCHED_LIMIT << 16);

	/* Enable interrupts for the queue. */
	iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as active. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
	    iwn_tid2fifo[tid] << 1);
}

/*
 * 4965-specific: undo iwn4965_ampdu_tx_start() for queue `qid'.
 * Caller holds the NIC lock.
 */
static void
iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
{
	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_CHGACT);

	/* Set starting sequence number from the ADDBA request. */
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Disable interrupts for the queue. */
	iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as inactive. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
}

/*
 * 5000-series equivalent of iwn4965_ampdu_tx_start().
 * Caller holds the NIC lock.
 */
static void
iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
    int qid, uint8_t tid, uint16_t ssn)
{
	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	struct iwn_node *wn = (void *)ni;

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_CHGACT);

	/* Assign RA/TID translation to the queue. */
	iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
	    wn->id << 4 | tid);

	/* Enable chain-building mode for the queue. */
	iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);

	/* Enable aggregation for the queue. */
	iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);

	/* Set starting sequence number from the ADDBA request. */
	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Set scheduler window size and frame limit. */
	iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
	    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);

	/* Enable interrupts for the queue. */
	iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as active. */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
}

/*
 * 5000-series equivalent of iwn4965_ampdu_tx_stop().
 * Caller holds the NIC lock.
 */
static void
iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
{
	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_CHGACT);

	/* Disable aggregation for the queue. */
	iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);

	/* Set starting sequence number from the ADDBA request. */
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Disable interrupts for the queue. */
	iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as inactive. */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
}

/*
 * Query calibration tables from the initialization firmware.  We do this
 * only once at first boot.  Called from a process context.
 */
static int
iwn5000_query_calibration(struct iwn_softc *sc)
{
	struct iwn5000_calib_config cmd;
	int error;

	/* Request every calibration result the firmware can provide. */
	memset(&cmd, 0, sizeof cmd);
	cmd.ucode.once.enable = htole32(0xffffffff);
	cmd.ucode.once.start = htole32(0xffffffff);
	cmd.ucode.once.send = htole32(0xffffffff);
	cmd.ucode.flags = htole32(0xffffffff);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n",
	    __func__);
	error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
	if (error != 0)
		return error;

	/* Wait at most two seconds for calibration to complete. */
	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
#if defined(__DragonFly__)
		error = lksleep(sc, &sc->sc_lk, PCATCH, "iwncal", 2 * hz);
#else
		error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz);
#endif
	return error;
}

/*
 * Send calibration results to the runtime firmware.  These results were
 * obtained on first boot from the initialization firmware.
7694 */ 7695 static int 7696 iwn5000_send_calibration(struct iwn_softc *sc) 7697 { 7698 int idx, error; 7699 7700 for (idx = 0; idx < IWN5000_PHY_CALIB_MAX_RESULT; idx++) { 7701 if (!(sc->base_params->calib_need & (1<<idx))) { 7702 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 7703 "No need of calib %d\n", 7704 idx); 7705 continue; /* no need for this calib */ 7706 } 7707 if (sc->calibcmd[idx].buf == NULL) { 7708 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 7709 "Need calib idx : %d but no available data\n", 7710 idx); 7711 continue; 7712 } 7713 7714 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 7715 "send calibration result idx=%d len=%d\n", idx, 7716 sc->calibcmd[idx].len); 7717 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf, 7718 sc->calibcmd[idx].len, 0); 7719 if (error != 0) { 7720 device_printf(sc->sc_dev, 7721 "%s: could not send calibration result, error %d\n", 7722 __func__, error); 7723 return error; 7724 } 7725 } 7726 return 0; 7727 } 7728 7729 static int 7730 iwn5000_send_wimax_coex(struct iwn_softc *sc) 7731 { 7732 struct iwn5000_wimax_coex wimax; 7733 7734 #if 0 7735 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 7736 /* Enable WiMAX coexistence for combo adapters. */ 7737 wimax.flags = 7738 IWN_WIMAX_COEX_ASSOC_WA_UNMASK | 7739 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK | 7740 IWN_WIMAX_COEX_STA_TABLE_VALID | 7741 IWN_WIMAX_COEX_ENABLE; 7742 memcpy(wimax.events, iwn6050_wimax_events, 7743 sizeof iwn6050_wimax_events); 7744 } else 7745 #endif 7746 { 7747 /* Disable WiMAX coexistence. 
*/ 7748 wimax.flags = 0; 7749 memset(wimax.events, 0, sizeof wimax.events); 7750 } 7751 DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n", 7752 __func__); 7753 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); 7754 } 7755 7756 static int 7757 iwn5000_crystal_calib(struct iwn_softc *sc) 7758 { 7759 struct iwn5000_phy_calib_crystal cmd; 7760 7761 memset(&cmd, 0, sizeof cmd); 7762 cmd.code = IWN5000_PHY_CALIB_CRYSTAL; 7763 cmd.ngroups = 1; 7764 cmd.isvalid = 1; 7765 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff; 7766 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff; 7767 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n", 7768 cmd.cap_pin[0], cmd.cap_pin[1]); 7769 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 7770 } 7771 7772 static int 7773 iwn5000_temp_offset_calib(struct iwn_softc *sc) 7774 { 7775 struct iwn5000_phy_calib_temp_offset cmd; 7776 7777 memset(&cmd, 0, sizeof cmd); 7778 cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET; 7779 cmd.ngroups = 1; 7780 cmd.isvalid = 1; 7781 if (sc->eeprom_temp != 0) 7782 cmd.offset = htole16(sc->eeprom_temp); 7783 else 7784 cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET); 7785 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n", 7786 le16toh(cmd.offset)); 7787 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 7788 } 7789 7790 static int 7791 iwn5000_temp_offset_calibv2(struct iwn_softc *sc) 7792 { 7793 struct iwn5000_phy_calib_temp_offsetv2 cmd; 7794 7795 memset(&cmd, 0, sizeof cmd); 7796 cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET; 7797 cmd.ngroups = 1; 7798 cmd.isvalid = 1; 7799 if (sc->eeprom_temp != 0) { 7800 cmd.offset_low = htole16(sc->eeprom_temp); 7801 cmd.offset_high = htole16(sc->eeprom_temp_high); 7802 } else { 7803 cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET); 7804 cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET); 7805 } 7806 cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage); 7807 7808 DPRINTF(sc, 
IWN_DEBUG_CALIBRATE, 7809 "setting radio sensor low offset to %d, high offset to %d, voltage to %d\n", 7810 le16toh(cmd.offset_low), 7811 le16toh(cmd.offset_high), 7812 le16toh(cmd.burnt_voltage_ref)); 7813 7814 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 7815 } 7816 7817 /* 7818 * This function is called after the runtime firmware notifies us of its 7819 * readiness (called in a process context). 7820 */ 7821 static int 7822 iwn4965_post_alive(struct iwn_softc *sc) 7823 { 7824 int error, qid; 7825 7826 if ((error = iwn_nic_lock(sc)) != 0) 7827 return error; 7828 7829 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7830 7831 /* Clear TX scheduler state in SRAM. */ 7832 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 7833 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0, 7834 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t)); 7835 7836 /* Set physical address of TX scheduler rings (1KB aligned). */ 7837 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 7838 7839 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 7840 7841 /* Disable chain mode for all our 16 queues. */ 7842 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0); 7843 7844 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) { 7845 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0); 7846 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 7847 7848 /* Set scheduler window size. */ 7849 iwn_mem_write(sc, sc->sched_base + 7850 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); 7851 /* Set scheduler frame limit. */ 7852 iwn_mem_write(sc, sc->sched_base + 7853 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 7854 IWN_SCHED_LIMIT << 16); 7855 } 7856 7857 /* Enable interrupts for all our 16 queues. */ 7858 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff); 7859 /* Identify TX FIFO rings (0-7). */ 7860 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff); 7861 7862 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. 
*/ 7863 for (qid = 0; qid < 7; qid++) { 7864 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; 7865 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 7866 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); 7867 } 7868 iwn_nic_unlock(sc); 7869 return 0; 7870 } 7871 7872 /* 7873 * This function is called after the initialization or runtime firmware 7874 * notifies us of its readiness (called in a process context). 7875 */ 7876 static int 7877 iwn5000_post_alive(struct iwn_softc *sc) 7878 { 7879 int error, qid; 7880 7881 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 7882 7883 /* Switch to using ICT interrupt mode. */ 7884 iwn5000_ict_reset(sc); 7885 7886 if ((error = iwn_nic_lock(sc)) != 0){ 7887 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); 7888 return error; 7889 } 7890 7891 /* Clear TX scheduler state in SRAM. */ 7892 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 7893 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, 7894 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t)); 7895 7896 /* Set physical address of TX scheduler rings (1KB aligned). */ 7897 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 7898 7899 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 7900 7901 /* Enable chain mode for all queues, except command queue. */ 7902 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) 7903 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffdf); 7904 else 7905 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef); 7906 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); 7907 7908 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { 7909 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); 7910 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 7911 7912 iwn_mem_write(sc, sc->sched_base + 7913 IWN5000_SCHED_QUEUE_OFFSET(qid), 0); 7914 /* Set scheduler window size and frame limit. 
*/ 7915 iwn_mem_write(sc, sc->sched_base + 7916 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 7917 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 7918 } 7919 7920 /* Enable interrupts for all our 20 queues. */ 7921 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); 7922 /* Identify TX FIFO rings (0-7). */ 7923 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); 7924 7925 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 7926 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) { 7927 /* Mark TX rings as active. */ 7928 for (qid = 0; qid < 11; qid++) { 7929 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 0, 4, 2, 5, 4, 7, 5 }; 7930 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7931 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 7932 } 7933 } else { 7934 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 7935 for (qid = 0; qid < 7; qid++) { 7936 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; 7937 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 7938 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 7939 } 7940 } 7941 iwn_nic_unlock(sc); 7942 7943 /* Configure WiMAX coexistence for combo adapters. */ 7944 error = iwn5000_send_wimax_coex(sc); 7945 if (error != 0) { 7946 device_printf(sc->sc_dev, 7947 "%s: could not configure WiMAX coexistence, error %d\n", 7948 __func__, error); 7949 return error; 7950 } 7951 if (sc->hw_type != IWN_HW_REV_TYPE_5150) { 7952 /* Perform crystal calibration. */ 7953 error = iwn5000_crystal_calib(sc); 7954 if (error != 0) { 7955 device_printf(sc->sc_dev, 7956 "%s: crystal calibration failed, error %d\n", 7957 __func__, error); 7958 return error; 7959 } 7960 } 7961 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) { 7962 /* Query calibration from the initialization firmware. 
*/ 7963 if ((error = iwn5000_query_calibration(sc)) != 0) { 7964 device_printf(sc->sc_dev, 7965 "%s: could not query calibration, error %d\n", 7966 __func__, error); 7967 return error; 7968 } 7969 /* 7970 * We have the calibration results now, reboot with the 7971 * runtime firmware (call ourselves recursively!) 7972 */ 7973 iwn_hw_stop(sc); 7974 error = iwn_hw_init(sc); 7975 } else { 7976 /* Send calibration results to runtime firmware. */ 7977 error = iwn5000_send_calibration(sc); 7978 } 7979 7980 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 7981 7982 return error; 7983 } 7984 7985 /* 7986 * The firmware boot code is small and is intended to be copied directly into 7987 * the NIC internal memory (no DMA transfer). 7988 */ 7989 static int 7990 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size) 7991 { 7992 int error, ntries; 7993 7994 size /= sizeof (uint32_t); 7995 7996 if ((error = iwn_nic_lock(sc)) != 0) 7997 return error; 7998 7999 /* Copy microcode image into NIC memory. */ 8000 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE, 8001 (const uint32_t *)ucode, size); 8002 8003 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0); 8004 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE); 8005 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size); 8006 8007 /* Start boot load now. */ 8008 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START); 8009 8010 /* Wait for transfer to complete. */ 8011 for (ntries = 0; ntries < 1000; ntries++) { 8012 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) & 8013 IWN_BSM_WR_CTRL_START)) 8014 break; 8015 DELAY(10); 8016 } 8017 if (ntries == 1000) { 8018 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 8019 __func__); 8020 iwn_nic_unlock(sc); 8021 return ETIMEDOUT; 8022 } 8023 8024 /* Enable boot after power up. 
*/ 8025 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN); 8026 8027 iwn_nic_unlock(sc); 8028 return 0; 8029 } 8030 8031 static int 8032 iwn4965_load_firmware(struct iwn_softc *sc) 8033 { 8034 struct iwn_fw_info *fw = &sc->fw; 8035 struct iwn_dma_info *dma = &sc->fw_dma; 8036 int error; 8037 8038 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 8039 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 8040 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 8041 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 8042 fw->init.text, fw->init.textsz); 8043 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 8044 8045 /* Tell adapter where to find initialization sections. */ 8046 if ((error = iwn_nic_lock(sc)) != 0) 8047 return error; 8048 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 8049 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz); 8050 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 8051 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 8052 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 8053 iwn_nic_unlock(sc); 8054 8055 /* Load firmware boot code. */ 8056 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 8057 if (error != 0) { 8058 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 8059 __func__); 8060 return error; 8061 } 8062 /* Now press "execute". */ 8063 IWN_WRITE(sc, IWN_RESET, 0); 8064 8065 /* Wait at most one second for first alive notification. */ 8066 #if defined(__DragonFly__) 8067 if ((error = lksleep(sc, &sc->sc_lk, PCATCH, "iwninit", hz)) != 0) { 8068 #else 8069 if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) { 8070 #endif 8071 device_printf(sc->sc_dev, 8072 "%s: timeout waiting for adapter to initialize, error %d\n", 8073 __func__, error); 8074 return error; 8075 } 8076 8077 /* Retrieve current temperature for initial TX power calibration. 
*/ 8078 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; 8079 sc->temp = iwn4965_get_temperature(sc); 8080 8081 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 8082 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 8083 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 8084 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 8085 fw->main.text, fw->main.textsz); 8086 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 8087 8088 /* Tell adapter where to find runtime sections. */ 8089 if ((error = iwn_nic_lock(sc)) != 0) 8090 return error; 8091 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 8092 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz); 8093 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 8094 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 8095 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, 8096 IWN_FW_UPDATED | fw->main.textsz); 8097 iwn_nic_unlock(sc); 8098 8099 return 0; 8100 } 8101 8102 static int 8103 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst, 8104 const uint8_t *section, int size) 8105 { 8106 struct iwn_dma_info *dma = &sc->fw_dma; 8107 int error; 8108 8109 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8110 8111 /* Copy firmware section into pre-allocated DMA-safe memory. 
*/ 8112 memcpy(dma->vaddr, section, size); 8113 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 8114 8115 if ((error = iwn_nic_lock(sc)) != 0) 8116 return error; 8117 8118 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 8119 IWN_FH_TX_CONFIG_DMA_PAUSE); 8120 8121 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst); 8122 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL), 8123 IWN_LOADDR(dma->paddr)); 8124 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL), 8125 IWN_HIADDR(dma->paddr) << 28 | size); 8126 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL), 8127 IWN_FH_TXBUF_STATUS_TBNUM(1) | 8128 IWN_FH_TXBUF_STATUS_TBIDX(1) | 8129 IWN_FH_TXBUF_STATUS_TFBD_VALID); 8130 8131 /* Kick Flow Handler to start DMA transfer. */ 8132 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 8133 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD); 8134 8135 iwn_nic_unlock(sc); 8136 8137 /* Wait at most five seconds for FH DMA transfer to complete. */ 8138 #if defined(__DragonFly__) 8139 return lksleep(sc, &sc->sc_lk, PCATCH, "iwninit", 5 * hz); 8140 #else 8141 return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz); 8142 #endif 8143 } 8144 8145 static int 8146 iwn5000_load_firmware(struct iwn_softc *sc) 8147 { 8148 struct iwn_fw_part *fw; 8149 int error; 8150 8151 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8152 8153 /* Load the initialization firmware on first boot only. */ 8154 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ? 
8155 &sc->fw.main : &sc->fw.init; 8156 8157 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE, 8158 fw->text, fw->textsz); 8159 if (error != 0) { 8160 device_printf(sc->sc_dev, 8161 "%s: could not load firmware %s section, error %d\n", 8162 __func__, ".text", error); 8163 return error; 8164 } 8165 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE, 8166 fw->data, fw->datasz); 8167 if (error != 0) { 8168 device_printf(sc->sc_dev, 8169 "%s: could not load firmware %s section, error %d\n", 8170 __func__, ".data", error); 8171 return error; 8172 } 8173 8174 /* Now press "execute". */ 8175 IWN_WRITE(sc, IWN_RESET, 0); 8176 return 0; 8177 } 8178 8179 /* 8180 * Extract text and data sections from a legacy firmware image. 8181 */ 8182 static int 8183 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw) 8184 { 8185 const uint32_t *ptr; 8186 size_t hdrlen = 24; 8187 uint32_t rev; 8188 8189 ptr = (const uint32_t *)fw->data; 8190 rev = le32toh(*ptr++); 8191 8192 sc->ucode_rev = rev; 8193 8194 /* Check firmware API version. */ 8195 if (IWN_FW_API(rev) <= 1) { 8196 device_printf(sc->sc_dev, 8197 "%s: bad firmware, need API version >=2\n", __func__); 8198 return EINVAL; 8199 } 8200 if (IWN_FW_API(rev) >= 3) { 8201 /* Skip build number (version 2 header). */ 8202 hdrlen += 4; 8203 ptr++; 8204 } 8205 if (fw->size < hdrlen) { 8206 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 8207 __func__, fw->size); 8208 return EINVAL; 8209 } 8210 fw->main.textsz = le32toh(*ptr++); 8211 fw->main.datasz = le32toh(*ptr++); 8212 fw->init.textsz = le32toh(*ptr++); 8213 fw->init.datasz = le32toh(*ptr++); 8214 fw->boot.textsz = le32toh(*ptr++); 8215 8216 /* Check that all firmware sections fit. 
*/ 8217 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz + 8218 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 8219 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 8220 __func__, fw->size); 8221 return EINVAL; 8222 } 8223 8224 /* Get pointers to firmware sections. */ 8225 fw->main.text = (const uint8_t *)ptr; 8226 fw->main.data = fw->main.text + fw->main.textsz; 8227 fw->init.text = fw->main.data + fw->main.datasz; 8228 fw->init.data = fw->init.text + fw->init.textsz; 8229 fw->boot.text = fw->init.data + fw->init.datasz; 8230 return 0; 8231 } 8232 8233 /* 8234 * Extract text and data sections from a TLV firmware image. 8235 */ 8236 static int 8237 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, 8238 uint16_t alt) 8239 { 8240 const struct iwn_fw_tlv_hdr *hdr; 8241 const struct iwn_fw_tlv *tlv; 8242 const uint8_t *ptr, *end; 8243 uint64_t altmask; 8244 uint32_t len, tmp; 8245 8246 if (fw->size < sizeof (*hdr)) { 8247 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 8248 __func__, fw->size); 8249 return EINVAL; 8250 } 8251 hdr = (const struct iwn_fw_tlv_hdr *)fw->data; 8252 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { 8253 device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n", 8254 __func__, le32toh(hdr->signature)); 8255 return EINVAL; 8256 } 8257 DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr, 8258 le32toh(hdr->build)); 8259 sc->ucode_rev = le32toh(hdr->rev); 8260 8261 /* 8262 * Select the closest supported alternative that is less than 8263 * or equal to the specified one. 8264 */ 8265 altmask = le64toh(hdr->altmask); 8266 while (alt > 0 && !(altmask & (1ULL << alt))) 8267 alt--; /* Downgrade. */ 8268 DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt); 8269 8270 ptr = (const uint8_t *)(hdr + 1); 8271 end = (const uint8_t *)(fw->data + fw->size); 8272 8273 /* Parse type-length-value fields. 
*/ 8274 while (ptr + sizeof (*tlv) <= end) { 8275 tlv = (const struct iwn_fw_tlv *)ptr; 8276 len = le32toh(tlv->len); 8277 8278 ptr += sizeof (*tlv); 8279 if (ptr + len > end) { 8280 device_printf(sc->sc_dev, 8281 "%s: firmware too short: %zu bytes\n", __func__, 8282 fw->size); 8283 return EINVAL; 8284 } 8285 /* Skip other alternatives. */ 8286 if (tlv->alt != 0 && tlv->alt != htole16(alt)) 8287 goto next; 8288 8289 switch (le16toh(tlv->type)) { 8290 case IWN_FW_TLV_MAIN_TEXT: 8291 fw->main.text = ptr; 8292 fw->main.textsz = len; 8293 break; 8294 case IWN_FW_TLV_MAIN_DATA: 8295 fw->main.data = ptr; 8296 fw->main.datasz = len; 8297 break; 8298 case IWN_FW_TLV_INIT_TEXT: 8299 fw->init.text = ptr; 8300 fw->init.textsz = len; 8301 break; 8302 case IWN_FW_TLV_INIT_DATA: 8303 fw->init.data = ptr; 8304 fw->init.datasz = len; 8305 break; 8306 case IWN_FW_TLV_BOOT_TEXT: 8307 fw->boot.text = ptr; 8308 fw->boot.textsz = len; 8309 break; 8310 case IWN_FW_TLV_ENH_SENS: 8311 if (!len) 8312 sc->sc_flags |= IWN_FLAG_ENH_SENS; 8313 break; 8314 case IWN_FW_TLV_PHY_CALIB: 8315 tmp = le32toh(*ptr); 8316 if (tmp < 253) { 8317 sc->reset_noise_gain = tmp; 8318 sc->noise_gain = tmp + 1; 8319 } 8320 break; 8321 case IWN_FW_TLV_PAN: 8322 sc->sc_flags |= IWN_FLAG_PAN_SUPPORT; 8323 DPRINTF(sc, IWN_DEBUG_RESET, 8324 "PAN Support found: %d\n", 1); 8325 break; 8326 case IWN_FW_TLV_FLAGS: 8327 if (len < sizeof(uint32_t)) 8328 break; 8329 if (len % sizeof(uint32_t)) 8330 break; 8331 sc->tlv_feature_flags = le32toh(*ptr); 8332 DPRINTF(sc, IWN_DEBUG_RESET, 8333 "%s: feature: 0x%08x\n", 8334 __func__, 8335 sc->tlv_feature_flags); 8336 break; 8337 case IWN_FW_TLV_PBREQ_MAXLEN: 8338 case IWN_FW_TLV_RUNT_EVTLOG_PTR: 8339 case IWN_FW_TLV_RUNT_EVTLOG_SIZE: 8340 case IWN_FW_TLV_RUNT_ERRLOG_PTR: 8341 case IWN_FW_TLV_INIT_EVTLOG_PTR: 8342 case IWN_FW_TLV_INIT_EVTLOG_SIZE: 8343 case IWN_FW_TLV_INIT_ERRLOG_PTR: 8344 case IWN_FW_TLV_WOWLAN_INST: 8345 case IWN_FW_TLV_WOWLAN_DATA: 8346 DPRINTF(sc, 
IWN_DEBUG_RESET, 8347 "TLV type %d recognized but not handled\n", 8348 le16toh(tlv->type)); 8349 break; 8350 default: 8351 DPRINTF(sc, IWN_DEBUG_RESET, 8352 "TLV type %d not handled\n", le16toh(tlv->type)); 8353 break; 8354 } 8355 next: /* TLV fields are 32-bit aligned. */ 8356 ptr += (len + 3) & ~3; 8357 } 8358 return 0; 8359 } 8360 8361 static int 8362 iwn_read_firmware(struct iwn_softc *sc) 8363 { 8364 struct iwn_fw_info *fw = &sc->fw; 8365 int error; 8366 8367 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 8368 8369 IWN_UNLOCK(sc); 8370 8371 memset(fw, 0, sizeof (*fw)); 8372 8373 /* Read firmware image from filesystem. */ 8374 sc->fw_fp = firmware_get(sc->fwname); 8375 if (sc->fw_fp == NULL) { 8376 device_printf(sc->sc_dev, "%s: could not read firmware %s\n", 8377 __func__, sc->fwname); 8378 IWN_LOCK(sc); 8379 return EINVAL; 8380 } 8381 IWN_LOCK(sc); 8382 8383 fw->size = sc->fw_fp->datasize; 8384 fw->data = (const uint8_t *)sc->fw_fp->data; 8385 if (fw->size < sizeof (uint32_t)) { 8386 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 8387 __func__, fw->size); 8388 error = EINVAL; 8389 goto fail; 8390 } 8391 8392 /* Retrieve text and data sections. */ 8393 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */ 8394 error = iwn_read_firmware_leg(sc, fw); 8395 else 8396 error = iwn_read_firmware_tlv(sc, fw, 1); 8397 if (error != 0) { 8398 device_printf(sc->sc_dev, 8399 "%s: could not read firmware sections, error %d\n", 8400 __func__, error); 8401 goto fail; 8402 } 8403 8404 device_printf(sc->sc_dev, "%s: ucode rev=0x%08x\n", __func__, sc->ucode_rev); 8405 8406 /* Make sure text and data sections fit in hardware memory. 
 */
	if (fw->main.textsz > sc->fw_text_maxsz ||
	    fw->main.datasz > sc->fw_data_maxsz ||
	    fw->init.textsz > sc->fw_text_maxsz ||
	    fw->init.datasz > sc->fw_data_maxsz ||
	    fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
	    (fw->boot.textsz & 3) != 0) {	/* Boot text must be 32-bit aligned. */
		device_printf(sc->sc_dev, "%s: firmware sections too large\n",
		    __func__);
		error = EINVAL;
		goto fail;
	}

	/* We can proceed with loading the firmware. */
	return 0;

fail:	iwn_unload_firmware(sc);
	return error;
}

/*
 * Release the firmware image obtained by iwn_read_firmware().
 * NOTE(review): assumes sc->fw_fp is non-NULL (callers only invoke this
 * after a successful firmware_get()) -- verify before adding new callers.
 */
static void
iwn_unload_firmware(struct iwn_softc *sc)
{
	firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
	sc->fw_fp = NULL;
}

/*
 * Request the MAC clock and busy-wait (2500 x 10us = 25ms max) for it to
 * stabilize.  Returns 0 on success or ETIMEDOUT.
 */
static int
iwn_clock_wait(struct iwn_softc *sc)
{
	int ntries;

	/* Set "initialization complete" bit. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);

	/* Wait for clock stabilization. */
	for (ntries = 0; ntries < 2500; ntries++) {
		if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
			return 0;
		DELAY(10);
	}
	device_printf(sc->sc_dev,
	    "%s: timeout waiting for clock stabilization\n", __func__);
	return ETIMEDOUT;
}

/*
 * Power the adapter on: apply the documented hardware-bug workarounds,
 * honour the platform's ASPM L1 setting, wait for clock stabilization,
 * then enable the DMA (and, on 4965, BSM) clocks behind the NIC lock.
 * Returns 0 on success or an errno from the clock/lock helpers.
 */
static int
iwn_apm_init(struct iwn_softc *sc)
{
	uint32_t reg;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Disable L0s exit timer (NMI bug workaround). */
	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
	/* Don't wait for ICH L0s (ICH bug workaround). */
	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);

	/* Set FH wait threshold to max (HW bug under stress workaround). */
	IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);

	/* Enable HAP INTA to move adapter from L1a to L0s. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);

	/* Retrieve PCIe Active State Power Management (ASPM). */
#if defined(__DragonFly__)
	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINKCTRL, 4);
	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
	if (reg & PCIEM_LNKCTL_ASPM_L1)	/* L1 Entry enabled. */
#else
	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 4);
	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
	if (reg & PCIEM_LINK_CTL_ASPMC_L1)	/* L1 Entry enabled. */
#endif
		IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
	else
		IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);

	if (sc->base_params->pll_cfg_val)
		IWN_SETBITS(sc, IWN_ANA_PLL, sc->base_params->pll_cfg_val);

	/* Wait for clock stabilization before accessing prph. */
	if ((error = iwn_clock_wait(sc)) != 0)
		return error;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
		/* Enable DMA and BSM (Bootstrap State Machine). */
		iwn_prph_write(sc, IWN_APMG_CLK_EN,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
		    IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
	} else {
		/* Enable DMA. */
		iwn_prph_write(sc, IWN_APMG_CLK_EN,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
	}
	DELAY(20);
	/* Disable L1-Active. */
	iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
	iwn_nic_unlock(sc);

	return 0;
}

/*
 * Ask the device to stop bus-master DMA and busy-wait (up to 1ms) for
 * the disabled acknowledgement; logs on timeout but cannot fail.
 */
static void
iwn_apm_stop_master(struct iwn_softc *sc)
{
	int ntries;

	/* Stop busmaster DMA activity. */
	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
	for (ntries = 0; ntries < 100; ntries++) {
		if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
			return;
		DELAY(10);
	}
	device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__);
}

/*
 * Power the adapter off: stop bus mastering, then issue a software reset.
 */
static void
iwn_apm_stop(struct iwn_softc *sc)
{
	iwn_apm_stop_master(sc);

	/* Reset the entire device.
 */
	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
	DELAY(10);
	/* Clear "initialization complete" bit. */
	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
}

/*
 * Adapter-specific NIC configuration for the 4965: latch the RF
 * configuration into the hardware interface register and select
 * radio/MAC "SI" mode.  Always returns 0.
 */
static int
iwn4965_nic_config(struct iwn_softc *sc)
{
	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
		/*
		 * I don't believe this to be correct but this is what the
		 * vendor driver is doing. Probably the bits should not be
		 * shifted in IWN_RFCFG_*.
		 */
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_RFCFG_TYPE(sc->rfcfg) |
		    IWN_RFCFG_STEP(sc->rfcfg) |
		    IWN_RFCFG_DASH(sc->rfcfg));
	}
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
	return 0;
}

/*
 * Adapter-specific NIC configuration for the 5000 series and later:
 * latch the RF configuration, apply per-chip voltage/calibration
 * workarounds and any extra GP driver bits from the base parameters.
 * Returns 0 on success or an errno from iwn_nic_lock().
 */
static int
iwn5000_nic_config(struct iwn_softc *sc)
{
	uint32_t tmp;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_RFCFG_TYPE(sc->rfcfg) |
		    IWN_RFCFG_STEP(sc->rfcfg) |
		    IWN_RFCFG_DASH(sc->rfcfg));
	}
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);

	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
		/*
		 * Select first Switching Voltage Regulator (1.32V) to
		 * solve a stability issue related to noisy DC2DC line
		 * in the silicon of 1000 Series.
		 */
		tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
		tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
		tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
		iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
	}
	iwn_nic_unlock(sc);

	if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
		/* Use internal power amplifier only. */
		IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
	}
	if (sc->base_params->additional_nic_config && sc->calib_ver >= 6) {
		/* Indicate that ROM calibration version is >=6. */
		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
	}
	if (sc->base_params->additional_gp_drv_bit)
		IWN_SETBITS(sc, IWN_GP_DRIVER,
		    sc->base_params->additional_gp_drv_bit);
	return 0;
}

/*
 * Take NIC ownership over Intel Active Management Technology (AMT).
 */
static int
iwn_hw_prepare(struct iwn_softc *sc)
{
	int ntries;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Check if hardware is ready. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
	for (ntries = 0; ntries < 5; ntries++) {
		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_NIC_READY)
			return 0;
		DELAY(10);
	}

	/* Hardware not ready, force into ready state. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
	for (ntries = 0; ntries < 15000; ntries++) {
		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_PREPARE_DONE))
			break;
		DELAY(10);
	}
	if (ntries == 15000)
		return ETIMEDOUT;

	/* Hardware should be ready now. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
	for (ntries = 0; ntries < 5; ntries++) {
		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_NIC_READY)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}

/*
 * Full hardware bring-up: power on, run the adapter-specific config,
 * program the RX/TX rings and DMA channels, upload the firmware and
 * wait for its "alive" notification.  Called with the softc lock held.
 * Returns 0 on success or an errno on any failed stage.
 */
static int
iwn_hw_init(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	int error, chnl, qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Clear pending interrupts.
 */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);

	if ((error = iwn_apm_init(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not power ON adapter, error %d\n", __func__,
		    error);
		return error;
	}

	/* Select VMAIN power source. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
	iwn_nic_unlock(sc);

	/* Perform adapter-specific initialization. */
	if ((error = ops->nic_config(sc)) != 0)
		return error;

	/* Initialize RX ring. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
	/* Set physical address of RX ring (256-byte aligned). */
	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
	/* Set physical address of RX status (16-byte aligned). */
	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
	/* Enable RX. */
	IWN_WRITE(sc, IWN_FH_RX_CONFIG,
	    IWN_FH_RX_CONFIG_ENA           |
	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
	    IWN_FH_RX_CONFIG_IRQ_DST_HOST  |
	    IWN_FH_RX_CONFIG_SINGLE_FRAME  |
	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
	iwn_nic_unlock(sc);
	/* Hand all but the last 8 RX descriptors to the hardware. */
	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	/* Initialize TX scheduler. */
	iwn_prph_write(sc, sc->sched_txfact_addr, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < sc->ntxqs; qid++) {
		struct iwn_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
	}
	iwn_nic_unlock(sc);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
		    IWN_FH_TX_CONFIG_DMA_ENA |
		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
	}

	/* Clear "radio off" and "commands blocked" bits. */
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	/* Enable interrupt coalescing. */
	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
	/* Enable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);

	/* _Really_ make sure "radio off" bit is cleared! */
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);

	/* Enable shadow registers. */
	if (sc->base_params->shadow_reg_enable)
		IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);

	if ((error = ops->load_firmware(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not load firmware, error %d\n", __func__,
		    error);
		return error;
	}
	/* Wait at most one second for firmware alive notification. */
#if defined(__DragonFly__)
	if ((error = lksleep(sc, &sc->sc_lk, PCATCH, "iwninit", hz)) != 0) {
#else
	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
#endif
		device_printf(sc->sc_dev,
		    "%s: timeout waiting for adapter to initialize, error %d\n",
		    __func__, error);
		return error;
	}
	/* Do post-firmware initialization. */

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return ops->post_alive(sc);
}

/*
 * Quiesce the hardware: mask interrupts, stop the TX scheduler and DMA
 * channels, drain the RX/TX rings and finally power the adapter off.
 * Best-effort: failures to grab the NIC lock are silently skipped.
 */
static void
iwn_hw_stop(struct iwn_softc *sc)
{
	int chnl, qid, ntries;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);

	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
	sc->sc_flags &= ~IWN_FLAG_USE_ICT;

	/* Make sure we no longer hold the NIC lock. */
	iwn_nic_unlock(sc);

	/* Stop TX scheduler. */
	iwn_prph_write(sc, sc->sched_txfact_addr, 0);

	/* Stop all DMA channels. */
	if (iwn_nic_lock(sc) == 0) {
		for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
			IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
			/* Wait (up to 2ms) for the channel to go idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				if (IWN_READ(sc, IWN_FH_TX_STATUS) &
				    IWN_FH_TX_STATUS_IDLE(chnl))
					break;
				DELAY(10);
			}
		}
		iwn_nic_unlock(sc);
	}

	/* Stop RX ring. */
	iwn_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < sc->ntxqs; qid++)
		iwn_reset_tx_ring(sc, &sc->txq[qid]);

	if (iwn_nic_lock(sc) == 0) {
		iwn_prph_write(sc, IWN_APMG_CLK_DIS,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
		iwn_nic_unlock(sc);
	}
	DELAY(5);
	/* Power OFF adapter.
 */
	iwn_apm_stop(sc);
}

/*
 * Taskqueue handler run when the RF kill switch is released: bring the
 * hardware back up and restart net80211 on the first vap.
 */
static void
iwn_radio_on(void *arg0, int pending)
{
	struct iwn_softc *sc = arg0;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (vap != NULL) {
		iwn_init(sc);
		ieee80211_init(vap);
	}
}

/*
 * Taskqueue handler run when the RF kill switch is engaged: stop the
 * hardware and net80211, then re-enable interrupts so we still see the
 * notification when the switch is released again.
 */
static void
iwn_radio_off(void *arg0, int pending)
{
	struct iwn_softc *sc = arg0;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	iwn_stop(sc);
	if (vap != NULL)
		ieee80211_stop(vap);

	/* Enable interrupts to get RF toggle notification. */
	IWN_LOCK(sc);
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
	IWN_UNLOCK(sc);
}

/*
 * Taskqueue handler run after a firmware panic: restart the whole
 * net80211 stack (see the caveat below about aggregation state).
 */
static void
iwn_panicked(void *arg0, int pending)
{
	struct iwn_softc *sc = arg0;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
#if 0
	int error;
#endif

	if (vap == NULL) {
		kprintf("%s: null vap\n", __func__);
		return;
	}

	device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
	    "restarting\n", __func__, vap->iv_state);

	/*
	 * This is not enough work. We need to also reinitialise
	 * the correct transmit state for aggregation enabled queues,
	 * which has a very specific requirement of
	 * ring index = 802.11 seqno % 256. If we don't do this (which
	 * we definitely don't!) then the firmware will just panic again.
	 */
#if 1
	ieee80211_restart_all(ic);
#else
	IWN_LOCK(sc);

	iwn_stop_locked(sc);
	iwn_init_locked(sc);
	if (vap->iv_state >= IEEE80211_S_AUTH &&
	    (error = iwn_auth(sc, vap)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not move to auth state\n", __func__);
	}
	if (vap->iv_state >= IEEE80211_S_RUN &&
	    (error = iwn_run(sc, vap)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not move to run state\n", __func__);
	}

	IWN_UNLOCK(sc);
#endif
}

/*
 * Bring the interface up: prepare the hardware, load the firmware and
 * configure the adapter.  Called with the softc lock held; clears
 * IWN_FLAG_RUNNING and stops the hardware on any failure.
 */
static void
iwn_init_locked(struct iwn_softc *sc)
{
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	IWN_LOCK_ASSERT(sc);

	sc->sc_flags |= IWN_FLAG_RUNNING;

	if ((error = iwn_hw_prepare(sc)) != 0) {
		device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Initialize interrupt mask to default value. */
	sc->int_mask = IWN_INT_MASK_DEF;
	sc->sc_flags &= ~IWN_FLAG_USE_ICT;

	/* Check that the radio is not disabled by hardware switch. */
	if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		/* Enable interrupts to get RF toggle notifications. */
		IWN_WRITE(sc, IWN_INT, 0xffffffff);
		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
		/*
		 * NOTE(review): this return leaves IWN_FLAG_RUNNING set even
		 * though the hardware was not started -- verify whether the
		 * rfkill taskqueue path relies on that before changing it.
		 */
		return;
	}

	/* Read firmware images from the filesystem. */
	if ((error = iwn_read_firmware(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not read firmware, error %d\n", __func__,
		    error);
		goto fail;
	}

	/* Initialize hardware and upload firmware.
*/ 8940 error = iwn_hw_init(sc); 8941 iwn_unload_firmware(sc); 8942 if (error != 0) { 8943 device_printf(sc->sc_dev, 8944 "%s: could not initialize hardware, error %d\n", __func__, 8945 error); 8946 goto fail; 8947 } 8948 8949 /* Configure adapter now that it is ready. */ 8950 if ((error = iwn_config(sc)) != 0) { 8951 device_printf(sc->sc_dev, 8952 "%s: could not configure device, error %d\n", __func__, 8953 error); 8954 goto fail; 8955 } 8956 8957 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc); 8958 8959 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 8960 8961 return; 8962 8963 fail: 8964 sc->sc_flags &= ~IWN_FLAG_RUNNING; 8965 iwn_stop_locked(sc); 8966 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 8967 } 8968 8969 static void 8970 iwn_init(struct iwn_softc *sc) 8971 { 8972 8973 IWN_LOCK(sc); 8974 iwn_init_locked(sc); 8975 IWN_UNLOCK(sc); 8976 8977 if (sc->sc_flags & IWN_FLAG_RUNNING) 8978 ieee80211_start_all(&sc->sc_ic); 8979 } 8980 8981 static void 8982 iwn_stop_locked(struct iwn_softc *sc) 8983 { 8984 8985 IWN_LOCK_ASSERT(sc); 8986 8987 sc->sc_is_scanning = 0; 8988 sc->sc_tx_timer = 0; 8989 #if defined(__DragonFly__) 8990 callout_stop_sync(&sc->watchdog_to); 8991 callout_stop_sync(&sc->calib_to); 8992 #else 8993 callout_stop(&sc->watchdog_to); 8994 callout_stop(&sc->calib_to); 8995 #endif 8996 sc->sc_flags &= ~IWN_FLAG_RUNNING; 8997 8998 /* Power OFF hardware. */ 8999 iwn_hw_stop(sc); 9000 } 9001 9002 static void 9003 iwn_stop(struct iwn_softc *sc) 9004 { 9005 IWN_LOCK(sc); 9006 iwn_stop_locked(sc); 9007 IWN_UNLOCK(sc); 9008 } 9009 9010 /* 9011 * Callback from net80211 to start a scan. 9012 */ 9013 static void 9014 iwn_scan_start(struct ieee80211com *ic) 9015 { 9016 struct iwn_softc *sc = ic->ic_softc; 9017 9018 IWN_LOCK(sc); 9019 /* make the link LED blink while we're scanning */ 9020 iwn_set_led(sc, IWN_LED_LINK, 20, 2); 9021 IWN_UNLOCK(sc); 9022 } 9023 9024 /* 9025 * Callback from net80211 to terminate a scan. 
9026 */ 9027 static void 9028 iwn_scan_end(struct ieee80211com *ic) 9029 { 9030 struct iwn_softc *sc = ic->ic_softc; 9031 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 9032 9033 IWN_LOCK(sc); 9034 if (vap->iv_state == IEEE80211_S_RUN) { 9035 /* Set link LED to ON status if we are associated */ 9036 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 9037 } 9038 IWN_UNLOCK(sc); 9039 } 9040 9041 /* 9042 * Callback from net80211 to force a channel change. 9043 */ 9044 static void 9045 iwn_set_channel(struct ieee80211com *ic) 9046 { 9047 const struct ieee80211_channel *c = ic->ic_curchan; 9048 struct iwn_softc *sc = ic->ic_softc; 9049 int error; 9050 9051 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 9052 9053 IWN_LOCK(sc); 9054 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 9055 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 9056 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 9057 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 9058 9059 /* 9060 * Only need to set the channel in Monitor mode. AP scanning and auth 9061 * are already taken care of by their respective firmware commands. 9062 */ 9063 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 9064 error = iwn_config(sc); 9065 if (error != 0) 9066 device_printf(sc->sc_dev, 9067 "%s: error %d settting channel\n", __func__, error); 9068 } 9069 IWN_UNLOCK(sc); 9070 } 9071 9072 /* 9073 * Callback from net80211 to start scanning of the current channel. 9074 */ 9075 static void 9076 iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 9077 { 9078 struct ieee80211vap *vap = ss->ss_vap; 9079 struct ieee80211com *ic = vap->iv_ic; 9080 struct iwn_softc *sc = ic->ic_softc; 9081 int error; 9082 9083 IWN_LOCK(sc); 9084 error = iwn_scan(sc, vap, ss, ic->ic_curchan); 9085 IWN_UNLOCK(sc); 9086 if (error != 0) 9087 ieee80211_cancel_scan(vap); 9088 } 9089 9090 /* 9091 * Callback from net80211 to handle the minimum dwell time being met. 
 * The intent is to terminate the scan but we just let the firmware
 * notify us when it's finished as we have no safe way to abort it.
 */
static void
iwn_scan_mindwell(struct ieee80211_scan_state *ss)
{
	/* NB: don't try to abort scan; wait for firmware to finish */
}
#ifdef IWN_DEBUG
#define	IWN_DESC(x) case x:	return #x

/*
 * Translate a CSR register offset into its symbolic name for debug dumps.
 * NOTE(review): the return type would more accurately be "const char *"
 * (only string literals are returned); kept as-is to preserve the
 * existing interface.
 */
static char *iwn_get_csr_string(int csr)
{
	switch (csr) {
		IWN_DESC(IWN_HW_IF_CONFIG);
		IWN_DESC(IWN_INT_COALESCING);
		IWN_DESC(IWN_INT);
		IWN_DESC(IWN_INT_MASK);
		IWN_DESC(IWN_FH_INT);
		IWN_DESC(IWN_GPIO_IN);
		IWN_DESC(IWN_RESET);
		IWN_DESC(IWN_GP_CNTRL);
		IWN_DESC(IWN_HW_REV);
		IWN_DESC(IWN_EEPROM);
		IWN_DESC(IWN_EEPROM_GP);
		IWN_DESC(IWN_OTP_GP);
		IWN_DESC(IWN_GIO);
		IWN_DESC(IWN_GP_UCODE);
		IWN_DESC(IWN_GP_DRIVER);
		IWN_DESC(IWN_UCODE_GP1);
		IWN_DESC(IWN_UCODE_GP2);
		IWN_DESC(IWN_LED);
		IWN_DESC(IWN_DRAM_INT_TBL);
		IWN_DESC(IWN_GIO_CHICKEN);
		IWN_DESC(IWN_ANA_PLL);
		IWN_DESC(IWN_HW_REV_WA);
		IWN_DESC(IWN_DBG_HPET_MEM);
	default:
		return "UNKNOWN CSR";
	}
}

/*
 * Dump the hardware CSR values listed in csr_tbl (three per output line)
 * at the IWN_DEBUG_REGISTER level.
 */
static void
iwn_debug_register(struct iwn_softc *sc)
{
	int i;
	static const uint32_t csr_tbl[] = {
		IWN_HW_IF_CONFIG,
		IWN_INT_COALESCING,
		IWN_INT,
		IWN_INT_MASK,
		IWN_FH_INT,
		IWN_GPIO_IN,
		IWN_RESET,
		IWN_GP_CNTRL,
		IWN_HW_REV,
		IWN_EEPROM,
		IWN_EEPROM_GP,
		IWN_OTP_GP,
		IWN_GIO,
		IWN_GP_UCODE,
		IWN_GP_DRIVER,
		IWN_UCODE_GP1,
		IWN_UCODE_GP2,
		IWN_LED,
		IWN_DRAM_INT_TBL,
		IWN_GIO_CHICKEN,
		IWN_ANA_PLL,
		IWN_HW_REV_WA,
		IWN_DBG_HPET_MEM,
	};
	DPRINTF(sc, IWN_DEBUG_REGISTER,
	    "CSR values: (2nd byte of IWN_INT_COALESCING is IWN_INT_PERIODIC)%s",
	    "\n");
	for (i = 0; i < nitems(csr_tbl); i++){
		DPRINTF(sc, IWN_DEBUG_REGISTER,"  %10s: 0x%08x ",
		    iwn_get_csr_string(csr_tbl[i]), IWN_READ(sc, csr_tbl[i]));
		if ((i+1) % 3 == 0)
			DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n");
	}
	DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n");
}
#endif