/*	$OpenBSD: if_iwn.c,v 1.260 2022/06/19 18:27:06 stsp Exp $	*/

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
 * adapters.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/task.h>
#include <sys/endian.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_ra.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_priv.h>	/* for SEQ_LT */
#undef DPRINTF	/* defined in ieee80211_priv.h */

#include <dev/pci/if_iwnreg.h>
#include <dev/pci/if_iwnvar.h>

static const struct pci_matchid iwn_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_4965_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_4965_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5100_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5100_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5150_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5150_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5300_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5300_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5350_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_5350_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1000_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1000_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6300_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6300_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6200_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6200_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6050_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6050_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6005_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6005_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6030_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6030_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1030_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_1030_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_100_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_100_2 },
	{ PCI_VENDOR_INTEL,
PCI_PRODUCT_INTEL_WL_130_1 }, 94 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_130_2 }, 95 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6235_1 }, 96 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_6235_2 }, 97 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2230_1 }, 98 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2230_2 }, 99 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2200_1 }, 100 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_2200_2 }, 101 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_135_1 }, 102 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_135_2 }, 103 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_105_1 }, 104 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_105_2 }, 105 }; 106 107 int iwn_match(struct device *, void *, void *); 108 void iwn_attach(struct device *, struct device *, void *); 109 int iwn4965_attach(struct iwn_softc *, pci_product_id_t); 110 int iwn5000_attach(struct iwn_softc *, pci_product_id_t); 111 #if NBPFILTER > 0 112 void iwn_radiotap_attach(struct iwn_softc *); 113 #endif 114 int iwn_detach(struct device *, int); 115 int iwn_activate(struct device *, int); 116 void iwn_wakeup(struct iwn_softc *); 117 void iwn_init_task(void *); 118 int iwn_nic_lock(struct iwn_softc *); 119 int iwn_eeprom_lock(struct iwn_softc *); 120 int iwn_init_otprom(struct iwn_softc *); 121 int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int); 122 int iwn_dma_contig_alloc(bus_dma_tag_t, struct iwn_dma_info *, 123 void **, bus_size_t, bus_size_t); 124 void iwn_dma_contig_free(struct iwn_dma_info *); 125 int iwn_alloc_sched(struct iwn_softc *); 126 void iwn_free_sched(struct iwn_softc *); 127 int iwn_alloc_kw(struct iwn_softc *); 128 void iwn_free_kw(struct iwn_softc *); 129 int iwn_alloc_ict(struct iwn_softc *); 130 void iwn_free_ict(struct iwn_softc *); 131 int iwn_alloc_fwmem(struct iwn_softc *); 132 void iwn_free_fwmem(struct iwn_softc *); 133 int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 134 void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 135 void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 136 int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *, 137 int); 138 void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 139 void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 140 void iwn5000_ict_reset(struct iwn_softc *); 141 int iwn_read_eeprom(struct iwn_softc *); 142 void iwn4965_read_eeprom(struct iwn_softc *); 143 void iwn4965_print_power_group(struct iwn_softc *, int); 144 void iwn5000_read_eeprom(struct iwn_softc *); 145 void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t); 146 void iwn_read_eeprom_enhinfo(struct iwn_softc *); 147 struct ieee80211_node *iwn_node_alloc(struct ieee80211com *); 148 void iwn_newassoc(struct ieee80211com *, struct ieee80211_node *, 149 int); 150 int iwn_media_change(struct ifnet *); 151 int iwn_newstate(struct ieee80211com *, enum ieee80211_state, int); 152 void iwn_iter_func(void *, struct ieee80211_node *); 153 void iwn_calib_timeout(void *); 154 int iwn_ccmp_decap(struct iwn_softc *, struct mbuf *, 155 struct ieee80211_node *); 156 void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *, 157 struct iwn_rx_data *); 158 void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *, 159 struct iwn_rx_data *, struct mbuf_list *); 160 void iwn_ra_choose(struct iwn_softc *, struct ieee80211_node *); 161 void iwn_ampdu_rate_control(struct iwn_softc *, struct ieee80211_node *, 162 struct iwn_tx_ring *, uint16_t, uint16_t); 163 void iwn_ht_single_rate_control(struct iwn_softc *, 164 struct ieee80211_node *, 
uint8_t, uint8_t, uint8_t, int); 165 void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *, 166 struct iwn_rx_data *); 167 void iwn5000_rx_calib_results(struct iwn_softc *, 168 struct iwn_rx_desc *, struct iwn_rx_data *); 169 void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *, 170 struct iwn_rx_data *); 171 void iwn_ampdu_txq_advance(struct iwn_softc *, struct iwn_tx_ring *, 172 int, int); 173 void iwn_ampdu_tx_done(struct iwn_softc *, struct iwn_tx_ring *, 174 struct iwn_rx_desc *, uint16_t, uint8_t, uint8_t, uint8_t, 175 int, uint32_t, struct iwn_txagg_status *); 176 void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 177 struct iwn_rx_data *); 178 void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 179 struct iwn_rx_data *); 180 void iwn_tx_done_free_txdata(struct iwn_softc *, 181 struct iwn_tx_data *); 182 void iwn_clear_oactive(struct iwn_softc *, struct iwn_tx_ring *); 183 void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 184 uint8_t, uint8_t, uint8_t, int, int, uint16_t); 185 void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *); 186 void iwn_notif_intr(struct iwn_softc *); 187 void iwn_wakeup_intr(struct iwn_softc *); 188 void iwn_fatal_intr(struct iwn_softc *); 189 int iwn_intr(void *); 190 void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t, 191 uint16_t); 192 void iwn4965_reset_sched(struct iwn_softc *, int, int); 193 void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t, 194 uint16_t); 195 void iwn5000_reset_sched(struct iwn_softc *, int, int); 196 int iwn_tx(struct iwn_softc *, struct mbuf *, 197 struct ieee80211_node *); 198 int iwn_rval2ridx(int); 199 void iwn_start(struct ifnet *); 200 void iwn_watchdog(struct ifnet *); 201 int iwn_ioctl(struct ifnet *, u_long, caddr_t); 202 int iwn_cmd(struct iwn_softc *, int, const void *, int, int); 203 int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *, 204 int); 205 int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *, 206 int); 207 int iwn_set_link_quality(struct iwn_softc *, 208 struct ieee80211_node *); 209 int iwn_add_broadcast_node(struct iwn_softc *, int, int); 210 void iwn_updateedca(struct ieee80211com *); 211 void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t); 212 int iwn_set_critical_temp(struct iwn_softc *); 213 int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *); 214 void iwn4965_power_calibration(struct iwn_softc *, int); 215 int iwn4965_set_txpower(struct iwn_softc *, int); 216 int iwn5000_set_txpower(struct iwn_softc *, int); 217 int iwn4965_get_rssi(const struct iwn_rx_stat *); 218 int iwn5000_get_rssi(const struct iwn_rx_stat *); 219 int iwn_get_noise(const struct iwn_rx_general_stats *); 220 int iwn4965_get_temperature(struct iwn_softc *); 221 int iwn5000_get_temperature(struct iwn_softc *); 222 int iwn_init_sensitivity(struct iwn_softc *); 223 void iwn_collect_noise(struct iwn_softc *, 224 const struct iwn_rx_general_stats *); 225 int iwn4965_init_gains(struct iwn_softc *); 226 int iwn5000_init_gains(struct iwn_softc *); 227 int iwn4965_set_gains(struct iwn_softc *); 228 int iwn5000_set_gains(struct iwn_softc *); 229 void iwn_tune_sensitivity(struct iwn_softc *, 230 const struct iwn_rx_stats *); 231 int iwn_send_sensitivity(struct iwn_softc *); 232 int iwn_set_pslevel(struct iwn_softc *, int, int, int); 233 int iwn_send_temperature_offset(struct iwn_softc *); 234 int iwn_send_btcoex(struct iwn_softc *); 235 int iwn_send_advanced_btcoex(struct iwn_softc *); 236 int 
iwn5000_runtime_calib(struct iwn_softc *); 237 int iwn_config(struct iwn_softc *); 238 uint16_t iwn_get_active_dwell_time(struct iwn_softc *, uint16_t, uint8_t); 239 uint16_t iwn_limit_dwell(struct iwn_softc *, uint16_t); 240 uint16_t iwn_get_passive_dwell_time(struct iwn_softc *, uint16_t); 241 int iwn_scan(struct iwn_softc *, uint16_t, int); 242 void iwn_scan_abort(struct iwn_softc *); 243 int iwn_bgscan(struct ieee80211com *); 244 void iwn_rxon_configure_ht40(struct ieee80211com *, 245 struct ieee80211_node *); 246 int iwn_rxon_ht40_enabled(struct iwn_softc *); 247 int iwn_auth(struct iwn_softc *, int); 248 int iwn_run(struct iwn_softc *); 249 int iwn_set_key(struct ieee80211com *, struct ieee80211_node *, 250 struct ieee80211_key *); 251 void iwn_delete_key(struct ieee80211com *, struct ieee80211_node *, 252 struct ieee80211_key *); 253 void iwn_updatechan(struct ieee80211com *); 254 void iwn_updateprot(struct ieee80211com *); 255 void iwn_updateslot(struct ieee80211com *); 256 void iwn_update_rxon_restore_power(struct iwn_softc *); 257 void iwn5000_update_rxon(struct iwn_softc *); 258 void iwn4965_update_rxon(struct iwn_softc *); 259 int iwn_ampdu_rx_start(struct ieee80211com *, 260 struct ieee80211_node *, uint8_t); 261 void iwn_ampdu_rx_stop(struct ieee80211com *, 262 struct ieee80211_node *, uint8_t); 263 int iwn_ampdu_tx_start(struct ieee80211com *, 264 struct ieee80211_node *, uint8_t); 265 void iwn_ampdu_tx_stop(struct ieee80211com *, 266 struct ieee80211_node *, uint8_t); 267 void iwn4965_ampdu_tx_start(struct iwn_softc *, 268 struct ieee80211_node *, uint8_t, uint16_t); 269 void iwn4965_ampdu_tx_stop(struct iwn_softc *, 270 uint8_t, uint16_t); 271 void iwn5000_ampdu_tx_start(struct iwn_softc *, 272 struct ieee80211_node *, uint8_t, uint16_t); 273 void iwn5000_ampdu_tx_stop(struct iwn_softc *, 274 uint8_t, uint16_t); 275 int iwn5000_query_calibration(struct iwn_softc *); 276 int iwn5000_send_calibration(struct iwn_softc *); 277 int iwn5000_send_wimax_coex(struct iwn_softc *); 278 int iwn5000_crystal_calib(struct iwn_softc *); 279 int iwn6000_temp_offset_calib(struct iwn_softc *); 280 int iwn2000_temp_offset_calib(struct iwn_softc *); 281 int iwn4965_post_alive(struct iwn_softc *); 282 int iwn5000_post_alive(struct iwn_softc *); 283 int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *, 284 int); 285 int iwn4965_load_firmware(struct iwn_softc *); 286 int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t, 287 const uint8_t *, int); 288 int iwn5000_load_firmware(struct iwn_softc *); 289 int iwn_read_firmware_leg(struct iwn_softc *, 290 struct iwn_fw_info *); 291 int iwn_read_firmware_tlv(struct iwn_softc *, 292 struct iwn_fw_info *, uint16_t); 293 int iwn_read_firmware(struct iwn_softc *); 294 int iwn_clock_wait(struct iwn_softc *); 295 int iwn_apm_init(struct iwn_softc *); 296 void iwn_apm_stop_master(struct iwn_softc *); 297 void iwn_apm_stop(struct iwn_softc *); 298 int iwn4965_nic_config(struct iwn_softc *); 299 int iwn5000_nic_config(struct iwn_softc *); 300 int iwn_hw_prepare(struct iwn_softc *); 301 int iwn_hw_init(struct iwn_softc *); 302 void iwn_hw_stop(struct iwn_softc *); 303 int iwn_init(struct ifnet *); 304 void iwn_stop(struct ifnet *); 305 306 #ifdef IWN_DEBUG 307 #define DPRINTF(x) do { if (iwn_debug > 0) printf x; } while (0) 308 #define DPRINTFN(n, x) do { if (iwn_debug >= (n)) printf x; } while (0) 309 int iwn_debug = 1; 310 #else 311 #define DPRINTF(x) 312 #define DPRINTFN(n, x) 313 #endif 314 315 struct cfdriver iwn_cd = { 316 NULL, "iwn", 
DV_IFNET 317 }; 318 319 const struct cfattach iwn_ca = { 320 sizeof (struct iwn_softc), iwn_match, iwn_attach, iwn_detach, 321 iwn_activate 322 }; 323 324 int 325 iwn_match(struct device *parent, void *match, void *aux) 326 { 327 return pci_matchbyid((struct pci_attach_args *)aux, iwn_devices, 328 nitems(iwn_devices)); 329 } 330 331 void 332 iwn_attach(struct device *parent, struct device *self, void *aux) 333 { 334 struct iwn_softc *sc = (struct iwn_softc *)self; 335 struct ieee80211com *ic = &sc->sc_ic; 336 struct ifnet *ifp = &ic->ic_if; 337 struct pci_attach_args *pa = aux; 338 const char *intrstr; 339 pci_intr_handle_t ih; 340 pcireg_t memtype, reg; 341 int i, error; 342 343 sc->sc_pct = pa->pa_pc; 344 sc->sc_pcitag = pa->pa_tag; 345 sc->sc_dmat = pa->pa_dmat; 346 347 /* 348 * Get the offset of the PCI Express Capability Structure in PCI 349 * Configuration Space. 350 */ 351 error = pci_get_capability(sc->sc_pct, sc->sc_pcitag, 352 PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL); 353 if (error == 0) { 354 printf(": PCIe capability structure not found!\n"); 355 return; 356 } 357 358 /* Clear device-specific "PCI retry timeout" register (41h). */ 359 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40); 360 if (reg & 0xff00) 361 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00); 362 363 /* Hardware bug workaround. */ 364 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 365 if (reg & PCI_COMMAND_INTERRUPT_DISABLE) { 366 DPRINTF(("PCIe INTx Disable set\n")); 367 reg &= ~PCI_COMMAND_INTERRUPT_DISABLE; 368 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 369 PCI_COMMAND_STATUS_REG, reg); 370 } 371 372 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IWN_PCI_BAR0); 373 error = pci_mapreg_map(pa, IWN_PCI_BAR0, memtype, 0, &sc->sc_st, 374 &sc->sc_sh, NULL, &sc->sc_sz, 0); 375 if (error != 0) { 376 printf(": can't map mem space\n"); 377 return; 378 } 379 380 /* Install interrupt handler. */ 381 if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) { 382 printf(": can't map interrupt\n"); 383 return; 384 } 385 intrstr = pci_intr_string(sc->sc_pct, ih); 386 sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwn_intr, sc, 387 sc->sc_dev.dv_xname); 388 if (sc->sc_ih == NULL) { 389 printf(": can't establish interrupt"); 390 if (intrstr != NULL) 391 printf(" at %s", intrstr); 392 printf("\n"); 393 return; 394 } 395 printf(": %s", intrstr); 396 397 /* Read hardware revision and attach. */ 398 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0x1f; 399 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 400 error = iwn4965_attach(sc, PCI_PRODUCT(pa->pa_id)); 401 else 402 error = iwn5000_attach(sc, PCI_PRODUCT(pa->pa_id)); 403 if (error != 0) { 404 printf(": could not attach device\n"); 405 return; 406 } 407 408 if ((error = iwn_hw_prepare(sc)) != 0) { 409 printf(": hardware not ready\n"); 410 return; 411 } 412 413 /* Read MAC address, channels, etc from EEPROM. */ 414 if ((error = iwn_read_eeprom(sc)) != 0) { 415 printf(": could not read EEPROM\n"); 416 return; 417 } 418 419 /* Allocate DMA memory for firmware transfers. */ 420 if ((error = iwn_alloc_fwmem(sc)) != 0) { 421 printf(": could not allocate memory for firmware\n"); 422 return; 423 } 424 425 /* Allocate "Keep Warm" page. */ 426 if ((error = iwn_alloc_kw(sc)) != 0) { 427 printf(": could not allocate keep warm page\n"); 428 goto fail1; 429 } 430 431 /* Allocate ICT table for 5000 Series. 
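	 * The ICT (interrupt cause table) replaces the legacy interrupt
	 * scheme and is only used when hw_type != IWN_HW_REV_TYPE_4965;
	 * see iwn5000_ict_reset() and iwn_intr().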
*/ 432 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 433 (error = iwn_alloc_ict(sc)) != 0) { 434 printf(": could not allocate ICT table\n"); 435 goto fail2; 436 } 437 438 /* Allocate TX scheduler "rings". */ 439 if ((error = iwn_alloc_sched(sc)) != 0) { 440 printf(": could not allocate TX scheduler rings\n"); 441 goto fail3; 442 } 443 444 /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */ 445 for (i = 0; i < sc->ntxqs; i++) { 446 if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 447 printf(": could not allocate TX ring %d\n", i); 448 goto fail4; 449 } 450 } 451 452 /* Allocate RX ring. */ 453 if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) { 454 printf(": could not allocate RX ring\n"); 455 goto fail4; 456 } 457 458 /* Clear pending interrupts. */ 459 IWN_WRITE(sc, IWN_INT, 0xffffffff); 460 461 /* Count the number of available chains. */ 462 sc->ntxchains = 463 ((sc->txchainmask >> 2) & 1) + 464 ((sc->txchainmask >> 1) & 1) + 465 ((sc->txchainmask >> 0) & 1); 466 sc->nrxchains = 467 ((sc->rxchainmask >> 2) & 1) + 468 ((sc->rxchainmask >> 1) & 1) + 469 ((sc->rxchainmask >> 0) & 1); 470 printf(", MIMO %dT%dR, %.4s, address %s\n", sc->ntxchains, 471 sc->nrxchains, sc->eeprom_domain, ether_sprintf(ic->ic_myaddr)); 472 473 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 474 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 475 ic->ic_state = IEEE80211_S_INIT; 476 477 /* Set device capabilities. */ 478 ic->ic_caps = 479 IEEE80211_C_WEP | /* WEP */ 480 IEEE80211_C_RSN | /* WPA/RSN */ 481 IEEE80211_C_SCANALL | /* device scans all channels at once */ 482 IEEE80211_C_SCANALLBAND | /* driver scans all bands at once */ 483 IEEE80211_C_MONITOR | /* monitor mode supported */ 484 IEEE80211_C_SHSLOT | /* short slot time supported */ 485 IEEE80211_C_SHPREAMBLE | /* short preamble supported */ 486 IEEE80211_C_PMGT; /* power saving supported */ 487 488 /* No optional HT features supported for now, */ 489 ic->ic_htcaps = 0; 490 ic->ic_htxcaps = 0; 491 ic->ic_txbfcaps = 0; 492 ic->ic_aselcaps = 0; 493 ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */); 494 if (sc->sc_flags & IWN_FLAG_HAS_11N) { 495 ic->ic_caps |= (IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU); 496 /* Set HT capabilities. */ 497 ic->ic_htcaps = IEEE80211_HTCAP_SGI20; 498 /* 6200 devices have issues with SGI40 for some reason. */ 499 if ((sc->sc_flags & IWN_FLAG_INTERNAL_PA) == 0) 500 ic->ic_htcaps |= IEEE80211_HTCAP_SGI40; 501 ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40; 502 #ifdef notyet 503 ic->ic_htcaps |= 504 #if IWN_RBUF_SIZE == 8192 505 IEEE80211_HTCAP_AMSDU7935 | 506 #endif 507 if (sc->hw_type != IWN_HW_REV_TYPE_4965) 508 ic->ic_htcaps |= IEEE80211_HTCAP_GF; 509 if (sc->hw_type == IWN_HW_REV_TYPE_6050) 510 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN; 511 else 512 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS; 513 #endif /* notyet */ 514 } 515 516 /* Set supported legacy rates. */ 517 ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b; 518 ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g; 519 if (sc->sc_flags & IWN_FLAG_HAS_5GHZ) { 520 ic->ic_sup_rates[IEEE80211_MODE_11A] = 521 ieee80211_std_rateset_11a; 522 } 523 if (sc->sc_flags & IWN_FLAG_HAS_11N) { 524 /* Set supported HT rates. */ 525 ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */ 526 #ifdef notyet 527 if (sc->nrxchains > 1) 528 ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */ 529 if (sc->nrxchains > 2) 530 ic->ic_sup_mcs[2] = 0xff; /* MCS 16-23 */ 531 #endif 532 } 533 534 /* IBSS channel undefined for now. 
*/ 535 ic->ic_ibss_chan = &ic->ic_channels[0]; 536 537 ifp->if_softc = sc; 538 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 539 ifp->if_ioctl = iwn_ioctl; 540 ifp->if_start = iwn_start; 541 ifp->if_watchdog = iwn_watchdog; 542 memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ); 543 544 if_attach(ifp); 545 ieee80211_ifattach(ifp); 546 ic->ic_node_alloc = iwn_node_alloc; 547 ic->ic_bgscan_start = iwn_bgscan; 548 ic->ic_newassoc = iwn_newassoc; 549 ic->ic_updateedca = iwn_updateedca; 550 ic->ic_set_key = iwn_set_key; 551 ic->ic_delete_key = iwn_delete_key; 552 ic->ic_updatechan = iwn_updatechan; 553 ic->ic_updateprot = iwn_updateprot; 554 ic->ic_updateslot = iwn_updateslot; 555 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start; 556 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop; 557 ic->ic_ampdu_tx_start = iwn_ampdu_tx_start; 558 ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop; 559 560 /* Override 802.11 state transition machine. */ 561 sc->sc_newstate = ic->ic_newstate; 562 ic->ic_newstate = iwn_newstate; 563 ieee80211_media_init(ifp, iwn_media_change, ieee80211_media_status); 564 565 sc->amrr.amrr_min_success_threshold = 1; 566 sc->amrr.amrr_max_success_threshold = 15; 567 568 #if NBPFILTER > 0 569 iwn_radiotap_attach(sc); 570 #endif 571 timeout_set(&sc->calib_to, iwn_calib_timeout, sc); 572 rw_init(&sc->sc_rwlock, "iwnlock"); 573 task_set(&sc->init_task, iwn_init_task, sc); 574 return; 575 576 /* Free allocated memory if something failed during attachment. */ 577 fail4: while (--i >= 0) 578 iwn_free_tx_ring(sc, &sc->txq[i]); 579 iwn_free_sched(sc); 580 fail3: if (sc->ict != NULL) 581 iwn_free_ict(sc); 582 fail2: iwn_free_kw(sc); 583 fail1: iwn_free_fwmem(sc); 584 } 585 586 int 587 iwn4965_attach(struct iwn_softc *sc, pci_product_id_t pid) 588 { 589 struct iwn_ops *ops = &sc->ops; 590 591 ops->load_firmware = iwn4965_load_firmware; 592 ops->read_eeprom = iwn4965_read_eeprom; 593 ops->post_alive = iwn4965_post_alive; 594 ops->nic_config = iwn4965_nic_config; 595 ops->reset_sched = iwn4965_reset_sched; 596 ops->update_sched = iwn4965_update_sched; 597 ops->update_rxon = iwn4965_update_rxon; 598 ops->get_temperature = iwn4965_get_temperature; 599 ops->get_rssi = iwn4965_get_rssi; 600 ops->set_txpower = iwn4965_set_txpower; 601 ops->init_gains = iwn4965_init_gains; 602 ops->set_gains = iwn4965_set_gains; 603 ops->add_node = iwn4965_add_node; 604 ops->tx_done = iwn4965_tx_done; 605 ops->ampdu_tx_start = iwn4965_ampdu_tx_start; 606 ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop; 607 sc->ntxqs = IWN4965_NTXQUEUES; 608 sc->first_agg_txq = IWN4965_FIRST_AGG_TXQUEUE; 609 sc->ndmachnls = IWN4965_NDMACHNLS; 610 sc->broadcast_id = IWN4965_ID_BROADCAST; 611 sc->rxonsz = IWN4965_RXONSZ; 612 sc->schedsz = IWN4965_SCHEDSZ; 613 sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ; 614 sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ; 615 sc->fwsz = IWN4965_FWSZ; 616 sc->sched_txfact_addr = IWN4965_SCHED_TXFACT; 617 sc->limits = &iwn4965_sensitivity_limits; 618 sc->fwname = "iwn-4965"; 619 /* Override chains masks, ROM is known to be broken. 
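	 * The 4965 has two TX chains (A+B) and three RX chains (A+B+C),
	 * so hardcode those masks instead of trusting the EEPROM values.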
*/ 620 sc->txchainmask = IWN_ANT_AB; 621 sc->rxchainmask = IWN_ANT_ABC; 622 623 return 0; 624 } 625 626 int 627 iwn5000_attach(struct iwn_softc *sc, pci_product_id_t pid) 628 { 629 struct iwn_ops *ops = &sc->ops; 630 631 ops->load_firmware = iwn5000_load_firmware; 632 ops->read_eeprom = iwn5000_read_eeprom; 633 ops->post_alive = iwn5000_post_alive; 634 ops->nic_config = iwn5000_nic_config; 635 ops->reset_sched = iwn5000_reset_sched; 636 ops->update_sched = iwn5000_update_sched; 637 ops->update_rxon = iwn5000_update_rxon; 638 ops->get_temperature = iwn5000_get_temperature; 639 ops->get_rssi = iwn5000_get_rssi; 640 ops->set_txpower = iwn5000_set_txpower; 641 ops->init_gains = iwn5000_init_gains; 642 ops->set_gains = iwn5000_set_gains; 643 ops->add_node = iwn5000_add_node; 644 ops->tx_done = iwn5000_tx_done; 645 ops->ampdu_tx_start = iwn5000_ampdu_tx_start; 646 ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop; 647 sc->ntxqs = IWN5000_NTXQUEUES; 648 sc->first_agg_txq = IWN5000_FIRST_AGG_TXQUEUE; 649 sc->ndmachnls = IWN5000_NDMACHNLS; 650 sc->broadcast_id = IWN5000_ID_BROADCAST; 651 sc->rxonsz = IWN5000_RXONSZ; 652 sc->schedsz = IWN5000_SCHEDSZ; 653 sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ; 654 sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ; 655 sc->fwsz = IWN5000_FWSZ; 656 sc->sched_txfact_addr = IWN5000_SCHED_TXFACT; 657 658 switch (sc->hw_type) { 659 case IWN_HW_REV_TYPE_5100: 660 sc->limits = &iwn5000_sensitivity_limits; 661 sc->fwname = "iwn-5000"; 662 /* Override chains masks, ROM is known to be broken. */ 663 sc->txchainmask = IWN_ANT_B; 664 sc->rxchainmask = IWN_ANT_AB; 665 break; 666 case IWN_HW_REV_TYPE_5150: 667 sc->limits = &iwn5150_sensitivity_limits; 668 sc->fwname = "iwn-5150"; 669 break; 670 case IWN_HW_REV_TYPE_5300: 671 case IWN_HW_REV_TYPE_5350: 672 sc->limits = &iwn5000_sensitivity_limits; 673 sc->fwname = "iwn-5000"; 674 break; 675 case IWN_HW_REV_TYPE_1000: 676 sc->limits = &iwn1000_sensitivity_limits; 677 sc->fwname = "iwn-1000"; 678 break; 679 case IWN_HW_REV_TYPE_6000: 680 sc->limits = &iwn6000_sensitivity_limits; 681 sc->fwname = "iwn-6000"; 682 if (pid == PCI_PRODUCT_INTEL_WL_6200_1 || 683 pid == PCI_PRODUCT_INTEL_WL_6200_2) { 684 sc->sc_flags |= IWN_FLAG_INTERNAL_PA; 685 /* Override chains masks, ROM is known to be broken. */ 686 sc->txchainmask = IWN_ANT_BC; 687 sc->rxchainmask = IWN_ANT_BC; 688 } 689 break; 690 case IWN_HW_REV_TYPE_6050: 691 sc->limits = &iwn6000_sensitivity_limits; 692 sc->fwname = "iwn-6050"; 693 break; 694 case IWN_HW_REV_TYPE_6005: 695 sc->limits = &iwn6000_sensitivity_limits; 696 if (pid != PCI_PRODUCT_INTEL_WL_6005_1 && 697 pid != PCI_PRODUCT_INTEL_WL_6005_2) { 698 sc->fwname = "iwn-6030"; 699 sc->sc_flags |= IWN_FLAG_ADV_BT_COEX; 700 } else 701 sc->fwname = "iwn-6005"; 702 break; 703 case IWN_HW_REV_TYPE_2030: 704 sc->limits = &iwn2000_sensitivity_limits; 705 sc->fwname = "iwn-2030"; 706 sc->sc_flags |= IWN_FLAG_ADV_BT_COEX; 707 break; 708 case IWN_HW_REV_TYPE_2000: 709 sc->limits = &iwn2000_sensitivity_limits; 710 sc->fwname = "iwn-2000"; 711 break; 712 case IWN_HW_REV_TYPE_135: 713 sc->limits = &iwn2000_sensitivity_limits; 714 sc->fwname = "iwn-135"; 715 sc->sc_flags |= IWN_FLAG_ADV_BT_COEX; 716 break; 717 case IWN_HW_REV_TYPE_105: 718 sc->limits = &iwn2000_sensitivity_limits; 719 sc->fwname = "iwn-105"; 720 break; 721 default: 722 printf(": adapter type %d not supported\n", sc->hw_type); 723 return ENOTSUP; 724 } 725 return 0; 726 } 727 728 #if NBPFILTER > 0 729 /* 730 * Attach the interface to 802.11 radiotap. 
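 * This sets up the BPF tap and pre-fills the static parts of the
 * RX and TX radiotap headers.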
 */
void
iwn_radiotap_attach(struct iwn_softc *sc)
{
	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);

	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWN_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWN_TX_RADIOTAP_PRESENT);
}
#endif

int
iwn_detach(struct device *self, int flags)
{
	struct iwn_softc *sc = (struct iwn_softc *)self;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int qid;

	timeout_del(&sc->calib_to);
	task_del(systq, &sc->init_task);

	/* Uninstall interrupt handler. */
	if (sc->sc_ih != NULL)
		pci_intr_disestablish(sc->sc_pct, sc->sc_ih);

	/* Free DMA resources. */
	iwn_free_rx_ring(sc, &sc->rxq);
	for (qid = 0; qid < sc->ntxqs; qid++)
		iwn_free_tx_ring(sc, &sc->txq[qid]);
	iwn_free_sched(sc);
	iwn_free_kw(sc);
	if (sc->ict != NULL)
		iwn_free_ict(sc);
	iwn_free_fwmem(sc);

	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);

	ieee80211_ifdetach(ifp);
	if_detach(ifp);

	return 0;
}

int
iwn_activate(struct device *self, int act)
{
	struct iwn_softc *sc = (struct iwn_softc *)self;
	struct ifnet *ifp = &sc->sc_ic.ic_if;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			iwn_stop(ifp);
		break;
	case DVACT_WAKEUP:
		iwn_wakeup(sc);
		break;
	}

	return 0;
}

void
iwn_wakeup(struct iwn_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	if (reg & 0xff00)
		pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
	iwn_init_task(sc);
}

void
iwn_init_task(void *arg1)
{
	struct iwn_softc *sc = arg1;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s;

	rw_enter_write(&sc->sc_rwlock);
	s = splnet();

	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwn_init(ifp);

	splx(s);
	rw_exit_write(&sc->sc_rwlock);
}

int
iwn_nic_lock(struct iwn_softc *sc)
{
	int ntries;

	/* Request exclusive access to NIC. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);

	/* Spin until we actually get the lock.
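	 * Holding IWN_GP_CNTRL_MAC_ACCESS_REQ keeps the device awake while
	 * registers are accessed; iwn_nic_unlock() drops the request again.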
	 */
	for (ntries = 0; ntries < 1000; ntries++) {
		if ((IWN_READ(sc, IWN_GP_CNTRL) &
		    (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}

static __inline void
iwn_nic_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
}

static __inline uint32_t
iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_PRPH_RDATA);
}

static __inline void
iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
}

static __inline void
iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
}

static __inline void
iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
}

static __inline void
iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
    const uint32_t *data, int count)
{
	for (; count > 0; count--, data++, addr += 4)
		iwn_prph_write(sc, addr, *data);
}

static __inline uint32_t
iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
{
	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
	IWN_BARRIER_READ_WRITE(sc);
	return IWN_READ(sc, IWN_MEM_RDATA);
}

static __inline void
iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
	IWN_BARRIER_WRITE(sc);
	IWN_WRITE(sc, IWN_MEM_WDATA, data);
}

static __inline void
iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
{
	uint32_t tmp;

	tmp = iwn_mem_read(sc, addr & ~3);
	if (addr & 3)
		tmp = (tmp & 0x0000ffff) | data << 16;
	else
		tmp = (tmp & 0xffff0000) | data;
	iwn_mem_write(sc, addr & ~3, tmp);
}

static __inline void
iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
    int count)
{
	for (; count > 0; count--, addr += 4)
		*data++ = iwn_mem_read(sc, addr);
}

static __inline void
iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
    int count)
{
	for (; count > 0; count--, addr += 4)
		iwn_mem_write(sc, addr, val);
}

int
iwn_eeprom_lock(struct iwn_softc *sc)
{
	int i, ntries;

	for (i = 0; i < 100; i++) {
		/* Request exclusive access to EEPROM. */
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);

		/* Spin until we actually get the lock. */
		for (ntries = 0; ntries < 100; ntries++) {
			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
				return 0;
			DELAY(10);
		}
	}
	return ETIMEDOUT;
}

static __inline void
iwn_eeprom_unlock(struct iwn_softc *sc)
{
	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
}

/*
 * Initialize access by host to One Time Programmable ROM.
 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
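 * Whether an OTPROM is present is detected at runtime via the
 * IWN_OTP_GP_DEV_SEL_OTP bit; see iwn_read_eeprom().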
 */
int
iwn_init_otprom(struct iwn_softc *sc)
{
	uint16_t prev, base, next;
	int count, error;

	/* Wait for clock stabilization before accessing prph. */
	if ((error = iwn_clock_wait(sc)) != 0)
		return error;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	DELAY(5);
	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
	iwn_nic_unlock(sc);

	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
	if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
		    IWN_RESET_LINK_PWR_MGMT_DIS);
	}
	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
	/* Clear ECC status. */
	IWN_SETBITS(sc, IWN_OTP_GP,
	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);

	/*
	 * Find the block before last block (contains the EEPROM image)
	 * for HW without OTP shadow RAM.
	 */
	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
		/* Switch to absolute addressing mode. */
		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
		base = 0;
		for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
			error = iwn_read_prom_data(sc, base, &next, 2);
			if (error != 0)
				return error;
			if (next == 0)	/* End of linked-list. */
				break;
			prev = base;
			base = letoh16(next);
		}
		if (count == 0 || count == IWN1000_OTP_NBLOCKS)
			return EIO;
		/* Skip "next" word. */
		sc->prom_base = prev + 1;
	}
	return 0;
}

int
iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
{
	uint8_t *out = data;
	uint32_t val, tmp;
	int ntries;

	addr += sc->prom_base;
	for (; count > 0; count -= 2, addr++) {
		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
		for (ntries = 0; ntries < 10; ntries++) {
			val = IWN_READ(sc, IWN_EEPROM);
			if (val & IWN_EEPROM_READ_VALID)
				break;
			DELAY(5);
		}
		if (ntries == 10) {
			printf("%s: timeout reading ROM at 0x%x\n",
			    sc->sc_dev.dv_xname, addr);
			return ETIMEDOUT;
		}
		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
			/* OTPROM, check for ECC errors. */
			tmp = IWN_READ(sc, IWN_OTP_GP);
			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
				printf("%s: OTPROM ECC error at 0x%x\n",
				    sc->sc_dev.dv_xname, addr);
				return EIO;
			}
			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
				/* Correctable ECC error, clear bit.
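				 * The status bit is write-one-to-clear; the
				 * data read above is already corrected, so
				 * just acknowledge and keep going.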
*/ 1048 IWN_SETBITS(sc, IWN_OTP_GP, 1049 IWN_OTP_GP_ECC_CORR_STTS); 1050 } 1051 } 1052 *out++ = val >> 16; 1053 if (count > 1) 1054 *out++ = val >> 24; 1055 } 1056 return 0; 1057 } 1058 1059 int 1060 iwn_dma_contig_alloc(bus_dma_tag_t tag, struct iwn_dma_info *dma, void **kvap, 1061 bus_size_t size, bus_size_t alignment) 1062 { 1063 int nsegs, error; 1064 1065 dma->tag = tag; 1066 dma->size = size; 1067 1068 error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT, 1069 &dma->map); 1070 if (error != 0) 1071 goto fail; 1072 1073 error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs, 1074 BUS_DMA_NOWAIT | BUS_DMA_ZERO); 1075 if (error != 0) 1076 goto fail; 1077 1078 error = bus_dmamem_map(tag, &dma->seg, 1, size, &dma->vaddr, 1079 BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 1080 if (error != 0) 1081 goto fail; 1082 1083 error = bus_dmamap_load_raw(tag, dma->map, &dma->seg, 1, size, 1084 BUS_DMA_NOWAIT); 1085 if (error != 0) 1086 goto fail; 1087 1088 bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE); 1089 1090 dma->paddr = dma->map->dm_segs[0].ds_addr; 1091 if (kvap != NULL) 1092 *kvap = dma->vaddr; 1093 1094 return 0; 1095 1096 fail: iwn_dma_contig_free(dma); 1097 return error; 1098 } 1099 1100 void 1101 iwn_dma_contig_free(struct iwn_dma_info *dma) 1102 { 1103 if (dma->map != NULL) { 1104 if (dma->vaddr != NULL) { 1105 bus_dmamap_sync(dma->tag, dma->map, 0, dma->size, 1106 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1107 bus_dmamap_unload(dma->tag, dma->map); 1108 bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size); 1109 bus_dmamem_free(dma->tag, &dma->seg, 1); 1110 dma->vaddr = NULL; 1111 } 1112 bus_dmamap_destroy(dma->tag, dma->map); 1113 dma->map = NULL; 1114 } 1115 } 1116 1117 int 1118 iwn_alloc_sched(struct iwn_softc *sc) 1119 { 1120 /* TX scheduler rings must be aligned on a 1KB boundary. */ 1121 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma, 1122 (void **)&sc->sched, sc->schedsz, 1024); 1123 } 1124 1125 void 1126 iwn_free_sched(struct iwn_softc *sc) 1127 { 1128 iwn_dma_contig_free(&sc->sched_dma); 1129 } 1130 1131 int 1132 iwn_alloc_kw(struct iwn_softc *sc) 1133 { 1134 /* "Keep Warm" page must be aligned on a 4KB boundary. */ 1135 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, NULL, 4096, 1136 4096); 1137 } 1138 1139 void 1140 iwn_free_kw(struct iwn_softc *sc) 1141 { 1142 iwn_dma_contig_free(&sc->kw_dma); 1143 } 1144 1145 int 1146 iwn_alloc_ict(struct iwn_softc *sc) 1147 { 1148 /* ICT table must be aligned on a 4KB boundary. */ 1149 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, 1150 (void **)&sc->ict, IWN_ICT_SIZE, 4096); 1151 } 1152 1153 void 1154 iwn_free_ict(struct iwn_softc *sc) 1155 { 1156 iwn_dma_contig_free(&sc->ict_dma); 1157 } 1158 1159 int 1160 iwn_alloc_fwmem(struct iwn_softc *sc) 1161 { 1162 /* Must be aligned on a 16-byte boundary. */ 1163 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, NULL, 1164 sc->fwsz, 16); 1165 } 1166 1167 void 1168 iwn_free_fwmem(struct iwn_softc *sc) 1169 { 1170 iwn_dma_contig_free(&sc->fw_dma); 1171 } 1172 1173 int 1174 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1175 { 1176 bus_size_t size; 1177 int i, error; 1178 1179 ring->cur = 0; 1180 1181 /* Allocate RX descriptors (256-byte aligned). 
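	 * Each descriptor is a single 32-bit word holding the DMA address
	 * of an RX buffer shifted right by 8 bits, hence the
	 * sizeof (uint32_t) per ring slot below.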
*/ 1182 size = IWN_RX_RING_COUNT * sizeof (uint32_t); 1183 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, 1184 (void **)&ring->desc, size, 256); 1185 if (error != 0) { 1186 printf("%s: could not allocate RX ring DMA memory\n", 1187 sc->sc_dev.dv_xname); 1188 goto fail; 1189 } 1190 1191 /* Allocate RX status area (16-byte aligned). */ 1192 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, 1193 (void **)&ring->stat, sizeof (struct iwn_rx_status), 16); 1194 if (error != 0) { 1195 printf("%s: could not allocate RX status DMA memory\n", 1196 sc->sc_dev.dv_xname); 1197 goto fail; 1198 } 1199 1200 /* 1201 * Allocate and map RX buffers. 1202 */ 1203 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1204 struct iwn_rx_data *data = &ring->data[i]; 1205 1206 error = bus_dmamap_create(sc->sc_dmat, IWN_RBUF_SIZE, 1, 1207 IWN_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1208 &data->map); 1209 if (error != 0) { 1210 printf("%s: could not create RX buf DMA map\n", 1211 sc->sc_dev.dv_xname); 1212 goto fail; 1213 } 1214 1215 data->m = MCLGETL(NULL, M_DONTWAIT, IWN_RBUF_SIZE); 1216 if (data->m == NULL) { 1217 printf("%s: could not allocate RX mbuf\n", 1218 sc->sc_dev.dv_xname); 1219 error = ENOBUFS; 1220 goto fail; 1221 } 1222 1223 error = bus_dmamap_load(sc->sc_dmat, data->map, 1224 mtod(data->m, void *), IWN_RBUF_SIZE, NULL, 1225 BUS_DMA_NOWAIT | BUS_DMA_READ); 1226 if (error != 0) { 1227 printf("%s: can't map mbuf (error %d)\n", 1228 sc->sc_dev.dv_xname, error); 1229 goto fail; 1230 } 1231 1232 /* Set physical address of RX buffer (256-byte aligned). */ 1233 ring->desc[i] = htole32(data->map->dm_segs[0].ds_addr >> 8); 1234 } 1235 1236 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0, size, 1237 BUS_DMASYNC_PREWRITE); 1238 1239 return 0; 1240 1241 fail: iwn_free_rx_ring(sc, ring); 1242 return error; 1243 } 1244 1245 void 1246 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1247 { 1248 int ntries; 1249 1250 if (iwn_nic_lock(sc) == 0) { 1251 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 1252 for (ntries = 0; ntries < 1000; ntries++) { 1253 if (IWN_READ(sc, IWN_FH_RX_STATUS) & 1254 IWN_FH_RX_STATUS_IDLE) 1255 break; 1256 DELAY(10); 1257 } 1258 iwn_nic_unlock(sc); 1259 } 1260 ring->cur = 0; 1261 sc->last_rx_valid = 0; 1262 } 1263 1264 void 1265 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1266 { 1267 int i; 1268 1269 iwn_dma_contig_free(&ring->desc_dma); 1270 iwn_dma_contig_free(&ring->stat_dma); 1271 1272 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1273 struct iwn_rx_data *data = &ring->data[i]; 1274 1275 if (data->m != NULL) { 1276 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 1277 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1278 bus_dmamap_unload(sc->sc_dmat, data->map); 1279 m_freem(data->m); 1280 } 1281 if (data->map != NULL) 1282 bus_dmamap_destroy(sc->sc_dmat, data->map); 1283 } 1284 } 1285 1286 int 1287 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid) 1288 { 1289 bus_addr_t paddr; 1290 bus_size_t size; 1291 int i, error; 1292 1293 ring->qid = qid; 1294 ring->queued = 0; 1295 ring->cur = 0; 1296 1297 /* Allocate TX descriptors (256-byte aligned). 
*/ 1298 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc); 1299 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, 1300 (void **)&ring->desc, size, 256); 1301 if (error != 0) { 1302 printf("%s: could not allocate TX ring DMA memory\n", 1303 sc->sc_dev.dv_xname); 1304 goto fail; 1305 } 1306 1307 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd); 1308 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, 1309 (void **)&ring->cmd, size, 4); 1310 if (error != 0) { 1311 printf("%s: could not allocate TX cmd DMA memory\n", 1312 sc->sc_dev.dv_xname); 1313 goto fail; 1314 } 1315 1316 paddr = ring->cmd_dma.paddr; 1317 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1318 struct iwn_tx_data *data = &ring->data[i]; 1319 1320 data->cmd_paddr = paddr; 1321 data->scratch_paddr = paddr + 12; 1322 paddr += sizeof (struct iwn_tx_cmd); 1323 1324 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1325 IWN_MAX_SCATTER - 1, MCLBYTES, 0, BUS_DMA_NOWAIT, 1326 &data->map); 1327 if (error != 0) { 1328 printf("%s: could not create TX buf DMA map\n", 1329 sc->sc_dev.dv_xname); 1330 goto fail; 1331 } 1332 } 1333 return 0; 1334 1335 fail: iwn_free_tx_ring(sc, ring); 1336 return error; 1337 } 1338 1339 void 1340 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1341 { 1342 int i; 1343 1344 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1345 struct iwn_tx_data *data = &ring->data[i]; 1346 1347 if (data->m != NULL) { 1348 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 1349 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1350 bus_dmamap_unload(sc->sc_dmat, data->map); 1351 m_freem(data->m); 1352 data->m = NULL; 1353 } 1354 } 1355 /* Clear TX descriptors. */ 1356 memset(ring->desc, 0, ring->desc_dma.size); 1357 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0, 1358 ring->desc_dma.size, BUS_DMASYNC_PREWRITE); 1359 sc->qfullmsk &= ~(1 << ring->qid); 1360 ring->queued = 0; 1361 ring->cur = 0; 1362 } 1363 1364 void 1365 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1366 { 1367 int i; 1368 1369 iwn_dma_contig_free(&ring->desc_dma); 1370 iwn_dma_contig_free(&ring->cmd_dma); 1371 1372 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1373 struct iwn_tx_data *data = &ring->data[i]; 1374 1375 if (data->m != NULL) { 1376 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 1377 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1378 bus_dmamap_unload(sc->sc_dmat, data->map); 1379 m_freem(data->m); 1380 } 1381 if (data->map != NULL) 1382 bus_dmamap_destroy(sc->sc_dmat, data->map); 1383 } 1384 } 1385 1386 void 1387 iwn5000_ict_reset(struct iwn_softc *sc) 1388 { 1389 /* Disable interrupts. */ 1390 IWN_WRITE(sc, IWN_INT_MASK, 0); 1391 1392 /* Reset ICT table. */ 1393 memset(sc->ict, 0, IWN_ICT_SIZE); 1394 sc->ict_cur = 0; 1395 1396 /* Set physical address of ICT table (4KB aligned). */ 1397 DPRINTF(("enabling ICT\n")); 1398 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE | 1399 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12); 1400 1401 /* Enable periodic RX interrupt. */ 1402 sc->int_mask |= IWN_INT_RX_PERIODIC; 1403 /* Switch to ICT interrupt mode in driver. */ 1404 sc->sc_flags |= IWN_FLAG_USE_ICT; 1405 1406 /* Re-enable interrupts. */ 1407 IWN_WRITE(sc, IWN_INT, 0xffffffff); 1408 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 1409 } 1410 1411 int 1412 iwn_read_eeprom(struct iwn_softc *sc) 1413 { 1414 struct iwn_ops *ops = &sc->ops; 1415 struct ieee80211com *ic = &sc->sc_ic; 1416 uint16_t val; 1417 int error; 1418 1419 /* Check whether adapter has an EEPROM or an OTPROM. 
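	 * Adapters with hw_type >= IWN_HW_REV_TYPE_1000 may use an OTPROM;
	 * the IWN_OTP_GP_DEV_SEL_OTP bit tells the two apart.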
*/ 1420 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 && 1421 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP)) 1422 sc->sc_flags |= IWN_FLAG_HAS_OTPROM; 1423 DPRINTF(("%s found\n", (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? 1424 "OTPROM" : "EEPROM")); 1425 1426 /* Adapter has to be powered on for EEPROM access to work. */ 1427 if ((error = iwn_apm_init(sc)) != 0) { 1428 printf("%s: could not power ON adapter\n", 1429 sc->sc_dev.dv_xname); 1430 return error; 1431 } 1432 1433 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) { 1434 printf("%s: bad ROM signature\n", sc->sc_dev.dv_xname); 1435 return EIO; 1436 } 1437 if ((error = iwn_eeprom_lock(sc)) != 0) { 1438 printf("%s: could not lock ROM (error=%d)\n", 1439 sc->sc_dev.dv_xname, error); 1440 return error; 1441 } 1442 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 1443 if ((error = iwn_init_otprom(sc)) != 0) { 1444 printf("%s: could not initialize OTPROM\n", 1445 sc->sc_dev.dv_xname); 1446 return error; 1447 } 1448 } 1449 1450 iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2); 1451 DPRINTF(("SKU capabilities=0x%04x\n", letoh16(val))); 1452 /* Check if HT support is bonded out. */ 1453 if (val & htole16(IWN_EEPROM_SKU_CAP_11N)) 1454 sc->sc_flags |= IWN_FLAG_HAS_11N; 1455 1456 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2); 1457 sc->rfcfg = letoh16(val); 1458 DPRINTF(("radio config=0x%04x\n", sc->rfcfg)); 1459 /* Read Tx/Rx chains from ROM unless it's known to be broken. */ 1460 if (sc->txchainmask == 0) 1461 sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg); 1462 if (sc->rxchainmask == 0) 1463 sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg); 1464 1465 /* Read MAC address. */ 1466 iwn_read_prom_data(sc, IWN_EEPROM_MAC, ic->ic_myaddr, 6); 1467 1468 /* Read adapter-specific information from EEPROM. */ 1469 ops->read_eeprom(sc); 1470 1471 iwn_apm_stop(sc); /* Power OFF adapter. */ 1472 1473 iwn_eeprom_unlock(sc); 1474 return 0; 1475 } 1476 1477 void 1478 iwn4965_read_eeprom(struct iwn_softc *sc) 1479 { 1480 uint32_t addr; 1481 uint16_t val; 1482 int i; 1483 1484 /* Read regulatory domain (4 ASCII characters). */ 1485 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4); 1486 1487 /* Read the list of authorized channels. */ 1488 for (i = 0; i < 7; i++) { 1489 addr = iwn4965_regulatory_bands[i]; 1490 iwn_read_eeprom_channels(sc, i, addr); 1491 } 1492 1493 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */ 1494 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2); 1495 sc->maxpwr2GHz = val & 0xff; 1496 sc->maxpwr5GHz = val >> 8; 1497 /* Check that EEPROM values are within valid range. */ 1498 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50) 1499 sc->maxpwr5GHz = 38; 1500 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50) 1501 sc->maxpwr2GHz = 38; 1502 DPRINTF(("maxpwr 2GHz=%d 5GHz=%d\n", sc->maxpwr2GHz, sc->maxpwr5GHz)); 1503 1504 /* Read samples for each TX power group. */ 1505 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands, 1506 sizeof sc->bands); 1507 1508 /* Read voltage at which samples were taken. */ 1509 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2); 1510 sc->eeprom_voltage = (int16_t)letoh16(val); 1511 DPRINTF(("voltage=%d (in 0.3V)\n", sc->eeprom_voltage)); 1512 1513 #ifdef IWN_DEBUG 1514 /* Print samples. 
*/ 1515 if (iwn_debug > 0) { 1516 for (i = 0; i < IWN_NBANDS; i++) 1517 iwn4965_print_power_group(sc, i); 1518 } 1519 #endif 1520 } 1521 1522 #ifdef IWN_DEBUG 1523 void 1524 iwn4965_print_power_group(struct iwn_softc *sc, int i) 1525 { 1526 struct iwn4965_eeprom_band *band = &sc->bands[i]; 1527 struct iwn4965_eeprom_chan_samples *chans = band->chans; 1528 int j, c; 1529 1530 printf("===band %d===\n", i); 1531 printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi); 1532 printf("chan1 num=%d\n", chans[0].num); 1533 for (c = 0; c < 2; c++) { 1534 for (j = 0; j < IWN_NSAMPLES; j++) { 1535 printf("chain %d, sample %d: temp=%d gain=%d " 1536 "power=%d pa_det=%d\n", c, j, 1537 chans[0].samples[c][j].temp, 1538 chans[0].samples[c][j].gain, 1539 chans[0].samples[c][j].power, 1540 chans[0].samples[c][j].pa_det); 1541 } 1542 } 1543 printf("chan2 num=%d\n", chans[1].num); 1544 for (c = 0; c < 2; c++) { 1545 for (j = 0; j < IWN_NSAMPLES; j++) { 1546 printf("chain %d, sample %d: temp=%d gain=%d " 1547 "power=%d pa_det=%d\n", c, j, 1548 chans[1].samples[c][j].temp, 1549 chans[1].samples[c][j].gain, 1550 chans[1].samples[c][j].power, 1551 chans[1].samples[c][j].pa_det); 1552 } 1553 } 1554 } 1555 #endif 1556 1557 void 1558 iwn5000_read_eeprom(struct iwn_softc *sc) 1559 { 1560 struct iwn5000_eeprom_calib_hdr hdr; 1561 int32_t volt; 1562 uint32_t base, addr; 1563 uint16_t val; 1564 int i; 1565 1566 /* Read regulatory domain (4 ASCII characters). */ 1567 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 1568 base = letoh16(val); 1569 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN, 1570 sc->eeprom_domain, 4); 1571 1572 /* Read the list of authorized channels. */ 1573 for (i = 0; i < 7; i++) { 1574 addr = base + iwn5000_regulatory_bands[i]; 1575 iwn_read_eeprom_channels(sc, i, addr); 1576 } 1577 1578 /* Read enhanced TX power information for 6000 Series. */ 1579 if (sc->hw_type >= IWN_HW_REV_TYPE_6000) 1580 iwn_read_eeprom_enhinfo(sc); 1581 1582 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2); 1583 base = letoh16(val); 1584 iwn_read_prom_data(sc, base, &hdr, sizeof hdr); 1585 DPRINTF(("calib version=%u pa type=%u voltage=%u\n", 1586 hdr.version, hdr.pa_type, letoh16(hdr.volt))); 1587 sc->calib_ver = hdr.version; 1588 1589 if (sc->hw_type == IWN_HW_REV_TYPE_2030 || 1590 sc->hw_type == IWN_HW_REV_TYPE_2000 || 1591 sc->hw_type == IWN_HW_REV_TYPE_135 || 1592 sc->hw_type == IWN_HW_REV_TYPE_105) { 1593 sc->eeprom_voltage = letoh16(hdr.volt); 1594 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2); 1595 sc->eeprom_temp = letoh16(val); 1596 iwn_read_prom_data(sc, base + IWN2000_EEPROM_RAWTEMP, &val, 2); 1597 sc->eeprom_rawtemp = letoh16(val); 1598 } 1599 1600 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 1601 /* Compute temperature offset. */ 1602 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2); 1603 sc->eeprom_temp = letoh16(val); 1604 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2); 1605 volt = letoh16(val); 1606 sc->temp_off = sc->eeprom_temp - (volt / -5); 1607 DPRINTF(("temp=%d volt=%d offset=%dK\n", 1608 sc->eeprom_temp, volt, sc->temp_off)); 1609 } else { 1610 /* Read crystal calibration. 
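		 * This value is later passed to the firmware by
		 * iwn5000_crystal_calib().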
*/ 1611 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL, 1612 &sc->eeprom_crystal, sizeof (uint32_t)); 1613 DPRINTF(("crystal calibration 0x%08x\n", 1614 letoh32(sc->eeprom_crystal))); 1615 } 1616 } 1617 1618 void 1619 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr) 1620 { 1621 struct ieee80211com *ic = &sc->sc_ic; 1622 const struct iwn_chan_band *band = &iwn_bands[n]; 1623 struct iwn_eeprom_chan channels[IWN_MAX_CHAN_PER_BAND]; 1624 uint8_t chan; 1625 int i; 1626 1627 iwn_read_prom_data(sc, addr, channels, 1628 band->nchan * sizeof (struct iwn_eeprom_chan)); 1629 1630 for (i = 0; i < band->nchan; i++) { 1631 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) 1632 continue; 1633 1634 chan = band->chan[i]; 1635 1636 if (n == 0) { /* 2GHz band */ 1637 ic->ic_channels[chan].ic_freq = 1638 ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ); 1639 ic->ic_channels[chan].ic_flags = 1640 IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM | 1641 IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; 1642 1643 } else if (n < 5) { /* 5GHz band */ 1644 /* 1645 * Some adapters support channels 7, 8, 11 and 12 1646 * both in the 2GHz and 4.9GHz bands. 1647 * Because of limitations in our net80211 layer, 1648 * we don't support them in the 4.9GHz band. 1649 */ 1650 if (chan <= 14) 1651 continue; 1652 1653 ic->ic_channels[chan].ic_freq = 1654 ieee80211_ieee2mhz(chan, IEEE80211_CHAN_5GHZ); 1655 ic->ic_channels[chan].ic_flags = IEEE80211_CHAN_A; 1656 /* We have at least one valid 5GHz channel. */ 1657 sc->sc_flags |= IWN_FLAG_HAS_5GHZ; 1658 } else { /* 40 MHz */ 1659 sc->maxpwr40[chan] = channels[i].maxpwr; 1660 ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_40MHZ; 1661 } 1662 1663 if (n < 5) { 1664 /* Is active scan allowed on this channel? */ 1665 if (!(channels[i].flags & IWN_EEPROM_CHAN_ACTIVE)) { 1666 ic->ic_channels[chan].ic_flags |= 1667 IEEE80211_CHAN_PASSIVE; 1668 } 1669 1670 /* Save maximum allowed TX power for this channel. */ 1671 sc->maxpwr[chan] = channels[i].maxpwr; 1672 1673 if (sc->sc_flags & IWN_FLAG_HAS_11N) 1674 ic->ic_channels[chan].ic_flags |= 1675 IEEE80211_CHAN_HT; 1676 } 1677 1678 DPRINTF(("adding chan %d flags=0x%x maxpwr=%d maxpwr40=%d\n", 1679 chan, channels[i].flags, sc->maxpwr[chan], 1680 sc->maxpwr40[chan])); 1681 } 1682 } 1683 1684 void 1685 iwn_read_eeprom_enhinfo(struct iwn_softc *sc) 1686 { 1687 struct iwn_eeprom_enhinfo enhinfo[35]; 1688 uint16_t val, base; 1689 int8_t maxpwr; 1690 int i; 1691 1692 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 1693 base = letoh16(val); 1694 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO, 1695 enhinfo, sizeof enhinfo); 1696 1697 memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr); 1698 for (i = 0; i < nitems(enhinfo); i++) { 1699 if ((enhinfo[i].flags & IWN_TXP_VALID) == 0) 1700 continue; /* Skip invalid entries. */ 1701 1702 maxpwr = 0; 1703 if (sc->txchainmask & IWN_ANT_A) 1704 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]); 1705 if (sc->txchainmask & IWN_ANT_B) 1706 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]); 1707 if (sc->txchainmask & IWN_ANT_C) 1708 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]); 1709 if (sc->ntxchains == 2) 1710 maxpwr = MAX(maxpwr, enhinfo[i].mimo2); 1711 else if (sc->ntxchains == 3) 1712 maxpwr = MAX(maxpwr, enhinfo[i].mimo3); 1713 maxpwr /= 2; /* Convert half-dBm to dBm. 
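		 * The enhanced TX power table stores limits in half-dBm
		 * steps.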
*/ 1714 1715 DPRINTF(("enhinfo %d, maxpwr=%d\n", i, maxpwr)); 1716 sc->enh_maxpwr[i] = maxpwr; 1717 } 1718 } 1719 1720 struct ieee80211_node * 1721 iwn_node_alloc(struct ieee80211com *ic) 1722 { 1723 return malloc(sizeof (struct iwn_node), M_DEVBUF, M_NOWAIT | M_ZERO); 1724 } 1725 1726 void 1727 iwn_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew) 1728 { 1729 struct iwn_softc *sc = ic->ic_if.if_softc; 1730 struct iwn_node *wn = (void *)ni; 1731 uint8_t rate; 1732 int ridx, i; 1733 1734 if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) 1735 ieee80211_amrr_node_init(&sc->amrr, &wn->amn); 1736 1737 /* Start at lowest available bit-rate, AMRR/MiRA will raise. */ 1738 ni->ni_txrate = 0; 1739 ni->ni_txmcs = 0; 1740 1741 for (i = 0; i < ni->ni_rates.rs_nrates; i++) { 1742 rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL; 1743 /* Map 802.11 rate to HW rate index. */ 1744 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { 1745 if (iwn_rates[ridx].plcp != IWN_PLCP_INVALID && 1746 iwn_rates[ridx].rate == rate) 1747 break; 1748 } 1749 wn->ridx[i] = ridx; 1750 } 1751 } 1752 1753 int 1754 iwn_media_change(struct ifnet *ifp) 1755 { 1756 struct iwn_softc *sc = ifp->if_softc; 1757 struct ieee80211com *ic = &sc->sc_ic; 1758 uint8_t rate, ridx; 1759 int error; 1760 1761 error = ieee80211_media_change(ifp); 1762 if (error != ENETRESET) 1763 return error; 1764 1765 if (ic->ic_fixed_mcs != -1) 1766 sc->fixed_ridx = iwn_mcs2ridx[ic->ic_fixed_mcs]; 1767 if (ic->ic_fixed_rate != -1) { 1768 rate = ic->ic_sup_rates[ic->ic_curmode]. 1769 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL; 1770 /* Map 802.11 rate to HW rate index. */ 1771 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) 1772 if (iwn_rates[ridx].plcp != IWN_PLCP_INVALID && 1773 iwn_rates[ridx].rate == rate) 1774 break; 1775 sc->fixed_ridx = ridx; 1776 } 1777 1778 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 1779 (IFF_UP | IFF_RUNNING)) { 1780 iwn_stop(ifp); 1781 error = iwn_init(ifp); 1782 } 1783 return error; 1784 } 1785 1786 int 1787 iwn_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) 1788 { 1789 struct ifnet *ifp = &ic->ic_if; 1790 struct iwn_softc *sc = ifp->if_softc; 1791 struct ieee80211_node *ni = ic->ic_bss; 1792 int error; 1793 1794 if (ic->ic_state == IEEE80211_S_RUN) { 1795 if (nstate == IEEE80211_S_SCAN) { 1796 /* 1797 * During RUN->SCAN we don't call sc_newstate() so 1798 * we must stop A-MPDU Tx ourselves in this case. 1799 */ 1800 ieee80211_stop_ampdu_tx(ic, ni, -1); 1801 ieee80211_ba_del(ni); 1802 } 1803 timeout_del(&sc->calib_to); 1804 sc->calib.state = IWN_CALIB_STATE_INIT; 1805 if (sc->sc_flags & IWN_FLAG_BGSCAN) 1806 iwn_scan_abort(sc); 1807 } 1808 1809 if (ic->ic_state == IEEE80211_S_SCAN) { 1810 if (nstate == IEEE80211_S_SCAN) { 1811 if (sc->sc_flags & IWN_FLAG_SCANNING) 1812 return 0; 1813 } else 1814 sc->sc_flags &= ~IWN_FLAG_SCANNING; 1815 /* Turn LED off when leaving scan state. */ 1816 iwn_set_led(sc, IWN_LED_LINK, 1, 0); 1817 } 1818 1819 if (ic->ic_state >= IEEE80211_S_ASSOC && 1820 nstate <= IEEE80211_S_ASSOC) { 1821 /* Reset state to handle re- and disassociations. 
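		 * Clear the association ID, the BSS filter and the HT40
		 * channel-mode flags, then push a fresh RXON to the firmware.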
*/ 1822 sc->rxon.associd = 0; 1823 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS); 1824 sc->rxon.flags &= ~htole32(IWN_RXON_HT_CHANMODE_MIXED2040 | 1825 IWN_RXON_HT_CHANMODE_PURE40 | IWN_RXON_HT_HT40MINUS); 1826 sc->calib.state = IWN_CALIB_STATE_INIT; 1827 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 1828 if (error != 0) 1829 printf("%s: RXON command failed\n", 1830 sc->sc_dev.dv_xname); 1831 } 1832 1833 switch (nstate) { 1834 case IEEE80211_S_SCAN: 1835 /* Make the link LED blink while we're scanning. */ 1836 iwn_set_led(sc, IWN_LED_LINK, 10, 10); 1837 1838 if ((sc->sc_flags & IWN_FLAG_BGSCAN) == 0) { 1839 ieee80211_set_link_state(ic, LINK_STATE_DOWN); 1840 ieee80211_node_cleanup(ic, ic->ic_bss); 1841 } 1842 if (ifp->if_flags & IFF_DEBUG) 1843 printf("%s: %s -> %s\n", ifp->if_xname, 1844 ieee80211_state_name[ic->ic_state], 1845 ieee80211_state_name[nstate]); 1846 ic->ic_state = nstate; 1847 if ((error = iwn_scan(sc, IEEE80211_CHAN_2GHZ, 0)) != 0) { 1848 printf("%s: could not initiate scan\n", 1849 sc->sc_dev.dv_xname); 1850 } 1851 return error; 1852 1853 case IEEE80211_S_ASSOC: 1854 if (ic->ic_state != IEEE80211_S_RUN) 1855 break; 1856 /* FALLTHROUGH */ 1857 case IEEE80211_S_AUTH: 1858 if ((error = iwn_auth(sc, arg)) != 0) { 1859 printf("%s: could not move to auth state\n", 1860 sc->sc_dev.dv_xname); 1861 return error; 1862 } 1863 break; 1864 1865 case IEEE80211_S_RUN: 1866 if ((error = iwn_run(sc)) != 0) { 1867 printf("%s: could not move to run state\n", 1868 sc->sc_dev.dv_xname); 1869 return error; 1870 } 1871 break; 1872 1873 case IEEE80211_S_INIT: 1874 sc->calib.state = IWN_CALIB_STATE_INIT; 1875 break; 1876 } 1877 1878 return sc->sc_newstate(ic, nstate, arg); 1879 } 1880 1881 void 1882 iwn_iter_func(void *arg, struct ieee80211_node *ni) 1883 { 1884 struct iwn_softc *sc = arg; 1885 struct iwn_node *wn = (void *)ni; 1886 1887 if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) { 1888 int old_txrate = ni->ni_txrate; 1889 ieee80211_amrr_choose(&sc->amrr, ni, &wn->amn); 1890 if (old_txrate != ni->ni_txrate) 1891 iwn_set_link_quality(sc, ni); 1892 } 1893 } 1894 1895 void 1896 iwn_calib_timeout(void *arg) 1897 { 1898 struct iwn_softc *sc = arg; 1899 struct ieee80211com *ic = &sc->sc_ic; 1900 int s; 1901 1902 s = splnet(); 1903 if (ic->ic_fixed_rate == -1) { 1904 if (ic->ic_opmode == IEEE80211_M_STA) 1905 iwn_iter_func(sc, ic->ic_bss); 1906 else 1907 ieee80211_iterate_nodes(ic, iwn_iter_func, sc); 1908 } 1909 /* Force automatic TX power calibration every 60 secs. */ 1910 if (++sc->calib_cnt >= 120) { 1911 uint32_t flags = 0; 1912 1913 DPRINTFN(2, ("sending request for statistics\n")); 1914 (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, 1915 sizeof flags, 1); 1916 sc->calib_cnt = 0; 1917 } 1918 splx(s); 1919 1920 /* Automatic rate control triggered every 500ms. */ 1921 timeout_add_msec(&sc->calib_to, 500); 1922 } 1923 1924 int 1925 iwn_ccmp_decap(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 1926 { 1927 struct ieee80211com *ic = &sc->sc_ic; 1928 struct ieee80211_key *k = &ni->ni_pairwise_key; 1929 struct ieee80211_frame *wh; 1930 uint64_t pn, *prsc; 1931 uint8_t *ivp; 1932 uint8_t tid; 1933 int hdrlen, hasqos; 1934 1935 wh = mtod(m, struct ieee80211_frame *); 1936 hdrlen = ieee80211_get_hdrlen(wh); 1937 ivp = (uint8_t *)wh + hdrlen; 1938 1939 /* Check that ExtIV bit is set. */ 1940 if (!(ivp[3] & IEEE80211_WEP_EXTIV)) { 1941 DPRINTF(("CCMP decap ExtIV not set\n")); 1942 return 1; 1943 } 1944 hasqos = ieee80211_has_qos(wh); 1945 tid = hasqos ? 
ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0; 1946 prsc = &k->k_rsc[tid]; 1947 1948 /* Extract the 48-bit PN from the CCMP header. */ 1949 pn = (uint64_t)ivp[0] | 1950 (uint64_t)ivp[1] << 8 | 1951 (uint64_t)ivp[4] << 16 | 1952 (uint64_t)ivp[5] << 24 | 1953 (uint64_t)ivp[6] << 32 | 1954 (uint64_t)ivp[7] << 40; 1955 if (pn <= *prsc) { 1956 DPRINTF(("CCMP replayed\n")); 1957 ic->ic_stats.is_ccmp_replays++; 1958 return 1; 1959 } 1960 /* Last seen packet number is updated in ieee80211_inputm(). */ 1961 1962 /* Strip MIC. IV will be stripped by ieee80211_inputm(). */ 1963 m_adj(m, -IEEE80211_CCMP_MICLEN); 1964 return 0; 1965 } 1966 1967 /* 1968 * Process an RX_PHY firmware notification. This is usually immediately 1969 * followed by an MPDU_RX_DONE notification. 1970 */ 1971 void 1972 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc, 1973 struct iwn_rx_data *data) 1974 { 1975 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1); 1976 1977 DPRINTFN(2, ("received PHY stats\n")); 1978 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 1979 sizeof (*stat), BUS_DMASYNC_POSTREAD); 1980 1981 /* Save RX statistics, they will be used on MPDU_RX_DONE. */ 1982 memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); 1983 sc->last_rx_valid = IWN_LAST_RX_VALID; 1984 /* 1985 * The firmware does not send separate RX_PHY 1986 * notifications for A-MPDU subframes. 1987 */ 1988 if (stat->flags & htole16(IWN_STAT_FLAG_AGG)) 1989 sc->last_rx_valid |= IWN_LAST_RX_AMPDU; 1990 } 1991 1992 /* 1993 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification. 1994 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one. 1995 */ 1996 void 1997 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 1998 struct iwn_rx_data *data, struct mbuf_list *ml) 1999 { 2000 struct iwn_ops *ops = &sc->ops; 2001 struct ieee80211com *ic = &sc->sc_ic; 2002 struct ifnet *ifp = &ic->ic_if; 2003 struct iwn_rx_ring *ring = &sc->rxq; 2004 struct ieee80211_frame *wh; 2005 struct ieee80211_rxinfo rxi; 2006 struct ieee80211_node *ni; 2007 struct mbuf *m, *m1; 2008 struct iwn_rx_stat *stat; 2009 caddr_t head; 2010 uint32_t flags; 2011 int error, len, rssi; 2012 uint16_t chan; 2013 2014 if (desc->type == IWN_MPDU_RX_DONE) { 2015 /* Check for prior RX_PHY notification. */ 2016 if (!sc->last_rx_valid) { 2017 DPRINTF(("missing RX_PHY\n")); 2018 return; 2019 } 2020 sc->last_rx_valid &= ~IWN_LAST_RX_VALID; 2021 stat = &sc->last_rx_stat; 2022 if ((sc->last_rx_valid & IWN_LAST_RX_AMPDU) && 2023 (stat->flags & htole16(IWN_STAT_FLAG_AGG)) == 0) { 2024 DPRINTF(("missing RX_PHY (expecting A-MPDU)\n")); 2025 return; 2026 } 2027 if ((sc->last_rx_valid & IWN_LAST_RX_AMPDU) == 0 && 2028 (stat->flags & htole16(IWN_STAT_FLAG_AGG))) { 2029 DPRINTF(("missing RX_PHY (unexpected A-MPDU)\n")); 2030 return; 2031 } 2032 } else 2033 stat = (struct iwn_rx_stat *)(desc + 1); 2034 2035 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWN_RBUF_SIZE, 2036 BUS_DMASYNC_POSTREAD); 2037 2038 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { 2039 printf("%s: invalid RX statistic header\n", 2040 sc->sc_dev.dv_xname); 2041 return; 2042 } 2043 if (desc->type == IWN_MPDU_RX_DONE) { 2044 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); 2045 head = (caddr_t)(mpdu + 1); 2046 len = letoh16(mpdu->len); 2047 } else { 2048 head = (caddr_t)(stat + 1) + stat->cfg_phy_len; 2049 len = letoh16(stat->len); 2050 } 2051 2052 flags = letoh32(*(uint32_t *)(head + len)); 2053 2054 /* Discard frames with a bad FCS early. 
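 * IWN_RX_NOERROR is a mask of several good-status bits (frame CRC and
 * overrun status), which is why the test below compares against the
 * whole mask instead of checking for a single bit.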
*/ 2055 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { 2056 DPRINTFN(2, ("RX flags error %x\n", flags)); 2057 ifp->if_ierrors++; 2058 return; 2059 } 2060 /* Discard frames that are too short. */ 2061 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 2062 /* Allow control frames in monitor mode. */ 2063 if (len < sizeof (struct ieee80211_frame_cts)) { 2064 DPRINTF(("frame too short: %d\n", len)); 2065 ic->ic_stats.is_rx_tooshort++; 2066 ifp->if_ierrors++; 2067 return; 2068 } 2069 } else if (len < sizeof (*wh)) { 2070 DPRINTF(("frame too short: %d\n", len)); 2071 ic->ic_stats.is_rx_tooshort++; 2072 ifp->if_ierrors++; 2073 return; 2074 } 2075 2076 m1 = MCLGETL(NULL, M_DONTWAIT, IWN_RBUF_SIZE); 2077 if (m1 == NULL) { 2078 ic->ic_stats.is_rx_nombuf++; 2079 ifp->if_ierrors++; 2080 return; 2081 } 2082 bus_dmamap_unload(sc->sc_dmat, data->map); 2083 2084 error = bus_dmamap_load(sc->sc_dmat, data->map, mtod(m1, void *), 2085 IWN_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_READ); 2086 if (error != 0) { 2087 m_freem(m1); 2088 2089 /* Try to reload the old mbuf. */ 2090 error = bus_dmamap_load(sc->sc_dmat, data->map, 2091 mtod(data->m, void *), IWN_RBUF_SIZE, NULL, 2092 BUS_DMA_NOWAIT | BUS_DMA_READ); 2093 if (error != 0) { 2094 panic("%s: could not load old RX mbuf", 2095 sc->sc_dev.dv_xname); 2096 } 2097 /* Physical address may have changed. */ 2098 ring->desc[ring->cur] = 2099 htole32(data->map->dm_segs[0].ds_addr >> 8); 2100 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 2101 ring->cur * sizeof (uint32_t), sizeof (uint32_t), 2102 BUS_DMASYNC_PREWRITE); 2103 ifp->if_ierrors++; 2104 return; 2105 } 2106 2107 m = data->m; 2108 data->m = m1; 2109 /* Update RX descriptor. */ 2110 ring->desc[ring->cur] = htole32(data->map->dm_segs[0].ds_addr >> 8); 2111 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 2112 ring->cur * sizeof (uint32_t), sizeof (uint32_t), 2113 BUS_DMASYNC_PREWRITE); 2114 2115 /* Finalize mbuf. */ 2116 m->m_data = head; 2117 m->m_pkthdr.len = m->m_len = len; 2118 2119 /* 2120 * Grab a reference to the source node. Note that control frames are 2121 * shorter than struct ieee80211_frame but ieee80211_find_rxnode() 2122 * is being careful about control frames. 2123 */ 2124 wh = mtod(m, struct ieee80211_frame *); 2125 if (len < sizeof (*wh) && 2126 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 2127 ic->ic_stats.is_rx_tooshort++; 2128 ifp->if_ierrors++; 2129 m_freem(m); 2130 return; 2131 } 2132 ni = ieee80211_find_rxnode(ic, wh); 2133 2134 memset(&rxi, 0, sizeof(rxi)); 2135 if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) 2136 && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && 2137 !IEEE80211_IS_MULTICAST(wh->i_addr1) && 2138 (ni->ni_flags & IEEE80211_NODE_RXPROT) && 2139 ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) { 2140 if ((flags & IWN_RX_CIPHER_MASK) != IWN_RX_CIPHER_CCMP) { 2141 ic->ic_stats.is_ccmp_dec_errs++; 2142 ifp->if_ierrors++; 2143 m_freem(m); 2144 ieee80211_release_node(ic, ni); 2145 return; 2146 } 2147 /* Check whether decryption was successful or not. 
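 * The 4965 (RX_DONE) and newer adapters (MPDU_RX_DONE) report the
 * decryption outcome in different bits of the RX flags word, hence
 * the two-sided test below.  Frames that fail either form of the
 * check are counted as CCMP decryption errors; the rest are passed
 * to iwn_ccmp_decap() for the replay check.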
*/ 2148 if ((desc->type == IWN_MPDU_RX_DONE && 2149 (flags & (IWN_RX_MPDU_DEC | IWN_RX_MPDU_MIC_OK)) != 2150 (IWN_RX_MPDU_DEC | IWN_RX_MPDU_MIC_OK)) || 2151 (desc->type != IWN_MPDU_RX_DONE && 2152 (flags & IWN_RX_DECRYPT_MASK) != IWN_RX_DECRYPT_OK)) { 2153 DPRINTF(("CCMP decryption failed 0x%x\n", flags)); 2154 ic->ic_stats.is_ccmp_dec_errs++; 2155 ifp->if_ierrors++; 2156 m_freem(m); 2157 ieee80211_release_node(ic, ni); 2158 return; 2159 } 2160 if (iwn_ccmp_decap(sc, m, ni) != 0) { 2161 ifp->if_ierrors++; 2162 m_freem(m); 2163 ieee80211_release_node(ic, ni); 2164 return; 2165 } 2166 rxi.rxi_flags |= IEEE80211_RXI_HWDEC; 2167 } 2168 2169 rssi = ops->get_rssi(stat); 2170 2171 chan = stat->chan; 2172 if (chan > IEEE80211_CHAN_MAX) 2173 chan = IEEE80211_CHAN_MAX; 2174 2175 #if NBPFILTER > 0 2176 if (sc->sc_drvbpf != NULL) { 2177 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 2178 uint16_t chan_flags; 2179 2180 tap->wr_flags = 0; 2181 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) 2182 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2183 tap->wr_chan_freq = htole16(ic->ic_channels[chan].ic_freq); 2184 chan_flags = ic->ic_channels[chan].ic_flags; 2185 if (ic->ic_curmode != IEEE80211_MODE_11N) 2186 chan_flags &= ~IEEE80211_CHAN_HT; 2187 tap->wr_chan_flags = htole16(chan_flags); 2188 tap->wr_dbm_antsignal = (int8_t)rssi; 2189 tap->wr_dbm_antnoise = (int8_t)sc->noise; 2190 tap->wr_tsft = stat->tstamp; 2191 if (stat->rflags & IWN_RFLAG_MCS) { 2192 tap->wr_rate = (0x80 | stat->rate); /* HT MCS index */ 2193 } else { 2194 switch (stat->rate) { 2195 /* CCK rates. */ 2196 case 10: tap->wr_rate = 2; break; 2197 case 20: tap->wr_rate = 4; break; 2198 case 55: tap->wr_rate = 11; break; 2199 case 110: tap->wr_rate = 22; break; 2200 /* OFDM rates. */ 2201 case 0xd: tap->wr_rate = 12; break; 2202 case 0xf: tap->wr_rate = 18; break; 2203 case 0x5: tap->wr_rate = 24; break; 2204 case 0x7: tap->wr_rate = 36; break; 2205 case 0x9: tap->wr_rate = 48; break; 2206 case 0xb: tap->wr_rate = 72; break; 2207 case 0x1: tap->wr_rate = 96; break; 2208 case 0x3: tap->wr_rate = 108; break; 2209 /* Unknown rate: should not happen. */ 2210 default: tap->wr_rate = 0; 2211 } 2212 } 2213 2214 bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len, 2215 m, BPF_DIRECTION_IN); 2216 } 2217 #endif 2218 2219 /* Send the frame to the 802.11 layer. */ 2220 rxi.rxi_rssi = rssi; 2221 rxi.rxi_chan = chan; 2222 ieee80211_inputm(ifp, m, ni, &rxi, ml); 2223 2224 /* Node is no longer needed. */ 2225 ieee80211_release_node(ic, ni); 2226 } 2227 2228 void 2229 iwn_ra_choose(struct iwn_softc *sc, struct ieee80211_node *ni) 2230 { 2231 struct ieee80211com *ic = &sc->sc_ic; 2232 struct iwn_node *wn = (void *)ni; 2233 int old_txmcs = ni->ni_txmcs; 2234 2235 ieee80211_ra_choose(&wn->rn, ic, ni); 2236 2237 /* Update firmware's LQ retry table if RA has chosen a new MCS. */ 2238 if (ni->ni_txmcs != old_txmcs) 2239 iwn_set_link_quality(sc, ni); 2240 } 2241 2242 void 2243 iwn_ampdu_rate_control(struct iwn_softc *sc, struct ieee80211_node *ni, 2244 struct iwn_tx_ring *txq, uint16_t seq, uint16_t ssn) 2245 { 2246 struct ieee80211com *ic = &sc->sc_ic; 2247 struct iwn_node *wn = (void *)ni; 2248 int idx, end_idx; 2249 2250 /* 2251 * Update Tx rate statistics for A-MPDUs before firmware's BA window. 
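 * For example, if the BA window started at sequence number 100 and
 * the firmware reports an SSN of 104, the TX ring slots for frames
 * 100-103 are visited below and every one of them that went out as
 * part of an A-MPDU is credited as a successful transmission.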
2252 */ 2253 idx = IWN_AGG_SSN_TO_TXQ_IDX(seq); 2254 end_idx = IWN_AGG_SSN_TO_TXQ_IDX(ssn); 2255 while (idx != end_idx) { 2256 struct iwn_tx_data *txdata = &txq->data[idx]; 2257 if (txdata->m != NULL && txdata->ampdu_nframes > 1) { 2258 /* 2259 * We can assume that this subframe has been ACKed 2260 * because ACK failures come as single frames and 2261 * before failing an A-MPDU subframe the firmware 2262 * sends it as a single frame at least once. 2263 */ 2264 ieee80211_ra_add_stats_ht(&wn->rn, ic, ni, 2265 txdata->ampdu_txmcs, 1, 0); 2266 2267 /* Report this frame only once. */ 2268 txdata->ampdu_nframes = 0; 2269 } 2270 2271 idx = (idx + 1) % IWN_TX_RING_COUNT; 2272 } 2273 2274 iwn_ra_choose(sc, ni); 2275 } 2276 2277 void 2278 iwn_ht_single_rate_control(struct iwn_softc *sc, struct ieee80211_node *ni, 2279 uint8_t rate, uint8_t rflags, uint8_t ackfailcnt, int txfail) 2280 { 2281 struct ieee80211com *ic = &sc->sc_ic; 2282 struct iwn_node *wn = (void *)ni; 2283 int mcs = rate; 2284 const struct ieee80211_ht_rateset *rs = 2285 ieee80211_ra_get_ht_rateset(rate, 2286 ieee80211_node_supports_ht_chan40(ni), 2287 ieee80211_ra_use_ht_sgi(ni)); 2288 unsigned int retries = 0, i; 2289 2290 /* 2291 * Ignore Tx reports which don't match our last LQ command. 2292 */ 2293 if (rate != ni->ni_txmcs) { 2294 if (++wn->lq_rate_mismatch > 15) { 2295 /* Try to sync firmware with driver. */ 2296 iwn_set_link_quality(sc, ni); 2297 wn->lq_rate_mismatch = 0; 2298 } 2299 return; 2300 } 2301 2302 wn->lq_rate_mismatch = 0; 2303 2304 /* 2305 * Firmware has attempted rates in this rate set in sequence. 2306 * Retries at a basic rate are counted against the minimum MCS. 2307 */ 2308 for (i = 0; i < ackfailcnt; i++) { 2309 if (mcs > rs->min_mcs) { 2310 ieee80211_ra_add_stats_ht(&wn->rn, ic, ni, mcs, 1, 1); 2311 mcs--; 2312 } else 2313 retries++; 2314 } 2315 2316 if (txfail && ackfailcnt == 0) 2317 ieee80211_ra_add_stats_ht(&wn->rn, ic, ni, mcs, 1, 1); 2318 else 2319 ieee80211_ra_add_stats_ht(&wn->rn, ic, ni, mcs, retries + 1, retries); 2320 2321 iwn_ra_choose(sc, ni); 2322 } 2323 2324 /* 2325 * Process an incoming Compressed BlockAck. 2326 * Note that these block ack notifications are generated by firmware and do 2327 * not necessarily correspond to contents of block ack frames seen on the air. 2328 */ 2329 void 2330 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2331 struct iwn_rx_data *data) 2332 { 2333 struct iwn_compressed_ba *cba = (struct iwn_compressed_ba *)(desc + 1); 2334 struct ieee80211com *ic = &sc->sc_ic; 2335 struct ieee80211_node *ni; 2336 struct ieee80211_tx_ba *ba; 2337 struct iwn_tx_ring *txq; 2338 uint16_t seq, ssn; 2339 int qid; 2340 2341 if (ic->ic_state != IEEE80211_S_RUN) 2342 return; 2343 2344 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), sizeof (*cba), 2345 BUS_DMASYNC_POSTREAD); 2346 2347 if (!IEEE80211_ADDR_EQ(ic->ic_bss->ni_macaddr, cba->macaddr)) 2348 return; 2349 2350 ni = ic->ic_bss; 2351 2352 qid = le16toh(cba->qid); 2353 if (qid < sc->first_agg_txq || qid >= sc->ntxqs) 2354 return; 2355 2356 txq = &sc->txq[qid]; 2357 2358 /* Protect against a firmware bug where the queue/TID are off. */ 2359 if (qid != sc->first_agg_txq + cba->tid) 2360 return; 2361 2362 ba = &ni->ni_tx_ba[cba->tid]; 2363 if (ba->ba_state != IEEE80211_BA_AGREED) 2364 return; 2365 2366 /* 2367 * The first bit in cba->bitmap corresponds to the sequence number 2368 * stored in the sequence control field cba->seq. 
2369 * Multiple BA notifications in a row may be using this number, with 2370 * additional bits being set in cba->bitmap. It is unclear how the 2371 * firmware decides to shift this window forward. 2372 * We rely on ba->ba_winstart instead. 2373 */ 2374 seq = le16toh(cba->seq) >> IEEE80211_SEQ_SEQ_SHIFT; 2375 2376 /* 2377 * The firmware's new BA window starting sequence number 2378 * corresponds to the first hole in cba->bitmap, implying 2379 * that all frames between 'seq' and 'ssn' (non-inclusive) 2380 * have been acked. 2381 */ 2382 ssn = le16toh(cba->ssn); 2383 2384 if (SEQ_LT(ssn, ba->ba_winstart)) 2385 return; 2386 2387 /* Skip rate control if our Tx rate is fixed. */ 2388 if (ic->ic_fixed_mcs == -1) 2389 iwn_ampdu_rate_control(sc, ni, txq, ba->ba_winstart, ssn); 2390 2391 /* 2392 * SSN corresponds to the first (perhaps not yet transmitted) frame 2393 * in firmware's BA window. Firmware is not going to retransmit any 2394 * frames before its BA window so mark them all as done. 2395 */ 2396 ieee80211_output_ba_move_window(ic, ni, cba->tid, ssn); 2397 iwn_ampdu_txq_advance(sc, txq, qid, 2398 IWN_AGG_SSN_TO_TXQ_IDX(ssn)); 2399 iwn_clear_oactive(sc, txq); 2400 } 2401 2402 /* 2403 * Process a CALIBRATION_RESULT notification sent by the initialization 2404 * firmware on response to a CMD_CALIB_CONFIG command (5000 only). 2405 */ 2406 void 2407 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2408 struct iwn_rx_data *data) 2409 { 2410 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); 2411 int len, idx = -1; 2412 2413 /* Runtime firmware should not send such a notification. */ 2414 if (sc->sc_flags & IWN_FLAG_CALIB_DONE) 2415 return; 2416 2417 len = (letoh32(desc->len) & IWN_RX_DESC_LEN_MASK) - 4; 2418 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), len, 2419 BUS_DMASYNC_POSTREAD); 2420 2421 switch (calib->code) { 2422 case IWN5000_PHY_CALIB_DC: 2423 if (sc->hw_type == IWN_HW_REV_TYPE_5150 || 2424 sc->hw_type == IWN_HW_REV_TYPE_2030 || 2425 sc->hw_type == IWN_HW_REV_TYPE_2000 || 2426 sc->hw_type == IWN_HW_REV_TYPE_135 || 2427 sc->hw_type == IWN_HW_REV_TYPE_105) 2428 idx = 0; 2429 break; 2430 case IWN5000_PHY_CALIB_LO: 2431 idx = 1; 2432 break; 2433 case IWN5000_PHY_CALIB_TX_IQ: 2434 idx = 2; 2435 break; 2436 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: 2437 if (sc->hw_type < IWN_HW_REV_TYPE_6000 && 2438 sc->hw_type != IWN_HW_REV_TYPE_5150) 2439 idx = 3; 2440 break; 2441 case IWN5000_PHY_CALIB_BASE_BAND: 2442 idx = 4; 2443 break; 2444 } 2445 if (idx == -1) /* Ignore other results. */ 2446 return; 2447 2448 /* Save calibration result. */ 2449 if (sc->calibcmd[idx].buf != NULL) 2450 free(sc->calibcmd[idx].buf, M_DEVBUF, 0); 2451 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT); 2452 if (sc->calibcmd[idx].buf == NULL) { 2453 DPRINTF(("not enough memory for calibration result %d\n", 2454 calib->code)); 2455 return; 2456 } 2457 DPRINTF(("saving calibration result code=%d len=%d\n", 2458 calib->code, len)); 2459 sc->calibcmd[idx].len = len; 2460 memcpy(sc->calibcmd[idx].buf, calib, len); 2461 } 2462 2463 /* 2464 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. 2465 * The latter is sent by the firmware after each received beacon. 
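 * On 4965 adapters the temperature reported here also triggers TX
 * power recalibration; the beacon variant additionally feeds noise
 * collection and sensitivity tuning while associated.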
2466 */ 2467 void 2468 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2469 struct iwn_rx_data *data) 2470 { 2471 struct iwn_ops *ops = &sc->ops; 2472 struct ieee80211com *ic = &sc->sc_ic; 2473 struct iwn_calib_state *calib = &sc->calib; 2474 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); 2475 int temp; 2476 2477 /* Ignore statistics received during a scan. */ 2478 if (ic->ic_state != IEEE80211_S_RUN) 2479 return; 2480 2481 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2482 sizeof (*stats), BUS_DMASYNC_POSTREAD); 2483 2484 DPRINTFN(3, ("received statistics (cmd=%d)\n", desc->type)); 2485 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */ 2486 2487 sc->rx_stats_flags = htole32(stats->flags); 2488 2489 /* Test if temperature has changed. */ 2490 if (stats->general.temp != sc->rawtemp) { 2491 /* Convert "raw" temperature to degC. */ 2492 sc->rawtemp = stats->general.temp; 2493 temp = ops->get_temperature(sc); 2494 DPRINTFN(2, ("temperature=%dC\n", temp)); 2495 2496 /* Update TX power if need be (4965AGN only). */ 2497 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 2498 iwn4965_power_calibration(sc, temp); 2499 } 2500 2501 if (desc->type != IWN_BEACON_STATISTICS) 2502 return; /* Reply to a statistics request. */ 2503 2504 sc->noise = iwn_get_noise(&stats->rx.general); 2505 2506 /* Test that RSSI and noise are present in stats report. */ 2507 if (sc->noise == -127) 2508 return; 2509 2510 if (letoh32(stats->rx.general.flags) != 1) { 2511 DPRINTF(("received statistics without RSSI\n")); 2512 return; 2513 } 2514 2515 /* 2516 * XXX Differential gain calibration makes the 6005 firmware 2517 * crap out, so skip it for now. This effectively disables 2518 * sensitivity tuning as well. 2519 */ 2520 if (sc->hw_type == IWN_HW_REV_TYPE_6005) 2521 return; 2522 2523 if (calib->state == IWN_CALIB_STATE_ASSOC) 2524 iwn_collect_noise(sc, &stats->rx.general); 2525 else if (calib->state == IWN_CALIB_STATE_RUN) 2526 iwn_tune_sensitivity(sc, &stats->rx); 2527 } 2528 2529 void 2530 iwn_ampdu_txq_advance(struct iwn_softc *sc, struct iwn_tx_ring *txq, int qid, 2531 int idx) 2532 { 2533 struct iwn_ops *ops = &sc->ops; 2534 2535 DPRINTFN(3, ("%s: txq->cur=%d txq->read=%d txq->queued=%d qid=%d " 2536 "idx=%d\n", __func__, txq->cur, txq->read, txq->queued, qid, idx)); 2537 2538 while (txq->read != idx) { 2539 struct iwn_tx_data *txdata = &txq->data[txq->read]; 2540 if (txdata->m != NULL) { 2541 ops->reset_sched(sc, qid, txq->read); 2542 iwn_tx_done_free_txdata(sc, txdata); 2543 txq->queued--; 2544 } 2545 txq->read = (txq->read + 1) % IWN_TX_RING_COUNT; 2546 } 2547 } 2548 2549 /* 2550 * Handle A-MPDU Tx queue status report. 2551 * Tx failures come as single frames (perhaps out of order), and before failing 2552 * an A-MPDU subframe the firmware transmits it as a single frame at least once. 2553 * Frames successfully transmitted in an A-MPDU are completed when a compressed 2554 * block ack notification is received. 
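 * This handler therefore only records per-subframe rate information
 * when the report covers an aggregate (nframes > 1); the rate-control
 * and BA-window bookkeeping below applies to the final single-frame
 * attempt, i.e. frame SSN - 1 modulo 4096.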
2555 */ 2556 void 2557 iwn_ampdu_tx_done(struct iwn_softc *sc, struct iwn_tx_ring *txq, 2558 struct iwn_rx_desc *desc, uint16_t status, uint8_t ackfailcnt, 2559 uint8_t rate, uint8_t rflags, int nframes, uint32_t ssn, 2560 struct iwn_txagg_status *agg_status) 2561 { 2562 struct ieee80211com *ic = &sc->sc_ic; 2563 int tid = desc->qid - sc->first_agg_txq; 2564 struct iwn_tx_data *txdata = &txq->data[desc->idx]; 2565 struct ieee80211_node *ni = txdata->ni; 2566 int txfail = (status != IWN_TX_STATUS_SUCCESS && 2567 status != IWN_TX_STATUS_DIRECT_DONE); 2568 struct ieee80211_tx_ba *ba; 2569 uint16_t seq; 2570 2571 sc->sc_tx_timer = 0; 2572 2573 if (ic->ic_state != IEEE80211_S_RUN) 2574 return; 2575 2576 if (nframes > 1) { 2577 int i; 2578 2579 /* 2580 * Collect information about this A-MPDU. 2581 */ 2582 for (i = 0; i < nframes; i++) { 2583 uint8_t qid = agg_status[i].qid; 2584 uint8_t idx = agg_status[i].idx; 2585 uint16_t txstatus = (le16toh(agg_status[i].status) & 2586 IWN_AGG_TX_STATUS_MASK); 2587 2588 if (txstatus != IWN_AGG_TX_STATE_TRANSMITTED) 2589 continue; 2590 2591 if (qid != desc->qid) 2592 continue; 2593 2594 txdata = &txq->data[idx]; 2595 if (txdata->ni == NULL) 2596 continue; 2597 2598 /* The Tx rate was the same for all subframes. */ 2599 txdata->ampdu_txmcs = rate; 2600 txdata->ampdu_nframes = nframes; 2601 } 2602 return; 2603 } 2604 2605 if (ni == NULL) 2606 return; 2607 2608 ba = &ni->ni_tx_ba[tid]; 2609 if (ba->ba_state != IEEE80211_BA_AGREED) 2610 return; 2611 if (SEQ_LT(ssn, ba->ba_winstart)) 2612 return; 2613 2614 /* This was a final single-frame Tx attempt for frame SSN-1. */ 2615 seq = (ssn - 1) & 0xfff; 2616 2617 /* 2618 * Skip rate control if our Tx rate is fixed. 2619 */ 2620 if (ic->ic_fixed_mcs == -1) { 2621 if (txdata->ampdu_nframes > 1) { 2622 struct iwn_node *wn = (void *)ni; 2623 /* 2624 * This frame was once part of an A-MPDU. 2625 * Report one failed A-MPDU Tx attempt. 2626 * The firmware might have made several such 2627 * attempts but we don't keep track of this. 2628 */ 2629 ieee80211_ra_add_stats_ht(&wn->rn, ic, ni, 2630 txdata->ampdu_txmcs, 1, 1); 2631 } 2632 2633 /* Report the final single-frame Tx attempt. */ 2634 if (rflags & IWN_RFLAG_MCS) 2635 iwn_ht_single_rate_control(sc, ni, rate, rflags, 2636 ackfailcnt, txfail); 2637 } 2638 2639 if (txfail) 2640 ieee80211_tx_compressed_bar(ic, ni, tid, ssn); 2641 2642 /* 2643 * SSN corresponds to the first (perhaps not yet transmitted) frame 2644 * in firmware's BA window. Firmware is not going to retransmit any 2645 * frames before its BA window so mark them all as done. 2646 */ 2647 ieee80211_output_ba_move_window(ic, ni, tid, ssn); 2648 iwn_ampdu_txq_advance(sc, txq, desc->qid, IWN_AGG_SSN_TO_TXQ_IDX(ssn)); 2649 iwn_clear_oactive(sc, txq); 2650 } 2651 2652 /* 2653 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN 2654 * and 5000 adapters have different incompatible TX status formats. 2655 */ 2656 void 2657 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2658 struct iwn_rx_data *data) 2659 { 2660 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); 2661 struct iwn_tx_ring *ring; 2662 size_t len = (letoh32(desc->len) & IWN_RX_DESC_LEN_MASK); 2663 uint16_t status = letoh32(stat->stat.status) & 0xff; 2664 uint32_t ssn; 2665 2666 if (desc->qid > IWN4965_NTXQUEUES) 2667 return; 2668 2669 ring = &sc->txq[desc->qid]; 2670 2671 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2672 len, BUS_DMASYNC_POSTREAD); 2673 2674 /* Sanity checks. 
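 * The notification must be large enough for the fixed status block,
 * report a plausible frame count, never claim multiple frames on a
 * non-aggregation queue and, on aggregation queues, leave room for
 * the per-frame status entries plus the trailing SSN word that is
 * copied out below.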
*/ 2675 if (sizeof(*stat) > len) 2676 return; 2677 if (stat->nframes < 1 || stat->nframes > IWN_AMPDU_MAX) 2678 return; 2679 if (desc->qid < sc->first_agg_txq && stat->nframes > 1) 2680 return; 2681 if (desc->qid >= sc->first_agg_txq && sizeof(*stat) + sizeof(ssn) + 2682 stat->nframes * sizeof(stat->stat) > len) 2683 return; 2684 2685 if (desc->qid < sc->first_agg_txq) { 2686 /* XXX 4965 does not report byte count */ 2687 struct iwn_tx_data *txdata = &ring->data[desc->idx]; 2688 uint16_t framelen = txdata->totlen + IEEE80211_CRC_LEN; 2689 int txfail = (status != IWN_TX_STATUS_SUCCESS && 2690 status != IWN_TX_STATUS_DIRECT_DONE); 2691 2692 iwn_tx_done(sc, desc, stat->ackfailcnt, stat->rate, 2693 stat->rflags, txfail, desc->qid, framelen); 2694 } else { 2695 memcpy(&ssn, &stat->stat.status + stat->nframes, sizeof(ssn)); 2696 ssn = le32toh(ssn) & 0xfff; 2697 iwn_ampdu_tx_done(sc, ring, desc, status, stat->ackfailcnt, 2698 stat->rate, stat->rflags, stat->nframes, ssn, 2699 stat->stat.agg_status); 2700 } 2701 } 2702 2703 void 2704 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2705 struct iwn_rx_data *data) 2706 { 2707 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); 2708 struct iwn_tx_ring *ring; 2709 size_t len = (letoh32(desc->len) & IWN_RX_DESC_LEN_MASK); 2710 uint16_t status = letoh32(stat->stat.status) & 0xff; 2711 uint32_t ssn; 2712 2713 if (desc->qid > IWN5000_NTXQUEUES) 2714 return; 2715 2716 ring = &sc->txq[desc->qid]; 2717 2718 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2719 sizeof (*stat), BUS_DMASYNC_POSTREAD); 2720 2721 /* Sanity checks. */ 2722 if (sizeof(*stat) > len) 2723 return; 2724 if (stat->nframes < 1 || stat->nframes > IWN_AMPDU_MAX) 2725 return; 2726 if (desc->qid < sc->first_agg_txq && stat->nframes > 1) 2727 return; 2728 if (desc->qid >= sc->first_agg_txq && sizeof(*stat) + sizeof(ssn) + 2729 stat->nframes * sizeof(stat->stat) > len) 2730 return; 2731 2732 /* If this was not an aggregated frame, complete it now. */ 2733 if (desc->qid < sc->first_agg_txq) { 2734 int txfail = (status != IWN_TX_STATUS_SUCCESS && 2735 status != IWN_TX_STATUS_DIRECT_DONE); 2736 2737 /* Reset TX scheduler slot. 
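 * The 5000-series scheduler keeps one 16-bit word per ring slot,
 * roughly encoded as (id << 12 | byte count + 8), as written by
 * iwn5000_update_sched() below; resetting a slot preserves the id
 * nibble and drops the byte count to a minimal value.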
*/ 2738 iwn5000_reset_sched(sc, desc->qid, desc->idx); 2739 2740 iwn_tx_done(sc, desc, stat->ackfailcnt, stat->rate, 2741 stat->rflags, txfail, desc->qid, letoh16(stat->len)); 2742 } else { 2743 memcpy(&ssn, &stat->stat.status + stat->nframes, sizeof(ssn)); 2744 ssn = le32toh(ssn) & 0xfff; 2745 iwn_ampdu_tx_done(sc, ring, desc, status, stat->ackfailcnt, 2746 stat->rate, stat->rflags, stat->nframes, ssn, 2747 stat->stat.agg_status); 2748 } 2749 } 2750 2751 void 2752 iwn_tx_done_free_txdata(struct iwn_softc *sc, struct iwn_tx_data *data) 2753 { 2754 struct ieee80211com *ic = &sc->sc_ic; 2755 2756 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize, 2757 BUS_DMASYNC_POSTWRITE); 2758 bus_dmamap_unload(sc->sc_dmat, data->map); 2759 m_freem(data->m); 2760 data->m = NULL; 2761 ieee80211_release_node(ic, data->ni); 2762 data->ni = NULL; 2763 data->totlen = 0; 2764 data->ampdu_nframes = 0; 2765 data->ampdu_txmcs = 0; 2766 } 2767 2768 void 2769 iwn_clear_oactive(struct iwn_softc *sc, struct iwn_tx_ring *ring) 2770 { 2771 struct ieee80211com *ic = &sc->sc_ic; 2772 struct ifnet *ifp = &ic->ic_if; 2773 2774 if (ring->queued < IWN_TX_RING_LOMARK) { 2775 sc->qfullmsk &= ~(1 << ring->qid); 2776 if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) { 2777 ifq_clr_oactive(&ifp->if_snd); 2778 (*ifp->if_start)(ifp); 2779 } 2780 } 2781 } 2782 2783 /* 2784 * Adapter-independent backend for TX_DONE firmware notifications. 2785 * This handles Tx status for non-aggregation queues. 2786 */ 2787 void 2788 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2789 uint8_t ackfailcnt, uint8_t rate, uint8_t rflags, int txfail, 2790 int qid, uint16_t len) 2791 { 2792 struct ieee80211com *ic = &sc->sc_ic; 2793 struct ifnet *ifp = &ic->ic_if; 2794 struct iwn_tx_ring *ring = &sc->txq[qid]; 2795 struct iwn_tx_data *data = &ring->data[desc->idx]; 2796 struct iwn_node *wn = (void *)data->ni; 2797 2798 if (data->ni == NULL) 2799 return; 2800 2801 if (data->ni->ni_flags & IEEE80211_NODE_HT) { 2802 if (ic->ic_state == IEEE80211_S_RUN && 2803 ic->ic_fixed_mcs == -1 && (rflags & IWN_RFLAG_MCS)) { 2804 iwn_ht_single_rate_control(sc, data->ni, rate, rflags, 2805 ackfailcnt, txfail); 2806 } 2807 } else { 2808 if (rate != data->ni->ni_txrate) { 2809 if (++wn->lq_rate_mismatch > 15) { 2810 /* Try to sync firmware with driver. */ 2811 iwn_set_link_quality(sc, data->ni); 2812 wn->lq_rate_mismatch = 0; 2813 } 2814 } else { 2815 wn->lq_rate_mismatch = 0; 2816 2817 wn->amn.amn_txcnt++; 2818 if (ackfailcnt > 0) 2819 wn->amn.amn_retrycnt++; 2820 if (txfail) 2821 wn->amn.amn_retrycnt++; 2822 } 2823 } 2824 if (txfail) 2825 ifp->if_oerrors++; 2826 2827 iwn_tx_done_free_txdata(sc, data); 2828 2829 sc->sc_tx_timer = 0; 2830 ring->queued--; 2831 iwn_clear_oactive(sc, ring); 2832 } 2833 2834 /* 2835 * Process a "command done" firmware notification. This is where we wakeup 2836 * processes waiting for a synchronous command completion. 2837 */ 2838 void 2839 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) 2840 { 2841 struct iwn_tx_ring *ring = &sc->txq[4]; 2842 struct iwn_tx_data *data; 2843 2844 if ((desc->qid & 0xf) != 4) 2845 return; /* Not a command ack. */ 2846 2847 data = &ring->data[desc->idx]; 2848 2849 /* If the command was mapped in an mbuf, free it. 
*/ 2850 if (data->m != NULL) { 2851 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 2852 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2853 bus_dmamap_unload(sc->sc_dmat, data->map); 2854 m_freem(data->m); 2855 data->m = NULL; 2856 } 2857 wakeup(&ring->desc[desc->idx]); 2858 } 2859 2860 /* 2861 * Process an INT_FH_RX or INT_SW_RX interrupt. 2862 */ 2863 void 2864 iwn_notif_intr(struct iwn_softc *sc) 2865 { 2866 struct mbuf_list ml = MBUF_LIST_INITIALIZER(); 2867 struct iwn_ops *ops = &sc->ops; 2868 struct ieee80211com *ic = &sc->sc_ic; 2869 struct ifnet *ifp = &ic->ic_if; 2870 uint16_t hw; 2871 2872 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map, 2873 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD); 2874 2875 hw = letoh16(sc->rxq.stat->closed_count) & 0xfff; 2876 while (sc->rxq.cur != hw) { 2877 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 2878 struct iwn_rx_desc *desc; 2879 2880 bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof (*desc), 2881 BUS_DMASYNC_POSTREAD); 2882 desc = mtod(data->m, struct iwn_rx_desc *); 2883 2884 DPRINTFN(4, ("notification qid=%d idx=%d flags=%x type=%d\n", 2885 desc->qid & 0xf, desc->idx, desc->flags, desc->type)); 2886 2887 if (!(desc->qid & 0x80)) /* Reply to a command. */ 2888 iwn_cmd_done(sc, desc); 2889 2890 switch (desc->type) { 2891 case IWN_RX_PHY: 2892 iwn_rx_phy(sc, desc, data); 2893 break; 2894 2895 case IWN_RX_DONE: /* 4965AGN only. */ 2896 case IWN_MPDU_RX_DONE: 2897 /* An 802.11 frame has been received. */ 2898 iwn_rx_done(sc, desc, data, &ml); 2899 break; 2900 case IWN_RX_COMPRESSED_BA: 2901 /* A Compressed BlockAck has been received. */ 2902 iwn_rx_compressed_ba(sc, desc, data); 2903 break; 2904 case IWN_TX_DONE: 2905 /* An 802.11 frame has been transmitted. */ 2906 ops->tx_done(sc, desc, data); 2907 break; 2908 2909 case IWN_RX_STATISTICS: 2910 case IWN_BEACON_STATISTICS: 2911 iwn_rx_statistics(sc, desc, data); 2912 break; 2913 2914 case IWN_BEACON_MISSED: 2915 { 2916 struct iwn_beacon_missed *miss = 2917 (struct iwn_beacon_missed *)(desc + 1); 2918 uint32_t missed; 2919 2920 if ((ic->ic_opmode != IEEE80211_M_STA) || 2921 (ic->ic_state != IEEE80211_S_RUN)) 2922 break; 2923 2924 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2925 sizeof (*miss), BUS_DMASYNC_POSTREAD); 2926 missed = letoh32(miss->consecutive); 2927 2928 /* 2929 * If more than 5 consecutive beacons are missed, 2930 * reinitialize the sensitivity state machine. 2931 */ 2932 if (missed > 5) 2933 (void)iwn_init_sensitivity(sc); 2934 2935 /* 2936 * Rather than go directly to scan state, try to send a 2937 * directed probe request first. If that fails then the 2938 * state machine will drop us into scanning after timing 2939 * out waiting for a probe response. 2940 */ 2941 if (missed > ic->ic_bmissthres && !ic->ic_mgt_timer) { 2942 if (ic->ic_if.if_flags & IFF_DEBUG) 2943 printf("%s: receiving no beacons from " 2944 "%s; checking if this AP is still " 2945 "responding to probe requests\n", 2946 sc->sc_dev.dv_xname, ether_sprintf( 2947 ic->ic_bss->ni_macaddr)); 2948 IEEE80211_SEND_MGMT(ic, ic->ic_bss, 2949 IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0); 2950 } 2951 break; 2952 } 2953 case IWN_UC_READY: 2954 { 2955 struct iwn_ucode_info *uc = 2956 (struct iwn_ucode_info *)(desc + 1); 2957 2958 /* The microcontroller is ready. 
*/ 2959 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2960 sizeof (*uc), BUS_DMASYNC_POSTREAD); 2961 DPRINTF(("microcode alive notification version=%d.%d " 2962 "subtype=%x alive=%x\n", uc->major, uc->minor, 2963 uc->subtype, letoh32(uc->valid))); 2964 2965 if (letoh32(uc->valid) != 1) { 2966 printf("%s: microcontroller initialization " 2967 "failed\n", sc->sc_dev.dv_xname); 2968 break; 2969 } 2970 if (uc->subtype == IWN_UCODE_INIT) { 2971 /* Save microcontroller report. */ 2972 memcpy(&sc->ucode_info, uc, sizeof (*uc)); 2973 } 2974 /* Save the address of the error log in SRAM. */ 2975 sc->errptr = letoh32(uc->errptr); 2976 break; 2977 } 2978 case IWN_STATE_CHANGED: 2979 { 2980 uint32_t *status = (uint32_t *)(desc + 1); 2981 2982 /* Enabled/disabled notification. */ 2983 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 2984 sizeof (*status), BUS_DMASYNC_POSTREAD); 2985 DPRINTF(("state changed to %x\n", letoh32(*status))); 2986 2987 if (letoh32(*status) & 1) { 2988 /* Radio transmitter is off, power down. */ 2989 iwn_stop(ifp); 2990 return; /* No further processing. */ 2991 } 2992 break; 2993 } 2994 case IWN_START_SCAN: 2995 { 2996 struct iwn_start_scan *scan = 2997 (struct iwn_start_scan *)(desc + 1); 2998 2999 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 3000 sizeof (*scan), BUS_DMASYNC_POSTREAD); 3001 DPRINTFN(2, ("scan start: chan %d status %x\n", 3002 scan->chan, letoh32(scan->status))); 3003 3004 if (sc->sc_flags & IWN_FLAG_BGSCAN) 3005 break; 3006 3007 /* Fix current channel. */ 3008 ic->ic_bss->ni_chan = &ic->ic_channels[scan->chan]; 3009 break; 3010 } 3011 case IWN_STOP_SCAN: 3012 { 3013 struct iwn_stop_scan *scan = 3014 (struct iwn_stop_scan *)(desc + 1); 3015 3016 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), 3017 sizeof (*scan), BUS_DMASYNC_POSTREAD); 3018 DPRINTFN(2, ("scan stop: nchan=%d status=%d chan=%d\n", 3019 scan->nchan, scan->status, scan->chan)); 3020 3021 if (scan->status == 1 && scan->chan <= 14 && 3022 (sc->sc_flags & IWN_FLAG_HAS_5GHZ)) { 3023 int error; 3024 /* 3025 * We just finished scanning 2GHz channels, 3026 * start scanning 5GHz ones. 3027 */ 3028 error = iwn_scan(sc, IEEE80211_CHAN_5GHZ, 3029 (sc->sc_flags & IWN_FLAG_BGSCAN) ? 1 : 0); 3030 if (error == 0) 3031 break; 3032 } 3033 sc->sc_flags &= ~IWN_FLAG_SCANNING; 3034 sc->sc_flags &= ~IWN_FLAG_BGSCAN; 3035 ieee80211_end_scan(ifp); 3036 break; 3037 } 3038 case IWN5000_CALIBRATION_RESULT: 3039 iwn5000_rx_calib_results(sc, desc, data); 3040 break; 3041 3042 case IWN5000_CALIBRATION_DONE: 3043 sc->sc_flags |= IWN_FLAG_CALIB_DONE; 3044 wakeup(sc); 3045 break; 3046 } 3047 3048 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; 3049 } 3050 if_input(&sc->sc_ic.ic_if, &ml); 3051 3052 /* Tell the firmware what we have processed. */ 3053 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1; 3054 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); 3055 } 3056 3057 /* 3058 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 3059 * from power-down sleep mode. 3060 */ 3061 void 3062 iwn_wakeup_intr(struct iwn_softc *sc) 3063 { 3064 int qid; 3065 3066 DPRINTF(("ucode wakeup from power-down sleep\n")); 3067 3068 /* Wakeup RX and TX rings. */ 3069 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); 3070 for (qid = 0; qid < sc->ntxqs; qid++) { 3071 struct iwn_tx_ring *ring = &sc->txq[qid]; 3072 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); 3073 } 3074 } 3075 3076 /* 3077 * Dump the error log of the firmware when a firmware panic occurs. 
Although 3078 * we can't debug the firmware because it is neither open source nor free, it 3079 * can help us to identify certain classes of problems. 3080 */ 3081 void 3082 iwn_fatal_intr(struct iwn_softc *sc) 3083 { 3084 struct iwn_fw_dump dump; 3085 int i; 3086 3087 /* Check that the error log address is valid. */ 3088 if (sc->errptr < IWN_FW_DATA_BASE || 3089 sc->errptr + sizeof (dump) > 3090 IWN_FW_DATA_BASE + sc->fw_data_maxsz) { 3091 printf("%s: bad firmware error log address 0x%08x\n", 3092 sc->sc_dev.dv_xname, sc->errptr); 3093 return; 3094 } 3095 if (iwn_nic_lock(sc) != 0) { 3096 printf("%s: could not read firmware error log\n", 3097 sc->sc_dev.dv_xname); 3098 return; 3099 } 3100 /* Read firmware error log from SRAM. */ 3101 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, 3102 sizeof (dump) / sizeof (uint32_t)); 3103 iwn_nic_unlock(sc); 3104 3105 if (dump.valid == 0) { 3106 printf("%s: firmware error log is empty\n", 3107 sc->sc_dev.dv_xname); 3108 return; 3109 } 3110 printf("firmware error log:\n"); 3111 printf(" error type = \"%s\" (0x%08X)\n", 3112 (dump.id < nitems(iwn_fw_errmsg)) ? 3113 iwn_fw_errmsg[dump.id] : "UNKNOWN", 3114 dump.id); 3115 printf(" program counter = 0x%08X\n", dump.pc); 3116 printf(" source line = 0x%08X\n", dump.src_line); 3117 printf(" error data = 0x%08X%08X\n", 3118 dump.error_data[0], dump.error_data[1]); 3119 printf(" branch link = 0x%08X%08X\n", 3120 dump.branch_link[0], dump.branch_link[1]); 3121 printf(" interrupt link = 0x%08X%08X\n", 3122 dump.interrupt_link[0], dump.interrupt_link[1]); 3123 printf(" time = %u\n", dump.time[0]); 3124 3125 /* Dump driver status (TX and RX rings) while we're here. */ 3126 printf("driver status:\n"); 3127 for (i = 0; i < sc->ntxqs; i++) { 3128 struct iwn_tx_ring *ring = &sc->txq[i]; 3129 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 3130 i, ring->qid, ring->cur, ring->queued); 3131 } 3132 printf(" rx ring: cur=%d\n", sc->rxq.cur); 3133 printf(" 802.11 state %d\n", sc->sc_ic.ic_state); 3134 } 3135 3136 int 3137 iwn_intr(void *arg) 3138 { 3139 struct iwn_softc *sc = arg; 3140 struct ifnet *ifp = &sc->sc_ic.ic_if; 3141 uint32_t r1, r2, tmp; 3142 3143 /* Disable interrupts. */ 3144 IWN_WRITE(sc, IWN_INT_MASK, 0); 3145 3146 /* Read interrupts from ICT (fast) or from registers (slow). */ 3147 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 3148 tmp = 0; 3149 while (sc->ict[sc->ict_cur] != 0) { 3150 tmp |= sc->ict[sc->ict_cur]; 3151 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ 3152 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; 3153 } 3154 tmp = letoh32(tmp); 3155 if (tmp == 0xffffffff) /* Shouldn't happen. */ 3156 tmp = 0; 3157 else if (tmp & 0xc0000) /* Workaround a HW bug. */ 3158 tmp |= 0x8000; 3159 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); 3160 r2 = 0; /* Unused. */ 3161 } else { 3162 r1 = IWN_READ(sc, IWN_INT); 3163 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 3164 return 0; /* Hardware gone! */ 3165 r2 = IWN_READ(sc, IWN_FH_INT); 3166 } 3167 if (r1 == 0 && r2 == 0) { 3168 if (ifp->if_flags & IFF_UP) 3169 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 3170 return 0; /* Interrupt not for us. */ 3171 } 3172 3173 /* Acknowledge interrupts. */ 3174 IWN_WRITE(sc, IWN_INT, r1); 3175 if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) 3176 IWN_WRITE(sc, IWN_FH_INT, r2); 3177 3178 if (r1 & IWN_INT_RF_TOGGLED) { 3179 tmp = IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL; 3180 printf("%s: RF switch: radio %s\n", sc->sc_dev.dv_xname, 3181 tmp ? 
"enabled" : "disabled"); 3182 if (tmp) 3183 task_add(systq, &sc->init_task); 3184 } 3185 if (r1 & IWN_INT_CT_REACHED) { 3186 printf("%s: critical temperature reached!\n", 3187 sc->sc_dev.dv_xname); 3188 } 3189 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 3190 printf("%s: fatal firmware error\n", sc->sc_dev.dv_xname); 3191 3192 /* Force a complete recalibration on next init. */ 3193 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; 3194 3195 /* Dump firmware error log and stop. */ 3196 if (ifp->if_flags & IFF_DEBUG) 3197 iwn_fatal_intr(sc); 3198 iwn_stop(ifp); 3199 task_add(systq, &sc->init_task); 3200 return 1; 3201 } 3202 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || 3203 (r2 & IWN_FH_INT_RX)) { 3204 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 3205 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) 3206 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); 3207 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 3208 IWN_INT_PERIODIC_DIS); 3209 iwn_notif_intr(sc); 3210 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { 3211 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 3212 IWN_INT_PERIODIC_ENA); 3213 } 3214 } else 3215 iwn_notif_intr(sc); 3216 } 3217 3218 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { 3219 if (sc->sc_flags & IWN_FLAG_USE_ICT) 3220 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); 3221 wakeup(sc); /* FH DMA transfer completed. */ 3222 } 3223 3224 if (r1 & IWN_INT_ALIVE) 3225 wakeup(sc); /* Firmware is alive. */ 3226 3227 if (r1 & IWN_INT_WAKEUP) 3228 iwn_wakeup_intr(sc); 3229 3230 /* Re-enable interrupts. */ 3231 if (ifp->if_flags & IFF_UP) 3232 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 3233 3234 return 1; 3235 } 3236 3237 /* 3238 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 3239 * 5000 adapters use a slightly different format). 3240 */ 3241 void 3242 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 3243 uint16_t len) 3244 { 3245 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 3246 3247 *w = htole16(len + 8); 3248 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 3249 (caddr_t)w - sc->sched_dma.vaddr, sizeof (uint16_t), 3250 BUS_DMASYNC_PREWRITE); 3251 if (idx < IWN_SCHED_WINSZ) { 3252 *(w + IWN_TX_RING_COUNT) = *w; 3253 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 3254 (caddr_t)(w + IWN_TX_RING_COUNT) - sc->sched_dma.vaddr, 3255 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 3256 } 3257 } 3258 3259 void 3260 iwn4965_reset_sched(struct iwn_softc *sc, int qid, int idx) 3261 { 3262 /* TBD */ 3263 } 3264 3265 void 3266 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 3267 uint16_t len) 3268 { 3269 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 3270 3271 *w = htole16(id << 12 | (len + 8)); 3272 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 3273 (caddr_t)w - sc->sched_dma.vaddr, sizeof (uint16_t), 3274 BUS_DMASYNC_PREWRITE); 3275 if (idx < IWN_SCHED_WINSZ) { 3276 *(w + IWN_TX_RING_COUNT) = *w; 3277 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 3278 (caddr_t)(w + IWN_TX_RING_COUNT) - sc->sched_dma.vaddr, 3279 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 3280 } 3281 } 3282 3283 void 3284 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 3285 { 3286 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 3287 3288 *w = (*w & htole16(0xf000)) | htole16(1); 3289 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 3290 (caddr_t)w - sc->sched_dma.vaddr, sizeof (uint16_t), 3291 BUS_DMASYNC_PREWRITE); 3292 if (idx < IWN_SCHED_WINSZ) { 3293 *(w + IWN_TX_RING_COUNT) = *w; 3294 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map, 3295 
(caddr_t)(w + IWN_TX_RING_COUNT) - sc->sched_dma.vaddr, 3296 sizeof (uint16_t), BUS_DMASYNC_PREWRITE); 3297 } 3298 } 3299 3300 int 3301 iwn_rval2ridx(int rval) 3302 { 3303 int ridx; 3304 3305 for (ridx = 0; ridx < nitems(iwn_rates); ridx++) { 3306 if (rval == iwn_rates[ridx].rate) 3307 break; 3308 } 3309 3310 return ridx; 3311 } 3312 3313 int 3314 iwn_tx(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 3315 { 3316 struct iwn_ops *ops = &sc->ops; 3317 struct ieee80211com *ic = &sc->sc_ic; 3318 struct iwn_node *wn = (void *)ni; 3319 struct iwn_tx_ring *ring; 3320 struct iwn_tx_desc *desc; 3321 struct iwn_tx_data *data; 3322 struct iwn_tx_cmd *cmd; 3323 struct iwn_cmd_data *tx; 3324 const struct iwn_rate *rinfo; 3325 struct ieee80211_frame *wh; 3326 struct ieee80211_key *k = NULL; 3327 enum ieee80211_edca_ac ac; 3328 int qid; 3329 uint32_t flags; 3330 uint16_t qos; 3331 u_int hdrlen; 3332 bus_dma_segment_t *seg; 3333 uint8_t *ivp, tid, ridx, txant, type, subtype; 3334 int i, totlen, hasqos, error, pad; 3335 3336 wh = mtod(m, struct ieee80211_frame *); 3337 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3338 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3339 if (type == IEEE80211_FC0_TYPE_CTL) 3340 hdrlen = sizeof(struct ieee80211_frame_min); 3341 else 3342 hdrlen = ieee80211_get_hdrlen(wh); 3343 3344 if ((hasqos = ieee80211_has_qos(wh))) { 3345 /* Select EDCA Access Category and TX ring for this frame. */ 3346 struct ieee80211_tx_ba *ba; 3347 qos = ieee80211_get_qos(wh); 3348 tid = qos & IEEE80211_QOS_TID; 3349 ac = ieee80211_up_to_ac(ic, tid); 3350 qid = ac; 3351 3352 /* If possible, put this frame on an aggregation queue. */ 3353 if (sc->sc_tx_ba[tid].wn == wn) { 3354 ba = &ni->ni_tx_ba[tid]; 3355 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && 3356 ba->ba_state == IEEE80211_BA_AGREED) { 3357 qid = sc->first_agg_txq + tid; 3358 if (sc->qfullmsk & (1 << qid)) { 3359 m_freem(m); 3360 return ENOBUFS; 3361 } 3362 } 3363 } 3364 } else { 3365 qos = 0; 3366 tid = IWN_NONQOS_TID; 3367 ac = EDCA_AC_BE; 3368 qid = ac; 3369 } 3370 3371 ring = &sc->txq[qid]; 3372 desc = &ring->desc[ring->cur]; 3373 data = &ring->data[ring->cur]; 3374 3375 /* Choose a TX rate index. 
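 * Selection order below: group-addressed and non-data frames go out
 * at the lowest basic rate, a user-fixed MCS or legacy rate wins
 * next, and otherwise the current rate-control choice is mapped to
 * an iwn_rates[] entry (via iwn_mcs2ridx[] for HT, or the per-node
 * ridx[] table filled in by iwn_newassoc()).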
*/ 3376 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 3377 type != IEEE80211_FC0_TYPE_DATA) 3378 ridx = iwn_rval2ridx(ieee80211_min_basic_rate(ic)); 3379 else if (ic->ic_fixed_mcs != -1) 3380 ridx = sc->fixed_ridx; 3381 else if (ic->ic_fixed_rate != -1) 3382 ridx = sc->fixed_ridx; 3383 else { 3384 if (ni->ni_flags & IEEE80211_NODE_HT) 3385 ridx = iwn_mcs2ridx[ni->ni_txmcs]; 3386 else 3387 ridx = wn->ridx[ni->ni_txrate]; 3388 } 3389 rinfo = &iwn_rates[ridx]; 3390 #if NBPFILTER > 0 3391 if (sc->sc_drvbpf != NULL) { 3392 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; 3393 uint16_t chan_flags; 3394 3395 tap->wt_flags = 0; 3396 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq); 3397 chan_flags = ni->ni_chan->ic_flags; 3398 if (ic->ic_curmode != IEEE80211_MODE_11N) 3399 chan_flags &= ~IEEE80211_CHAN_HT; 3400 tap->wt_chan_flags = htole16(chan_flags); 3401 if ((ni->ni_flags & IEEE80211_NODE_HT) && 3402 !IEEE80211_IS_MULTICAST(wh->i_addr1) && 3403 type == IEEE80211_FC0_TYPE_DATA) { 3404 tap->wt_rate = (0x80 | ni->ni_txmcs); 3405 } else 3406 tap->wt_rate = rinfo->rate; 3407 if ((ic->ic_flags & IEEE80211_F_WEPON) && 3408 (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) 3409 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 3410 3411 bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len, 3412 m, BPF_DIRECTION_OUT); 3413 } 3414 #endif 3415 3416 totlen = m->m_pkthdr.len; 3417 3418 /* Encrypt the frame if need be. */ 3419 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 3420 /* Retrieve key for TX. */ 3421 k = ieee80211_get_txkey(ic, wh, ni); 3422 if (k->k_cipher != IEEE80211_CIPHER_CCMP) { 3423 /* Do software encryption. */ 3424 if ((m = ieee80211_encrypt(ic, m, k)) == NULL) 3425 return ENOBUFS; 3426 /* 802.11 header may have moved. */ 3427 wh = mtod(m, struct ieee80211_frame *); 3428 totlen = m->m_pkthdr.len; 3429 3430 } else /* HW appends CCMP MIC. */ 3431 totlen += IEEE80211_CCMP_HDRLEN; 3432 } 3433 3434 data->totlen = totlen; 3435 3436 /* Prepare TX firmware command. */ 3437 cmd = &ring->cmd[ring->cur]; 3438 cmd->code = IWN_CMD_TX_DATA; 3439 cmd->flags = 0; 3440 cmd->qid = ring->qid; 3441 cmd->idx = ring->cur; 3442 3443 tx = (struct iwn_cmd_data *)cmd->data; 3444 /* NB: No need to clear tx, all fields are reinitialized here. */ 3445 tx->scratch = 0; /* clear "scratch" area */ 3446 3447 flags = 0; 3448 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3449 /* Unicast frame, check if an ACK is expected. */ 3450 if (!hasqos || (qos & IEEE80211_QOS_ACK_POLICY_MASK) != 3451 IEEE80211_QOS_ACK_POLICY_NOACK) 3452 flags |= IWN_TX_NEED_ACK; 3453 } 3454 if (type == IEEE80211_FC0_TYPE_CTL && 3455 subtype == IEEE80211_FC0_SUBTYPE_BAR) { 3456 struct ieee80211_frame_min *mwh; 3457 uint8_t *barfrm; 3458 uint16_t ctl; 3459 mwh = mtod(m, struct ieee80211_frame_min *); 3460 barfrm = (uint8_t *)&mwh[1]; 3461 ctl = LE_READ_2(barfrm); 3462 tid = (ctl & IEEE80211_BA_TID_INFO_MASK) >> 3463 IEEE80211_BA_TID_INFO_SHIFT; 3464 flags |= (IWN_TX_NEED_ACK | IWN_TX_IMM_BA); 3465 } 3466 3467 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 3468 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */ 3469 3470 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 3471 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3472 /* NB: Group frames are sent using CCK in 802.11b/g/n (2GHz). 
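 * In short: a unicast frame longer than the RTS threshold always
 * requests RTS; otherwise, when the BSS requires protection
 * (IEEE80211_F_USEPROT) and the frame goes out at an OFDM rate, the
 * configured protection mode selects CTS-to-self or RTS.  5000-series
 * and later parts pick the protection frame themselves, so both flags
 * are folded into IWN_TX_NEED_PROTECTION; the 4965 gets
 * IWN_TX_FULL_TXOP instead.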
*/ 3473 if (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) { 3474 flags |= IWN_TX_NEED_RTS; 3475 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 3476 ridx >= IWN_RIDX_OFDM6) { 3477 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 3478 flags |= IWN_TX_NEED_CTS; 3479 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 3480 flags |= IWN_TX_NEED_RTS; 3481 } 3482 3483 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) { 3484 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 3485 /* 5000 autoselects RTS/CTS or CTS-to-self. */ 3486 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS); 3487 flags |= IWN_TX_NEED_PROTECTION; 3488 } else 3489 flags |= IWN_TX_FULL_TXOP; 3490 } 3491 } 3492 3493 if (type == IEEE80211_FC0_TYPE_CTL && 3494 subtype == IEEE80211_FC0_SUBTYPE_BAR) 3495 tx->id = wn->id; 3496 else if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 3497 type != IEEE80211_FC0_TYPE_DATA) 3498 tx->id = sc->broadcast_id; 3499 else 3500 tx->id = wn->id; 3501 3502 if (type == IEEE80211_FC0_TYPE_MGT) { 3503 #ifndef IEEE80211_STA_ONLY 3504 /* Tell HW to set timestamp in probe responses. */ 3505 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 3506 flags |= IWN_TX_INSERT_TSTAMP; 3507 #endif 3508 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3509 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 3510 tx->timeout = htole16(3); 3511 else 3512 tx->timeout = htole16(2); 3513 } else 3514 tx->timeout = htole16(0); 3515 3516 if (hdrlen & 3) { 3517 /* First segment length must be a multiple of 4. */ 3518 flags |= IWN_TX_NEED_PADDING; 3519 pad = 4 - (hdrlen & 3); 3520 } else 3521 pad = 0; 3522 3523 tx->len = htole16(totlen); 3524 tx->tid = tid; 3525 tx->rts_ntries = 60; 3526 tx->data_ntries = 15; 3527 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 3528 3529 if ((ni->ni_flags & IEEE80211_NODE_HT) && 3530 tx->id != sc->broadcast_id) 3531 tx->plcp = rinfo->ht_plcp; 3532 else 3533 tx->plcp = rinfo->plcp; 3534 3535 if ((ni->ni_flags & IEEE80211_NODE_HT) && 3536 tx->id != sc->broadcast_id) { 3537 tx->rflags = rinfo->ht_flags; 3538 if (iwn_rxon_ht40_enabled(sc)) 3539 tx->rflags |= IWN_RFLAG_HT40; 3540 if (ieee80211_ra_use_ht_sgi(ni)) 3541 tx->rflags |= IWN_RFLAG_SGI; 3542 } 3543 else 3544 tx->rflags = rinfo->flags; 3545 if (tx->id == sc->broadcast_id || ic->ic_fixed_mcs != -1 || 3546 ic->ic_fixed_rate != -1) { 3547 /* Group or management frame, or fixed Tx rate. */ 3548 tx->linkq = 0; 3549 /* XXX Alternate between antenna A and B? */ 3550 txant = IWN_LSB(sc->txchainmask); 3551 tx->rflags |= IWN_RFLAG_ANT(txant); 3552 } else { 3553 tx->linkq = 0; /* initial index into firmware LQ retry table */ 3554 flags |= IWN_TX_LINKQ; /* enable multi-rate retry */ 3555 } 3556 /* Set physical address of "scratch area". */ 3557 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); 3558 tx->hiaddr = IWN_HIADDR(data->scratch_paddr); 3559 3560 /* Copy 802.11 header in TX command. */ 3561 memcpy((uint8_t *)(tx + 1), wh, hdrlen); 3562 3563 if (k != NULL && k->k_cipher == IEEE80211_CIPHER_CCMP) { 3564 /* Trim 802.11 header and prepend CCMP IV. 
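 * The 8-byte CCMP header built below carries the 48-bit packet number
 * k_tsc in the order PN0, PN1, reserved, keyid/ExtIV, PN2..PN5:
 *
 *	ivp[0]=PN0  ivp[1]=PN1  ivp[2]=0  ivp[3]=(keyid << 6) | ExtIV
 *	ivp[4]=PN2  ivp[5]=PN3  ivp[6]=PN4  ivp[7]=PN5
 *
 * which matches the byte order iwn_ccmp_decap() expects on receive.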
*/ 3565 m_adj(m, hdrlen - IEEE80211_CCMP_HDRLEN); 3566 ivp = mtod(m, uint8_t *); 3567 k->k_tsc++; 3568 ivp[0] = k->k_tsc; 3569 ivp[1] = k->k_tsc >> 8; 3570 ivp[2] = 0; 3571 ivp[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV; 3572 ivp[4] = k->k_tsc >> 16; 3573 ivp[5] = k->k_tsc >> 24; 3574 ivp[6] = k->k_tsc >> 32; 3575 ivp[7] = k->k_tsc >> 40; 3576 3577 tx->security = IWN_CIPHER_CCMP; 3578 if (qid >= sc->first_agg_txq) 3579 flags |= IWN_TX_AMPDU_CCMP; 3580 memcpy(tx->key, k->k_key, k->k_len); 3581 3582 /* TX scheduler includes CCMP MIC len w/5000 Series. */ 3583 if (sc->hw_type != IWN_HW_REV_TYPE_4965) 3584 totlen += IEEE80211_CCMP_MICLEN; 3585 } else { 3586 /* Trim 802.11 header. */ 3587 m_adj(m, hdrlen); 3588 tx->security = 0; 3589 } 3590 tx->flags = htole32(flags); 3591 3592 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, 3593 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 3594 if (error != 0 && error != EFBIG) { 3595 printf("%s: can't map mbuf (error %d)\n", 3596 sc->sc_dev.dv_xname, error); 3597 m_freem(m); 3598 return error; 3599 } 3600 if (error != 0) { 3601 /* Too many DMA segments, linearize mbuf. */ 3602 if (m_defrag(m, M_DONTWAIT)) { 3603 m_freem(m); 3604 return ENOBUFS; 3605 } 3606 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m, 3607 BUS_DMA_NOWAIT | BUS_DMA_WRITE); 3608 if (error != 0) { 3609 printf("%s: can't map mbuf (error %d)\n", 3610 sc->sc_dev.dv_xname, error); 3611 m_freem(m); 3612 return error; 3613 } 3614 } 3615 3616 data->m = m; 3617 data->ni = ni; 3618 data->ampdu_txmcs = ni->ni_txmcs; /* updated upon Tx interrupt */ 3619 3620 DPRINTFN(4, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n", 3621 ring->qid, ring->cur, m->m_pkthdr.len, data->map->dm_nsegs)); 3622 3623 /* Fill TX descriptor. */ 3624 desc->nsegs = 1 + data->map->dm_nsegs; 3625 /* First DMA segment is used by the TX command. */ 3626 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); 3627 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | 3628 (4 + sizeof (*tx) + hdrlen + pad) << 4); 3629 /* Other DMA segments are for data payload. */ 3630 seg = data->map->dm_segs; 3631 for (i = 1; i <= data->map->dm_nsegs; i++) { 3632 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); 3633 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | 3634 seg->ds_len << 4); 3635 seg++; 3636 } 3637 3638 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize, 3639 BUS_DMASYNC_PREWRITE); 3640 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map, 3641 (caddr_t)cmd - ring->cmd_dma.vaddr, sizeof (*cmd), 3642 BUS_DMASYNC_PREWRITE); 3643 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 3644 (caddr_t)desc - ring->desc_dma.vaddr, sizeof (*desc), 3645 BUS_DMASYNC_PREWRITE); 3646 3647 /* Update TX scheduler. */ 3648 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); 3649 3650 /* Kick TX ring. */ 3651 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3652 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3653 3654 /* Mark TX ring as full if we reach a certain threshold. 
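 * sc->qfullmsk keeps one bit per TX ring; iwn_start() stops queueing
 * new frames while any bit is set, and iwn_clear_oactive() clears the
 * bit again once the ring drains below IWN_TX_RING_LOMARK.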
*/ 3655 if (++ring->queued > IWN_TX_RING_HIMARK) 3656 sc->qfullmsk |= 1 << ring->qid; 3657 3658 return 0; 3659 } 3660 3661 void 3662 iwn_start(struct ifnet *ifp) 3663 { 3664 struct iwn_softc *sc = ifp->if_softc; 3665 struct ieee80211com *ic = &sc->sc_ic; 3666 struct ieee80211_node *ni; 3667 struct mbuf *m; 3668 3669 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd)) 3670 return; 3671 3672 for (;;) { 3673 if (sc->qfullmsk != 0) { 3674 ifq_set_oactive(&ifp->if_snd); 3675 break; 3676 } 3677 3678 /* Send pending management frames first. */ 3679 m = mq_dequeue(&ic->ic_mgtq); 3680 if (m != NULL) { 3681 ni = m->m_pkthdr.ph_cookie; 3682 goto sendit; 3683 } 3684 if (ic->ic_state != IEEE80211_S_RUN || 3685 (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY)) 3686 break; 3687 3688 /* Encapsulate and send data frames. */ 3689 m = ifq_dequeue(&ifp->if_snd); 3690 if (m == NULL) 3691 break; 3692 #if NBPFILTER > 0 3693 if (ifp->if_bpf != NULL) 3694 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 3695 #endif 3696 if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) 3697 continue; 3698 sendit: 3699 #if NBPFILTER > 0 3700 if (ic->ic_rawbpf != NULL) 3701 bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT); 3702 #endif 3703 if (iwn_tx(sc, m, ni) != 0) { 3704 ieee80211_release_node(ic, ni); 3705 ifp->if_oerrors++; 3706 continue; 3707 } 3708 3709 sc->sc_tx_timer = 5; 3710 ifp->if_timer = 1; 3711 } 3712 } 3713 3714 void 3715 iwn_watchdog(struct ifnet *ifp) 3716 { 3717 struct iwn_softc *sc = ifp->if_softc; 3718 3719 ifp->if_timer = 0; 3720 3721 if (sc->sc_tx_timer > 0) { 3722 if (--sc->sc_tx_timer == 0) { 3723 printf("%s: device timeout\n", sc->sc_dev.dv_xname); 3724 iwn_stop(ifp); 3725 ifp->if_oerrors++; 3726 return; 3727 } 3728 ifp->if_timer = 1; 3729 } 3730 3731 ieee80211_watchdog(ifp); 3732 } 3733 3734 int 3735 iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 3736 { 3737 struct iwn_softc *sc = ifp->if_softc; 3738 struct ieee80211com *ic = &sc->sc_ic; 3739 int s, error = 0; 3740 3741 error = rw_enter(&sc->sc_rwlock, RW_WRITE | RW_INTR); 3742 if (error) 3743 return error; 3744 s = splnet(); 3745 3746 switch (cmd) { 3747 case SIOCSIFADDR: 3748 ifp->if_flags |= IFF_UP; 3749 /* FALLTHROUGH */ 3750 case SIOCSIFFLAGS: 3751 if (ifp->if_flags & IFF_UP) { 3752 if (!(ifp->if_flags & IFF_RUNNING)) 3753 error = iwn_init(ifp); 3754 } else { 3755 if (ifp->if_flags & IFF_RUNNING) 3756 iwn_stop(ifp); 3757 } 3758 break; 3759 3760 case SIOCS80211POWER: 3761 error = ieee80211_ioctl(ifp, cmd, data); 3762 if (error != ENETRESET) 3763 break; 3764 if (ic->ic_state == IEEE80211_S_RUN && 3765 sc->calib.state == IWN_CALIB_STATE_RUN) { 3766 if (ic->ic_flags & IEEE80211_F_PMGTON) 3767 error = iwn_set_pslevel(sc, 0, 3, 0); 3768 else /* back to CAM */ 3769 error = iwn_set_pslevel(sc, 0, 0, 0); 3770 } else { 3771 /* Defer until transition to IWN_CALIB_STATE_RUN. */ 3772 error = 0; 3773 } 3774 break; 3775 3776 default: 3777 error = ieee80211_ioctl(ifp, cmd, data); 3778 } 3779 3780 if (error == ENETRESET) { 3781 error = 0; 3782 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 3783 (IFF_UP | IFF_RUNNING)) { 3784 iwn_stop(ifp); 3785 error = iwn_init(ifp); 3786 } 3787 } 3788 3789 splx(s); 3790 rw_exit_write(&sc->sc_rwlock); 3791 return error; 3792 } 3793 3794 /* 3795 * Send a command to the firmware. 
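 * Commands travel through TX ring 4; payloads larger than the in-ring
 * command buffer are bounced through a dedicated mbuf.  With async == 0
 * the caller sleeps up to one second waiting for the firmware response.
 * A typical synchronous call, adapted from iwn_set_critical_temp()
 * below:
 *
 *	memset(&crit, 0, sizeof crit);
 *	crit.tempR = htole32(temp);
 *	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);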
3796 */ 3797 int 3798 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) 3799 { 3800 struct iwn_ops *ops = &sc->ops; 3801 struct iwn_tx_ring *ring = &sc->txq[4]; 3802 struct iwn_tx_desc *desc; 3803 struct iwn_tx_data *data; 3804 struct iwn_tx_cmd *cmd; 3805 struct mbuf *m; 3806 bus_addr_t paddr; 3807 int totlen, error; 3808 3809 desc = &ring->desc[ring->cur]; 3810 data = &ring->data[ring->cur]; 3811 totlen = 4 + size; 3812 3813 if (size > sizeof cmd->data) { 3814 /* Command is too large to fit in a descriptor. */ 3815 if (totlen > MCLBYTES) 3816 return EINVAL; 3817 MGETHDR(m, M_DONTWAIT, MT_DATA); 3818 if (m == NULL) 3819 return ENOMEM; 3820 if (totlen > MHLEN) { 3821 MCLGET(m, M_DONTWAIT); 3822 if (!(m->m_flags & M_EXT)) { 3823 m_freem(m); 3824 return ENOMEM; 3825 } 3826 } 3827 cmd = mtod(m, struct iwn_tx_cmd *); 3828 error = bus_dmamap_load(sc->sc_dmat, data->map, cmd, totlen, 3829 NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE); 3830 if (error != 0) { 3831 m_freem(m); 3832 return error; 3833 } 3834 data->m = m; 3835 paddr = data->map->dm_segs[0].ds_addr; 3836 } else { 3837 cmd = &ring->cmd[ring->cur]; 3838 paddr = data->cmd_paddr; 3839 } 3840 3841 cmd->code = code; 3842 cmd->flags = 0; 3843 cmd->qid = ring->qid; 3844 cmd->idx = ring->cur; 3845 memcpy(cmd->data, buf, size); 3846 3847 desc->nsegs = 1; 3848 desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); 3849 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4); 3850 3851 if (size > sizeof cmd->data) { 3852 bus_dmamap_sync(sc->sc_dmat, data->map, 0, totlen, 3853 BUS_DMASYNC_PREWRITE); 3854 } else { 3855 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map, 3856 (caddr_t)cmd - ring->cmd_dma.vaddr, totlen, 3857 BUS_DMASYNC_PREWRITE); 3858 } 3859 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 3860 (caddr_t)desc - ring->desc_dma.vaddr, sizeof (*desc), 3861 BUS_DMASYNC_PREWRITE); 3862 3863 /* Update TX scheduler. */ 3864 ops->update_sched(sc, ring->qid, ring->cur, 0, 0); 3865 3866 /* Kick command ring. */ 3867 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 3868 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3869 3870 return async ? 0 : tsleep_nsec(desc, PCATCH, "iwncmd", SEC_TO_NSEC(1)); 3871 } 3872 3873 int 3874 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3875 { 3876 struct iwn4965_node_info hnode; 3877 caddr_t src, dst; 3878 3879 /* 3880 * We use the node structure for 5000 Series internally (it is 3881 * a superset of the one for 4965AGN). We thus copy the common 3882 * fields before sending the command. 3883 */ 3884 src = (caddr_t)node; 3885 dst = (caddr_t)&hnode; 3886 memcpy(dst, src, 48); 3887 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */ 3888 memcpy(dst + 48, src + 72, 20); 3889 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async); 3890 } 3891 3892 int 3893 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 3894 { 3895 /* Direct mapping. */ 3896 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async); 3897 } 3898 3899 int 3900 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni) 3901 { 3902 struct ieee80211com *ic = &sc->sc_ic; 3903 struct iwn_node *wn = (void *)ni; 3904 struct iwn_cmd_link_quality linkq; 3905 const struct iwn_rate *rinfo; 3906 uint8_t txant; 3907 int i; 3908 3909 /* Use the first valid TX antenna. 
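 * Every entry of the link quality retry table carries its own antenna
 * selection in rflags; the single-stream entries below simply reuse
 * this lowest antenna, while antmsk_2stream advertises A+B for MIMO
 * rates (see the XXX note about a proper per-rate MIMO mask).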
*/ 3910 txant = IWN_LSB(sc->txchainmask); 3911 3912 memset(&linkq, 0, sizeof linkq); 3913 linkq.id = wn->id; 3914 linkq.antmsk_1stream = txant; 3915 linkq.antmsk_2stream = IWN_ANT_AB; 3916 linkq.ampdu_max = IWN_AMPDU_MAX; 3917 linkq.ampdu_threshold = 3; 3918 linkq.ampdu_limit = htole16(4000); /* 4ms */ 3919 3920 i = 0; 3921 if (ni->ni_flags & IEEE80211_NODE_HT) { 3922 int txmcs; 3923 for (txmcs = ni->ni_txmcs; txmcs >= 0; txmcs--) { 3924 rinfo = &iwn_rates[iwn_mcs2ridx[txmcs]]; 3925 linkq.retry[i].plcp = rinfo->ht_plcp; 3926 linkq.retry[i].rflags = rinfo->ht_flags; 3927 3928 /* XXX set correct ant mask for MIMO rates here */ 3929 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant); 3930 3931 /* First two Tx attempts may use 40MHz/SGI. */ 3932 if (i < 2) { 3933 if (iwn_rxon_ht40_enabled(sc)) 3934 linkq.retry[i].rflags |= IWN_RFLAG_HT40; 3935 if (ieee80211_ra_use_ht_sgi(ni)) 3936 linkq.retry[i].rflags |= IWN_RFLAG_SGI; 3937 } 3938 3939 if (++i >= IWN_MAX_TX_RETRIES) 3940 break; 3941 } 3942 } else { 3943 int txrate; 3944 for (txrate = ni->ni_txrate; txrate >= 0; txrate--) { 3945 rinfo = &iwn_rates[wn->ridx[txrate]]; 3946 linkq.retry[i].plcp = rinfo->plcp; 3947 linkq.retry[i].rflags = rinfo->flags; 3948 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant); 3949 if (++i >= IWN_MAX_TX_RETRIES) 3950 break; 3951 } 3952 } 3953 3954 /* Fill the rest with the lowest basic rate. */ 3955 rinfo = &iwn_rates[iwn_rval2ridx(ieee80211_min_basic_rate(ic))]; 3956 while (i < IWN_MAX_TX_RETRIES) { 3957 linkq.retry[i].plcp = rinfo->plcp; 3958 linkq.retry[i].rflags = rinfo->flags; 3959 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant); 3960 i++; 3961 } 3962 3963 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1); 3964 } 3965 3966 /* 3967 * Broadcast node is used to send group-addressed and management frames. 3968 */ 3969 int 3970 iwn_add_broadcast_node(struct iwn_softc *sc, int async, int ridx) 3971 { 3972 struct iwn_ops *ops = &sc->ops; 3973 struct iwn_node_info node; 3974 struct iwn_cmd_link_quality linkq; 3975 const struct iwn_rate *rinfo; 3976 uint8_t txant; 3977 int i, error; 3978 3979 memset(&node, 0, sizeof node); 3980 IEEE80211_ADDR_COPY(node.macaddr, etherbroadcastaddr); 3981 node.id = sc->broadcast_id; 3982 DPRINTF(("adding broadcast node\n")); 3983 if ((error = ops->add_node(sc, &node, async)) != 0) 3984 return error; 3985 3986 /* Use the first valid TX antenna. */ 3987 txant = IWN_LSB(sc->txchainmask); 3988 3989 memset(&linkq, 0, sizeof linkq); 3990 linkq.id = sc->broadcast_id; 3991 linkq.antmsk_1stream = txant; 3992 linkq.antmsk_2stream = IWN_ANT_AB; 3993 linkq.ampdu_max = IWN_AMPDU_MAX_NO_AGG; 3994 linkq.ampdu_threshold = 3; 3995 linkq.ampdu_limit = htole16(4000); /* 4ms */ 3996 3997 /* Use lowest mandatory bit-rate. */ 3998 rinfo = &iwn_rates[ridx]; 3999 linkq.retry[0].plcp = rinfo->plcp; 4000 linkq.retry[0].rflags = rinfo->flags; 4001 linkq.retry[0].rflags |= IWN_RFLAG_ANT(txant); 4002 /* Use same bit-rate for all TX retries. 
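 * Group-addressed and management frames are not stepped down to lower
 * rates, so every retry slot repeats the caller's ridx, e.g. 1 Mb/s CCK
 * on a 2 GHz channel or 6 Mb/s OFDM on 5 GHz (see iwn_config() and
 * iwn_auth()).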
*/ 4003 for (i = 1; i < IWN_MAX_TX_RETRIES; i++) { 4004 linkq.retry[i].plcp = linkq.retry[0].plcp; 4005 linkq.retry[i].rflags = linkq.retry[0].rflags; 4006 } 4007 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async); 4008 } 4009 4010 void 4011 iwn_updateedca(struct ieee80211com *ic) 4012 { 4013 #define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 4014 struct iwn_softc *sc = ic->ic_softc; 4015 struct iwn_edca_params cmd; 4016 int aci; 4017 4018 memset(&cmd, 0, sizeof cmd); 4019 cmd.flags = htole32(IWN_EDCA_UPDATE); 4020 for (aci = 0; aci < EDCA_NUM_AC; aci++) { 4021 const struct ieee80211_edca_ac_params *ac = 4022 &ic->ic_edca_ac[aci]; 4023 cmd.ac[aci].aifsn = ac->ac_aifsn; 4024 cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->ac_ecwmin)); 4025 cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->ac_ecwmax)); 4026 cmd.ac[aci].txoplimit = 4027 htole16(IEEE80211_TXOP_TO_US(ac->ac_txoplimit)); 4028 } 4029 (void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 4030 #undef IWN_EXP2 4031 } 4032 4033 void 4034 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on) 4035 { 4036 struct iwn_cmd_led led; 4037 4038 /* Clear microcode LED ownership. */ 4039 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL); 4040 4041 led.which = which; 4042 led.unit = htole32(10000); /* on/off in unit of 100ms */ 4043 led.off = off; 4044 led.on = on; 4045 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1); 4046 } 4047 4048 /* 4049 * Set the critical temperature at which the firmware will stop the radio 4050 * and notify us. 4051 */ 4052 int 4053 iwn_set_critical_temp(struct iwn_softc *sc) 4054 { 4055 struct iwn_critical_temp crit; 4056 int32_t temp; 4057 4058 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF); 4059 4060 if (sc->hw_type == IWN_HW_REV_TYPE_5150) 4061 temp = (IWN_CTOK(110) - sc->temp_off) * -5; 4062 else if (sc->hw_type == IWN_HW_REV_TYPE_4965) 4063 temp = IWN_CTOK(110); 4064 else 4065 temp = 110; 4066 memset(&crit, 0, sizeof crit); 4067 crit.tempR = htole32(temp); 4068 DPRINTF(("setting critical temperature to %d\n", temp)); 4069 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0); 4070 } 4071 4072 int 4073 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni) 4074 { 4075 struct iwn_cmd_timing cmd; 4076 uint64_t val, mod; 4077 4078 memset(&cmd, 0, sizeof cmd); 4079 memcpy(&cmd.tstamp, ni->ni_tstamp, sizeof (uint64_t)); 4080 cmd.bintval = htole16(ni->ni_intval); 4081 cmd.lintval = htole16(10); 4082 4083 /* Compute remaining time until next beacon. */ 4084 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; 4085 mod = letoh64(cmd.tstamp) % val; 4086 cmd.binitval = htole32((uint32_t)(val - mod)); 4087 4088 DPRINTF(("timing bintval=%u, tstamp=%llu, init=%u\n", 4089 ni->ni_intval, letoh64(cmd.tstamp), (uint32_t)(val - mod))); 4090 4091 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1); 4092 } 4093 4094 void 4095 iwn4965_power_calibration(struct iwn_softc *sc, int temp) 4096 { 4097 /* Adjust TX power if need be (delta >= 3 degC). */ 4098 DPRINTF(("temperature %d->%d\n", sc->temp, temp)); 4099 if (abs(temp - sc->temp) >= 3) { 4100 /* Record temperature of last calibration. */ 4101 sc->temp = temp; 4102 (void)iwn4965_set_txpower(sc, 1); 4103 } 4104 } 4105 4106 /* 4107 * Set TX power for current channel (each rate has its own power settings). 4108 * This function takes into account the regulatory information from EEPROM, 4109 * the current temperature and the current voltage. 
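 * For every rate and TX chain the loop below derives a power index,
 * roughly gain - (target power - sample power) - temperature and
 * voltage compensation, clamps it to [0, IWN4965_MAX_PWR_INDEX] and
 * looks up the matching RF/DSP gain pair from the per-band gain tables.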
4110 */ 4111 int 4112 iwn4965_set_txpower(struct iwn_softc *sc, int async) 4113 { 4114 /* Fixed-point arithmetic division using a n-bit fractional part. */ 4115 #define fdivround(a, b, n) \ 4116 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) 4117 /* Linear interpolation. */ 4118 #define interpolate(x, x1, y1, x2, y2, n) \ 4119 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) 4120 4121 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 }; 4122 struct ieee80211com *ic = &sc->sc_ic; 4123 struct iwn_ucode_info *uc = &sc->ucode_info; 4124 struct ieee80211_channel *ch; 4125 struct iwn4965_cmd_txpower cmd; 4126 struct iwn4965_eeprom_chan_samples *chans; 4127 const uint8_t *rf_gain, *dsp_gain; 4128 int32_t vdiff, tdiff; 4129 int i, c, grp, maxpwr, is_ht40 = 0; 4130 uint8_t chan, ext_chan; 4131 4132 /* Retrieve current channel from last RXON. */ 4133 chan = sc->rxon.chan; 4134 DPRINTF(("setting TX power for channel %d\n", chan)); 4135 ch = &ic->ic_channels[chan]; 4136 4137 memset(&cmd, 0, sizeof cmd); 4138 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1; 4139 cmd.chan = chan; 4140 4141 if (IEEE80211_IS_CHAN_5GHZ(ch)) { 4142 maxpwr = sc->maxpwr5GHz; 4143 rf_gain = iwn4965_rf_gain_5ghz; 4144 dsp_gain = iwn4965_dsp_gain_5ghz; 4145 } else { 4146 maxpwr = sc->maxpwr2GHz; 4147 rf_gain = iwn4965_rf_gain_2ghz; 4148 dsp_gain = iwn4965_dsp_gain_2ghz; 4149 } 4150 4151 /* Compute voltage compensation. */ 4152 vdiff = ((int32_t)letoh32(uc->volt) - sc->eeprom_voltage) / 7; 4153 if (vdiff > 0) 4154 vdiff *= 2; 4155 if (abs(vdiff) > 2) 4156 vdiff = 0; 4157 DPRINTF(("voltage compensation=%d (UCODE=%d, EEPROM=%d)\n", 4158 vdiff, letoh32(uc->volt), sc->eeprom_voltage)); 4159 4160 /* Get channel attenuation group. */ 4161 if (chan <= 20) /* 1-20 */ 4162 grp = 4; 4163 else if (chan <= 43) /* 34-43 */ 4164 grp = 0; 4165 else if (chan <= 70) /* 44-70 */ 4166 grp = 1; 4167 else if (chan <= 124) /* 71-124 */ 4168 grp = 2; 4169 else /* 125-200 */ 4170 grp = 3; 4171 DPRINTF(("chan %d, attenuation group=%d\n", chan, grp)); 4172 4173 /* Get channel sub-band. */ 4174 for (i = 0; i < IWN_NBANDS; i++) 4175 if (sc->bands[i].lo != 0 && 4176 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi) 4177 break; 4178 if (i == IWN_NBANDS) /* Can't happen in real-life. */ 4179 return EINVAL; 4180 chans = sc->bands[i].chans; 4181 DPRINTF(("chan %d sub-band=%d\n", chan, i)); 4182 4183 if (iwn_rxon_ht40_enabled(sc)) { 4184 is_ht40 = 1; 4185 if (le32toh(sc->rxon.flags) & IWN_RXON_HT_HT40MINUS) 4186 ext_chan = chan - 2; 4187 else 4188 ext_chan = chan + 2; 4189 } else 4190 ext_chan = chan; 4191 4192 for (c = 0; c < 2; c++) { 4193 uint8_t power, gain, temp; 4194 int maxchpwr, pwr, ridx, idx; 4195 4196 power = interpolate(ext_chan, 4197 chans[0].num, chans[0].samples[c][1].power, 4198 chans[1].num, chans[1].samples[c][1].power, 1); 4199 gain = interpolate(ext_chan, 4200 chans[0].num, chans[0].samples[c][1].gain, 4201 chans[1].num, chans[1].samples[c][1].gain, 1); 4202 temp = interpolate(ext_chan, 4203 chans[0].num, chans[0].samples[c][1].temp, 4204 chans[1].num, chans[1].samples[c][1].temp, 1); 4205 DPRINTF(("TX chain %d: power=%d gain=%d temp=%d\n", 4206 c, power, gain, temp)); 4207 4208 /* Compute temperature compensation. */ 4209 tdiff = ((sc->temp - temp) * 2) / tdiv[grp]; 4210 DPRINTF(("temperature compensation=%d (current=%d, " 4211 "EEPROM=%d)\n", tdiff, sc->temp, temp)); 4212 4213 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { 4214 /* Convert dBm to half-dBm. 
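 * All power math below is in half-dBm units: a channel limit of, say,
 * 15 dBm becomes maxchpwr = 30, and the per-rate backoffs such as
 * "pwr -= 15" for OFDM48 correspond to 7.5 dB steps.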
*/ 4215 if (is_ht40) 4216 maxchpwr = sc->maxpwr40[chan] * 2; 4217 else 4218 maxchpwr = sc->maxpwr[chan] * 2; 4219 #ifdef notyet 4220 if (ridx > iwn_mcs2ridx[7] && ridx < iwn_mcs2ridx[16]) 4221 maxchpwr -= 6; /* MIMO 2T: -3dB */ 4222 #endif 4223 4224 pwr = maxpwr; 4225 4226 /* Adjust TX power based on rate. */ 4227 if ((ridx % 8) == 5) 4228 pwr -= 15; /* OFDM48: -7.5dB */ 4229 else if ((ridx % 8) == 6) 4230 pwr -= 17; /* OFDM54: -8.5dB */ 4231 else if ((ridx % 8) == 7) 4232 pwr -= 20; /* OFDM60: -10dB */ 4233 else 4234 pwr -= 10; /* Others: -5dB */ 4235 4236 /* Do not exceed channel max TX power. */ 4237 if (pwr > maxchpwr) 4238 pwr = maxchpwr; 4239 4240 idx = gain - (pwr - power) - tdiff - vdiff; 4241 if (ridx > iwn_mcs2ridx[7]) /* MIMO */ 4242 idx += (int32_t)letoh32(uc->atten[grp][c]); 4243 4244 if (cmd.band == 0) 4245 idx += 9; /* 5GHz */ 4246 if (ridx == IWN_RIDX_MAX) 4247 idx += 5; /* CCK */ 4248 4249 /* Make sure idx stays in a valid range. */ 4250 if (idx < 0) 4251 idx = 0; 4252 else if (idx > IWN4965_MAX_PWR_INDEX) 4253 idx = IWN4965_MAX_PWR_INDEX; 4254 4255 DPRINTF(("TX chain %d, rate idx %d: power=%d\n", 4256 c, ridx, idx)); 4257 cmd.power[ridx].rf_gain[c] = rf_gain[idx]; 4258 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx]; 4259 } 4260 } 4261 4262 DPRINTF(("setting TX power for chan %d\n", chan)); 4263 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async); 4264 4265 #undef interpolate 4266 #undef fdivround 4267 } 4268 4269 int 4270 iwn5000_set_txpower(struct iwn_softc *sc, int async) 4271 { 4272 struct iwn5000_cmd_txpower cmd; 4273 4274 /* 4275 * TX power calibration is handled automatically by the firmware 4276 * for 5000 Series. 4277 */ 4278 memset(&cmd, 0, sizeof cmd); 4279 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */ 4280 cmd.flags = IWN5000_TXPOWER_NO_CLOSED; 4281 cmd.srv_limit = IWN5000_TXPOWER_AUTO; 4282 DPRINTF(("setting TX power\n")); 4283 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async); 4284 } 4285 4286 /* 4287 * Retrieve the maximum RSSI (in dBm) among receivers. 4288 */ 4289 int 4290 iwn4965_get_rssi(const struct iwn_rx_stat *stat) 4291 { 4292 struct iwn4965_rx_phystat *phy = (void *)stat->phybuf; 4293 uint8_t mask, agc; 4294 int rssi; 4295 4296 mask = (letoh16(phy->antenna) >> 4) & IWN_ANT_ABC; 4297 agc = (letoh16(phy->agc) >> 7) & 0x7f; 4298 4299 rssi = 0; 4300 if (mask & IWN_ANT_A) 4301 rssi = MAX(rssi, phy->rssi[0]); 4302 if (mask & IWN_ANT_B) 4303 rssi = MAX(rssi, phy->rssi[2]); 4304 if (mask & IWN_ANT_C) 4305 rssi = MAX(rssi, phy->rssi[4]); 4306 4307 return rssi - agc - IWN_RSSI_TO_DBM; 4308 } 4309 4310 int 4311 iwn5000_get_rssi(const struct iwn_rx_stat *stat) 4312 { 4313 struct iwn5000_rx_phystat *phy = (void *)stat->phybuf; 4314 uint8_t agc; 4315 int rssi; 4316 4317 agc = (letoh32(phy->agc) >> 9) & 0x7f; 4318 4319 rssi = MAX(letoh16(phy->rssi[0]) & 0xff, 4320 letoh16(phy->rssi[1]) & 0xff); 4321 rssi = MAX(letoh16(phy->rssi[2]) & 0xff, rssi); 4322 4323 return rssi - agc - IWN_RSSI_TO_DBM; 4324 } 4325 4326 /* 4327 * Retrieve the average noise (in dBm) among receivers. 4328 */ 4329 int 4330 iwn_get_noise(const struct iwn_rx_general_stats *stats) 4331 { 4332 int i, total, nbant, noise; 4333 4334 total = nbant = 0; 4335 for (i = 0; i < 3; i++) { 4336 if ((noise = letoh32(stats->noise[i]) & 0xff) == 0) 4337 continue; 4338 total += noise; 4339 nbant++; 4340 } 4341 /* There should be at least one antenna but check anyway. */ 4342 return (nbant == 0) ? 
-127 : (total / nbant) - 107; 4343 } 4344 4345 /* 4346 * Compute temperature (in degC) from last received statistics. 4347 */ 4348 int 4349 iwn4965_get_temperature(struct iwn_softc *sc) 4350 { 4351 struct iwn_ucode_info *uc = &sc->ucode_info; 4352 int32_t r1, r2, r3, r4, temp; 4353 4354 if (sc->rx_stats_flags & IWN_STATS_FLAGS_BAND_HT40) { 4355 r1 = letoh32(uc->temp[0].chan40MHz); 4356 r2 = letoh32(uc->temp[1].chan40MHz); 4357 r3 = letoh32(uc->temp[2].chan40MHz); 4358 } else { 4359 r1 = letoh32(uc->temp[0].chan20MHz); 4360 r2 = letoh32(uc->temp[1].chan20MHz); 4361 r3 = letoh32(uc->temp[2].chan20MHz); 4362 } 4363 r4 = letoh32(sc->rawtemp); 4364 4365 if (r1 == r3) /* Prevents division by 0 (should not happen). */ 4366 return 0; 4367 4368 /* Sign-extend 23-bit R4 value to 32-bit. */ 4369 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000; 4370 /* Compute temperature in Kelvin. */ 4371 temp = (259 * (r4 - r2)) / (r3 - r1); 4372 temp = (temp * 97) / 100 + 8; 4373 4374 DPRINTF(("temperature %dK/%dC\n", temp, IWN_KTOC(temp))); 4375 return IWN_KTOC(temp); 4376 } 4377 4378 int 4379 iwn5000_get_temperature(struct iwn_softc *sc) 4380 { 4381 int32_t temp; 4382 4383 /* 4384 * Temperature is not used by the driver for 5000 Series because 4385 * TX power calibration is handled by firmware. 4386 */ 4387 temp = letoh32(sc->rawtemp); 4388 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 4389 temp = (temp / -5) + sc->temp_off; 4390 temp = IWN_KTOC(temp); 4391 } 4392 return temp; 4393 } 4394 4395 /* 4396 * Initialize sensitivity calibration state machine. 4397 */ 4398 int 4399 iwn_init_sensitivity(struct iwn_softc *sc) 4400 { 4401 struct iwn_ops *ops = &sc->ops; 4402 struct iwn_calib_state *calib = &sc->calib; 4403 uint32_t flags; 4404 int error; 4405 4406 /* Reset calibration state machine. */ 4407 memset(calib, 0, sizeof (*calib)); 4408 calib->state = IWN_CALIB_STATE_INIT; 4409 calib->cck_state = IWN_CCK_STATE_HIFA; 4410 /* Set initial correlation values. */ 4411 calib->ofdm_x1 = sc->limits->min_ofdm_x1; 4412 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1; 4413 calib->ofdm_x4 = sc->limits->min_ofdm_x4; 4414 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4; 4415 calib->cck_x4 = 125; 4416 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4; 4417 calib->energy_cck = sc->limits->energy_cck; 4418 4419 /* Write initial sensitivity. */ 4420 if ((error = iwn_send_sensitivity(sc)) != 0) 4421 return error; 4422 4423 /* Write initial gains. */ 4424 if ((error = ops->init_gains(sc)) != 0) 4425 return error; 4426 4427 /* Request statistics at each beacon interval. */ 4428 flags = 0; 4429 DPRINTFN(2, ("sending request for statistics\n")); 4430 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 4431 } 4432 4433 /* 4434 * Collect noise and RSSI statistics for the first 20 beacons received 4435 * after association and use them to determine connected antennas and 4436 * to set differential gains. 4437 */ 4438 void 4439 iwn_collect_noise(struct iwn_softc *sc, 4440 const struct iwn_rx_general_stats *stats) 4441 { 4442 struct iwn_ops *ops = &sc->ops; 4443 struct iwn_calib_state *calib = &sc->calib; 4444 uint32_t val; 4445 int i; 4446 4447 /* Accumulate RSSI and noise for all 3 antennas. */ 4448 for (i = 0; i < 3; i++) { 4449 calib->rssi[i] += letoh32(stats->rssi[i]) & 0xff; 4450 calib->noise[i] += letoh32(stats->noise[i]) & 0xff; 4451 } 4452 /* NB: We update differential gains only once after 20 beacons. */ 4453 if (++calib->nbeacons < 20) 4454 return; 4455 4456 /* Determine highest average RSSI. 
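 * calib->rssi[] holds sums over the 20 beacons accumulated above, so
 * the "15 * 20" test below treats an antenna as disconnected when its
 * average RSSI sits more than 15 units below the strongest antenna.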
*/ 4457 val = MAX(calib->rssi[0], calib->rssi[1]); 4458 val = MAX(calib->rssi[2], val); 4459 4460 /* Determine which antennas are connected. */ 4461 sc->chainmask = sc->rxchainmask; 4462 for (i = 0; i < 3; i++) 4463 if (val - calib->rssi[i] > 15 * 20) 4464 sc->chainmask &= ~(1 << i); 4465 DPRINTF(("RX chains mask: theoretical=0x%x, actual=0x%x\n", 4466 sc->rxchainmask, sc->chainmask)); 4467 4468 /* If none of the TX antennas are connected, keep at least one. */ 4469 if ((sc->chainmask & sc->txchainmask) == 0) 4470 sc->chainmask |= IWN_LSB(sc->txchainmask); 4471 4472 (void)ops->set_gains(sc); 4473 calib->state = IWN_CALIB_STATE_RUN; 4474 4475 #ifdef notyet 4476 /* XXX Disable RX chains with no antennas connected. */ 4477 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask)); 4478 (void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 4479 #endif 4480 4481 /* Enable power-saving mode if requested by user. */ 4482 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) 4483 (void)iwn_set_pslevel(sc, 0, 3, 1); 4484 } 4485 4486 int 4487 iwn4965_init_gains(struct iwn_softc *sc) 4488 { 4489 struct iwn_phy_calib_gain cmd; 4490 4491 memset(&cmd, 0, sizeof cmd); 4492 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 4493 /* Differential gains initially set to 0 for all 3 antennas. */ 4494 DPRINTF(("setting initial differential gains\n")); 4495 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4496 } 4497 4498 int 4499 iwn5000_init_gains(struct iwn_softc *sc) 4500 { 4501 struct iwn_phy_calib cmd; 4502 4503 memset(&cmd, 0, sizeof cmd); 4504 cmd.code = sc->reset_noise_gain; 4505 cmd.ngroups = 1; 4506 cmd.isvalid = 1; 4507 DPRINTF(("setting initial differential gains\n")); 4508 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4509 } 4510 4511 int 4512 iwn4965_set_gains(struct iwn_softc *sc) 4513 { 4514 struct iwn_calib_state *calib = &sc->calib; 4515 struct iwn_phy_calib_gain cmd; 4516 int i, delta, noise; 4517 4518 /* Get minimal noise among connected antennas. */ 4519 noise = INT_MAX; /* NB: There's at least one antenna. */ 4520 for (i = 0; i < 3; i++) 4521 if (sc->chainmask & (1 << i)) 4522 noise = MIN(calib->noise[i], noise); 4523 4524 memset(&cmd, 0, sizeof cmd); 4525 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; 4526 /* Set differential gains for connected antennas. */ 4527 for (i = 0; i < 3; i++) { 4528 if (sc->chainmask & (1 << i)) { 4529 /* Compute attenuation (in unit of 1.5dB). */ 4530 delta = (noise - (int32_t)calib->noise[i]) / 30; 4531 /* NB: delta <= 0 */ 4532 /* Limit to [-4.5dB,0]. */ 4533 cmd.gain[i] = MIN(abs(delta), 3); 4534 if (delta < 0) 4535 cmd.gain[i] |= 1 << 2; /* sign bit */ 4536 } 4537 } 4538 DPRINTF(("setting differential gains Ant A/B/C: %x/%x/%x (%x)\n", 4539 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask)); 4540 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4541 } 4542 4543 int 4544 iwn5000_set_gains(struct iwn_softc *sc) 4545 { 4546 struct iwn_calib_state *calib = &sc->calib; 4547 struct iwn_phy_calib_gain cmd; 4548 int i, ant, div, delta; 4549 4550 /* We collected 20 beacons and !=6050 need a 1.5 factor. */ 4551 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30; 4552 4553 memset(&cmd, 0, sizeof cmd); 4554 cmd.code = sc->noise_gain; 4555 cmd.ngroups = 1; 4556 cmd.isvalid = 1; 4557 /* 4558 * Get first available RX antenna as referential. 4559 * IWN_LSB() return values start with 1, but antenna gain array 4560 * cmd.gain[] and noise array calib->noise[] start with 0. 
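 * The gain[] values written below encode the magnitude in 1.5dB steps
 * (clamped to 3, i.e. 4.5dB) with bit 2 acting as the sign bit; an
 * illustrative delta of -2 would thus be encoded as 0x6.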
4561 */ 4562 ant = IWN_LSB(sc->rxchainmask) - 1; 4563 4564 /* Set differential gains for other antennas. */ 4565 for (i = ant + 1; i < 3; i++) { 4566 if (sc->chainmask & (1 << i)) { 4567 /* The delta is relative to antenna "ant". */ 4568 delta = ((int32_t)calib->noise[ant] - 4569 (int32_t)calib->noise[i]) / div; 4570 DPRINTF(("Ant[%d] vs. Ant[%d]: delta %d\n", ant, i, delta)); 4571 /* Limit to [-4.5dB,+4.5dB]. */ 4572 cmd.gain[i] = MIN(abs(delta), 3); 4573 if (delta < 0) 4574 cmd.gain[i] |= 1 << 2; /* sign bit */ 4575 DPRINTF(("Setting differential gains for antenna %d: %x\n", 4576 i, cmd.gain[i])); 4577 } 4578 } 4579 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); 4580 } 4581 4582 /* 4583 * Tune RF RX sensitivity based on the number of false alarms detected 4584 * during the last beacon period. 4585 */ 4586 void 4587 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) 4588 { 4589 #define inc(val, inc, max) \ 4590 if ((val) < (max)) { \ 4591 if ((val) < (max) - (inc)) \ 4592 (val) += (inc); \ 4593 else \ 4594 (val) = (max); \ 4595 needs_update = 1; \ 4596 } 4597 #define dec(val, dec, min) \ 4598 if ((val) > (min)) { \ 4599 if ((val) > (min) + (dec)) \ 4600 (val) -= (dec); \ 4601 else \ 4602 (val) = (min); \ 4603 needs_update = 1; \ 4604 } 4605 4606 const struct iwn_sensitivity_limits *limits = sc->limits; 4607 struct iwn_calib_state *calib = &sc->calib; 4608 uint32_t val, rxena, fa; 4609 uint32_t energy[3], energy_min; 4610 uint8_t noise[3], noise_ref; 4611 int i, needs_update = 0; 4612 4613 /* Check that we've been enabled long enough. */ 4614 if ((rxena = letoh32(stats->general.load)) == 0) 4615 return; 4616 4617 /* Compute number of false alarms since last call for OFDM. */ 4618 fa = letoh32(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; 4619 fa += letoh32(stats->ofdm.fa) - calib->fa_ofdm; 4620 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ 4621 4622 /* Save counters values for next call. */ 4623 calib->bad_plcp_ofdm = letoh32(stats->ofdm.bad_plcp); 4624 calib->fa_ofdm = letoh32(stats->ofdm.fa); 4625 4626 if (fa > 50 * rxena) { 4627 /* High false alarm count, decrease sensitivity. */ 4628 DPRINTFN(2, ("OFDM high false alarm count: %u\n", fa)); 4629 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1); 4630 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1); 4631 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4); 4632 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4); 4633 4634 } else if (fa < 5 * rxena) { 4635 /* Low false alarm count, increase sensitivity. */ 4636 DPRINTFN(2, ("OFDM low false alarm count: %u\n", fa)); 4637 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1); 4638 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1); 4639 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4); 4640 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4); 4641 } 4642 4643 /* Compute maximum noise among 3 receivers. */ 4644 for (i = 0; i < 3; i++) 4645 noise[i] = (letoh32(stats->general.noise[i]) >> 8) & 0xff; 4646 val = MAX(noise[0], noise[1]); 4647 val = MAX(noise[2], val); 4648 /* Insert it into our samples table. */ 4649 calib->noise_samples[calib->cur_noise_sample] = val; 4650 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; 4651 4652 /* Compute maximum noise among last 20 samples. */ 4653 noise_ref = calib->noise_samples[0]; 4654 for (i = 1; i < 20; i++) 4655 noise_ref = MAX(noise_ref, calib->noise_samples[i]); 4656 4657 /* Compute maximum energy among 3 receivers. 
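 * These per-chain energy readings feed a 10-entry sliding window; the
 * value derived from it (energy_min below) is used as the floor
 * whenever the CCK energy threshold calib->energy_cck is decreased.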
*/ 4658 for (i = 0; i < 3; i++) 4659 energy[i] = letoh32(stats->general.energy[i]); 4660 val = MIN(energy[0], energy[1]); 4661 val = MIN(energy[2], val); 4662 /* Insert it into our samples table. */ 4663 calib->energy_samples[calib->cur_energy_sample] = val; 4664 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; 4665 4666 /* Compute minimum energy among last 10 samples. */ 4667 energy_min = calib->energy_samples[0]; 4668 for (i = 1; i < 10; i++) 4669 energy_min = MAX(energy_min, calib->energy_samples[i]); 4670 energy_min += 6; 4671 4672 /* Compute number of false alarms since last call for CCK. */ 4673 fa = letoh32(stats->cck.bad_plcp) - calib->bad_plcp_cck; 4674 fa += letoh32(stats->cck.fa) - calib->fa_cck; 4675 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ 4676 4677 /* Save counters values for next call. */ 4678 calib->bad_plcp_cck = letoh32(stats->cck.bad_plcp); 4679 calib->fa_cck = letoh32(stats->cck.fa); 4680 4681 if (fa > 50 * rxena) { 4682 /* High false alarm count, decrease sensitivity. */ 4683 DPRINTFN(2, ("CCK high false alarm count: %u\n", fa)); 4684 calib->cck_state = IWN_CCK_STATE_HIFA; 4685 calib->low_fa = 0; 4686 4687 if (calib->cck_x4 > 160) { 4688 calib->noise_ref = noise_ref; 4689 if (calib->energy_cck > 2) 4690 dec(calib->energy_cck, 2, energy_min); 4691 } 4692 if (calib->cck_x4 < 160) { 4693 calib->cck_x4 = 161; 4694 needs_update = 1; 4695 } else 4696 inc(calib->cck_x4, 3, limits->max_cck_x4); 4697 4698 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4); 4699 4700 } else if (fa < 5 * rxena) { 4701 /* Low false alarm count, increase sensitivity. */ 4702 DPRINTFN(2, ("CCK low false alarm count: %u\n", fa)); 4703 calib->cck_state = IWN_CCK_STATE_LOFA; 4704 calib->low_fa++; 4705 4706 if (calib->cck_state != IWN_CCK_STATE_INIT && 4707 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 || 4708 calib->low_fa > 100)) { 4709 inc(calib->energy_cck, 2, limits->min_energy_cck); 4710 dec(calib->cck_x4, 3, limits->min_cck_x4); 4711 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4); 4712 } 4713 } else { 4714 /* Not worth to increase or decrease sensitivity. */ 4715 DPRINTFN(2, ("CCK normal false alarm count: %u\n", fa)); 4716 calib->low_fa = 0; 4717 calib->noise_ref = noise_ref; 4718 4719 if (calib->cck_state == IWN_CCK_STATE_HIFA) { 4720 /* Previous interval had many false alarms. */ 4721 dec(calib->energy_cck, 8, energy_min); 4722 } 4723 calib->cck_state = IWN_CCK_STATE_INIT; 4724 } 4725 4726 if (needs_update) 4727 (void)iwn_send_sensitivity(sc); 4728 #undef dec 4729 #undef inc 4730 } 4731 4732 int 4733 iwn_send_sensitivity(struct iwn_softc *sc) 4734 { 4735 struct iwn_calib_state *calib = &sc->calib; 4736 struct iwn_enhanced_sensitivity_cmd cmd; 4737 int len; 4738 4739 memset(&cmd, 0, sizeof cmd); 4740 len = sizeof (struct iwn_sensitivity_cmd); 4741 cmd.which = IWN_SENSITIVITY_WORKTBL; 4742 /* OFDM modulation. */ 4743 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1); 4744 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1); 4745 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4); 4746 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4); 4747 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm); 4748 cmd.energy_ofdm_th = htole16(62); 4749 /* CCK modulation. */ 4750 cmd.corr_cck_x4 = htole16(calib->cck_x4); 4751 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4); 4752 cmd.energy_cck = htole16(calib->energy_cck); 4753 /* Barker modulation: use default values. 
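 * The two Barker correlators below stay at fixed constants and are not
 * adjusted by iwn_tune_sensitivity(); on devices without enhanced
 * sensitivity support (IWN_FLAG_ENH_SENS) the command is also sent
 * truncated to the plain iwn_sensitivity_cmd size.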
*/ 4754 cmd.corr_barker = htole16(190); 4755 cmd.corr_barker_mrc = htole16(390); 4756 if (!(sc->sc_flags & IWN_FLAG_ENH_SENS)) 4757 goto send; 4758 /* Enhanced sensitivity settings. */ 4759 len = sizeof (struct iwn_enhanced_sensitivity_cmd); 4760 cmd.ofdm_det_slope_mrc = htole16(668); 4761 cmd.ofdm_det_icept_mrc = htole16(4); 4762 cmd.ofdm_det_slope = htole16(486); 4763 cmd.ofdm_det_icept = htole16(37); 4764 cmd.cck_det_slope_mrc = htole16(853); 4765 cmd.cck_det_icept_mrc = htole16(4); 4766 cmd.cck_det_slope = htole16(476); 4767 cmd.cck_det_icept = htole16(99); 4768 send: 4769 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1); 4770 } 4771 4772 /* 4773 * Set STA mode power saving level (between 0 and 5). 4774 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. 4775 */ 4776 int 4777 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async) 4778 { 4779 struct iwn_pmgt_cmd cmd; 4780 const struct iwn_pmgt *pmgt; 4781 uint32_t max, skip_dtim; 4782 pcireg_t reg; 4783 int i; 4784 4785 /* Select which PS parameters to use. */ 4786 if (dtim <= 2) 4787 pmgt = &iwn_pmgt[0][level]; 4788 else if (dtim <= 10) 4789 pmgt = &iwn_pmgt[1][level]; 4790 else 4791 pmgt = &iwn_pmgt[2][level]; 4792 4793 memset(&cmd, 0, sizeof cmd); 4794 if (level != 0) /* not CAM */ 4795 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP); 4796 if (level == 5) 4797 cmd.flags |= htole16(IWN_PS_FAST_PD); 4798 /* Retrieve PCIe Active State Power Management (ASPM). */ 4799 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 4800 sc->sc_cap_off + PCI_PCIE_LCSR); 4801 if (!(reg & PCI_PCIE_LCSR_ASPM_L0S)) /* L0s Entry disabled. */ 4802 cmd.flags |= htole16(IWN_PS_PCI_PMGT); 4803 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024); 4804 cmd.txtimeout = htole32(pmgt->txtimeout * 1024); 4805 4806 if (dtim == 0) { 4807 dtim = 1; 4808 skip_dtim = 0; 4809 } else 4810 skip_dtim = pmgt->skip_dtim; 4811 if (skip_dtim != 0) { 4812 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM); 4813 max = pmgt->intval[4]; 4814 if (max == (uint32_t)-1) 4815 max = dtim * (skip_dtim + 1); 4816 else if (max > dtim) 4817 max = (max / dtim) * dtim; 4818 } else 4819 max = dtim; 4820 for (i = 0; i < 5; i++) 4821 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); 4822 4823 DPRINTF(("setting power saving level to %d\n", level)); 4824 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); 4825 } 4826 4827 int 4828 iwn_send_btcoex(struct iwn_softc *sc) 4829 { 4830 struct iwn_bluetooth cmd; 4831 4832 memset(&cmd, 0, sizeof cmd); 4833 cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO; 4834 cmd.lead_time = IWN_BT_LEAD_TIME_DEF; 4835 cmd.max_kill = IWN_BT_MAX_KILL_DEF; 4836 DPRINTF(("configuring bluetooth coexistence\n")); 4837 return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0); 4838 } 4839 4840 int 4841 iwn_send_advanced_btcoex(struct iwn_softc *sc) 4842 { 4843 static const uint32_t btcoex_3wire[12] = { 4844 0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa, 4845 0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa, 4846 0xc0004000, 0x00004000, 0xf0005000, 0xf0005000, 4847 }; 4848 struct iwn_btcoex_priotable btprio; 4849 struct iwn_btcoex_prot btprot; 4850 int error, i; 4851 4852 if (sc->hw_type == IWN_HW_REV_TYPE_2030 || 4853 sc->hw_type == IWN_HW_REV_TYPE_135) { 4854 struct iwn2000_btcoex_config btconfig; 4855 4856 memset(&btconfig, 0, sizeof btconfig); 4857 btconfig.flags = IWN_BT_COEX6000_CHAN_INHIBITION | 4858 (IWN_BT_COEX6000_MODE_3W << IWN_BT_COEX6000_MODE_SHIFT) | 4859 IWN_BT_SYNC_2_BT_DISABLE; 4860 btconfig.max_kill = 5; 4861 
btconfig.bt3_t7_timer = 1; 4862 btconfig.kill_ack = htole32(0xffff0000); 4863 btconfig.kill_cts = htole32(0xffff0000); 4864 btconfig.sample_time = 2; 4865 btconfig.bt3_t2_timer = 0xc; 4866 for (i = 0; i < 12; i++) 4867 btconfig.lookup_table[i] = htole32(btcoex_3wire[i]); 4868 btconfig.valid = htole16(0xff); 4869 btconfig.prio_boost = htole32(0xf0); 4870 DPRINTF(("configuring advanced bluetooth coexistence\n")); 4871 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, 4872 sizeof(btconfig), 1); 4873 if (error != 0) 4874 return (error); 4875 } else { 4876 struct iwn6000_btcoex_config btconfig; 4877 4878 memset(&btconfig, 0, sizeof btconfig); 4879 btconfig.flags = IWN_BT_COEX6000_CHAN_INHIBITION | 4880 (IWN_BT_COEX6000_MODE_3W << IWN_BT_COEX6000_MODE_SHIFT) | 4881 IWN_BT_SYNC_2_BT_DISABLE; 4882 btconfig.max_kill = 5; 4883 btconfig.bt3_t7_timer = 1; 4884 btconfig.kill_ack = htole32(0xffff0000); 4885 btconfig.kill_cts = htole32(0xffff0000); 4886 btconfig.sample_time = 2; 4887 btconfig.bt3_t2_timer = 0xc; 4888 for (i = 0; i < 12; i++) 4889 btconfig.lookup_table[i] = htole32(btcoex_3wire[i]); 4890 btconfig.valid = htole16(0xff); 4891 btconfig.prio_boost = 0xf0; 4892 DPRINTF(("configuring advanced bluetooth coexistence\n")); 4893 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, 4894 sizeof(btconfig), 1); 4895 if (error != 0) 4896 return (error); 4897 } 4898 4899 memset(&btprio, 0, sizeof btprio); 4900 btprio.calib_init1 = 0x6; 4901 btprio.calib_init2 = 0x7; 4902 btprio.calib_periodic_low1 = 0x2; 4903 btprio.calib_periodic_low2 = 0x3; 4904 btprio.calib_periodic_high1 = 0x4; 4905 btprio.calib_periodic_high2 = 0x5; 4906 btprio.dtim = 0x6; 4907 btprio.scan52 = 0x8; 4908 btprio.scan24 = 0xa; 4909 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio), 4910 1); 4911 if (error != 0) 4912 return (error); 4913 4914 /* Force BT state machine change */ 4915 memset(&btprot, 0, sizeof btprot); 4916 btprot.open = 1; 4917 btprot.type = 1; 4918 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1); 4919 if (error != 0) 4920 return (error); 4921 4922 btprot.open = 0; 4923 return (iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1)); 4924 } 4925 4926 int 4927 iwn5000_runtime_calib(struct iwn_softc *sc) 4928 { 4929 struct iwn5000_calib_config cmd; 4930 4931 memset(&cmd, 0, sizeof cmd); 4932 cmd.ucode.once.enable = 0xffffffff; 4933 cmd.ucode.once.start = IWN5000_CALIB_DC; 4934 DPRINTF(("configuring runtime calibration\n")); 4935 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0); 4936 } 4937 4938 int 4939 iwn_config(struct iwn_softc *sc) 4940 { 4941 struct iwn_ops *ops = &sc->ops; 4942 struct ieee80211com *ic = &sc->sc_ic; 4943 struct ifnet *ifp = &ic->ic_if; 4944 uint32_t txmask; 4945 uint16_t rxchain; 4946 int error, ridx; 4947 4948 /* Set radio temperature sensor offset. 
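 * Which calibration commands follow depends on the chip: 6005 parts use
 * the 6000-style temperature offset command, the 2000/2030/105/135
 * family uses the 2000-style one, and 6050/6005 additionally enable
 * runtime DC calibration, all before RXON is configured.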
*/ 4949 if (sc->hw_type == IWN_HW_REV_TYPE_6005) { 4950 error = iwn6000_temp_offset_calib(sc); 4951 if (error != 0) { 4952 printf("%s: could not set temperature offset\n", 4953 sc->sc_dev.dv_xname); 4954 return error; 4955 } 4956 } 4957 4958 if (sc->hw_type == IWN_HW_REV_TYPE_2030 || 4959 sc->hw_type == IWN_HW_REV_TYPE_2000 || 4960 sc->hw_type == IWN_HW_REV_TYPE_135 || 4961 sc->hw_type == IWN_HW_REV_TYPE_105) { 4962 error = iwn2000_temp_offset_calib(sc); 4963 if (error != 0) { 4964 printf("%s: could not set temperature offset\n", 4965 sc->sc_dev.dv_xname); 4966 return error; 4967 } 4968 } 4969 4970 if (sc->hw_type == IWN_HW_REV_TYPE_6050 || 4971 sc->hw_type == IWN_HW_REV_TYPE_6005) { 4972 /* Configure runtime DC calibration. */ 4973 error = iwn5000_runtime_calib(sc); 4974 if (error != 0) { 4975 printf("%s: could not configure runtime calibration\n", 4976 sc->sc_dev.dv_xname); 4977 return error; 4978 } 4979 } 4980 4981 /* Configure valid TX chains for >=5000 Series. */ 4982 if (sc->hw_type != IWN_HW_REV_TYPE_4965) { 4983 txmask = htole32(sc->txchainmask); 4984 DPRINTF(("configuring valid TX chains 0x%x\n", txmask)); 4985 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask, 4986 sizeof txmask, 0); 4987 if (error != 0) { 4988 printf("%s: could not configure valid TX chains\n", 4989 sc->sc_dev.dv_xname); 4990 return error; 4991 } 4992 } 4993 4994 /* Configure bluetooth coexistence. */ 4995 if (sc->sc_flags & IWN_FLAG_ADV_BT_COEX) 4996 error = iwn_send_advanced_btcoex(sc); 4997 else 4998 error = iwn_send_btcoex(sc); 4999 if (error != 0) { 5000 printf("%s: could not configure bluetooth coexistence\n", 5001 sc->sc_dev.dv_xname); 5002 return error; 5003 } 5004 5005 /* Set mode, channel, RX filter and enable RX. */ 5006 memset(&sc->rxon, 0, sizeof (struct iwn_rxon)); 5007 IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl)); 5008 IEEE80211_ADDR_COPY(sc->rxon.myaddr, ic->ic_myaddr); 5009 IEEE80211_ADDR_COPY(sc->rxon.wlap, ic->ic_myaddr); 5010 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_ibss_chan); 5011 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 5012 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_ibss_chan)) { 5013 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 5014 if (ic->ic_flags & IEEE80211_F_USEPROT) 5015 sc->rxon.flags |= htole32(IWN_RXON_TGG_PROT); 5016 DPRINTF(("%s: 2ghz prot 0x%x\n", __func__, 5017 le32toh(sc->rxon.flags))); 5018 } 5019 switch (ic->ic_opmode) { 5020 case IEEE80211_M_STA: 5021 sc->rxon.mode = IWN_MODE_STA; 5022 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST); 5023 break; 5024 case IEEE80211_M_MONITOR: 5025 sc->rxon.mode = IWN_MODE_MONITOR; 5026 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST | 5027 IWN_FILTER_CTL | IWN_FILTER_PROMISC); 5028 break; 5029 default: 5030 /* Should not get there. 
*/ 5031 break; 5032 } 5033 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */ 5034 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */ 5035 sc->rxon.ht_single_mask = 0xff; 5036 sc->rxon.ht_dual_mask = 0xff; 5037 sc->rxon.ht_triple_mask = 0xff; 5038 rxchain = 5039 IWN_RXCHAIN_VALID(sc->rxchainmask) | 5040 IWN_RXCHAIN_MIMO_COUNT(sc->nrxchains) | 5041 IWN_RXCHAIN_IDLE_COUNT(sc->nrxchains); 5042 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 5043 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask); 5044 rxchain |= IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask); 5045 rxchain |= (IWN_RXCHAIN_DRIVER_FORCE | IWN_RXCHAIN_MIMO_FORCE); 5046 } 5047 sc->rxon.rxchain = htole16(rxchain); 5048 DPRINTF(("setting configuration\n")); 5049 DPRINTF(("%s: rxon chan %d flags %x cck %x ofdm %x rxchain %x\n", 5050 __func__, sc->rxon.chan, le32toh(sc->rxon.flags), sc->rxon.cck_mask, 5051 sc->rxon.ofdm_mask, sc->rxon.rxchain)); 5052 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0); 5053 if (error != 0) { 5054 printf("%s: RXON command failed\n", sc->sc_dev.dv_xname); 5055 return error; 5056 } 5057 5058 ridx = (sc->sc_ic.ic_curmode == IEEE80211_MODE_11A) ? 5059 IWN_RIDX_OFDM6 : IWN_RIDX_CCK1; 5060 if ((error = iwn_add_broadcast_node(sc, 0, ridx)) != 0) { 5061 printf("%s: could not add broadcast node\n", 5062 sc->sc_dev.dv_xname); 5063 return error; 5064 } 5065 5066 /* Configuration has changed, set TX power accordingly. */ 5067 if ((error = ops->set_txpower(sc, 0)) != 0) { 5068 printf("%s: could not set TX power\n", sc->sc_dev.dv_xname); 5069 return error; 5070 } 5071 5072 if ((error = iwn_set_critical_temp(sc)) != 0) { 5073 printf("%s: could not set critical temperature\n", 5074 sc->sc_dev.dv_xname); 5075 return error; 5076 } 5077 5078 /* Set power saving level to CAM during initialization. */ 5079 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) { 5080 printf("%s: could not set power saving level\n", 5081 sc->sc_dev.dv_xname); 5082 return error; 5083 } 5084 return 0; 5085 } 5086 5087 uint16_t 5088 iwn_get_active_dwell_time(struct iwn_softc *sc, 5089 uint16_t flags, uint8_t n_probes) 5090 { 5091 /* No channel? Default to 2GHz settings */ 5092 if (flags & IEEE80211_CHAN_2GHZ) { 5093 return (IWN_ACTIVE_DWELL_TIME_2GHZ + 5094 IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 5095 } 5096 5097 /* 5GHz dwell time */ 5098 return (IWN_ACTIVE_DWELL_TIME_5GHZ + 5099 IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 5100 } 5101 5102 /* 5103 * Limit the total dwell time to 85% of the beacon interval. 5104 * 5105 * Returns the dwell time in milliseconds. 5106 */ 5107 uint16_t 5108 iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time) 5109 { 5110 struct ieee80211com *ic = &sc->sc_ic; 5111 struct ieee80211_node *ni = ic->ic_bss; 5112 int bintval = 0; 5113 5114 /* bintval is in TU (1.024mS) */ 5115 if (ni != NULL) 5116 bintval = ni->ni_intval; 5117 5118 /* 5119 * If it's non-zero, we should calculate the minimum of 5120 * it and the DWELL_BASE. 5121 * 5122 * XXX Yes, the math should take into account that bintval 5123 * is 1.024mS, not 1mS.. 5124 */ 5125 if (ic->ic_state == IEEE80211_S_RUN && bintval > 0) 5126 return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100))); 5127 5128 /* No association context? 
Default */ 5129 return dwell_time; 5130 } 5131 5132 uint16_t 5133 iwn_get_passive_dwell_time(struct iwn_softc *sc, uint16_t flags) 5134 { 5135 uint16_t passive; 5136 if (flags & IEEE80211_CHAN_2GHZ) { 5137 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ; 5138 } else { 5139 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ; 5140 } 5141 5142 /* Clamp to the beacon interval if we're associated */ 5143 return (iwn_limit_dwell(sc, passive)); 5144 } 5145 5146 int 5147 iwn_scan(struct iwn_softc *sc, uint16_t flags, int bgscan) 5148 { 5149 struct ieee80211com *ic = &sc->sc_ic; 5150 struct iwn_scan_hdr *hdr; 5151 struct iwn_cmd_data *tx; 5152 struct iwn_scan_essid *essid; 5153 struct iwn_scan_chan *chan; 5154 struct ieee80211_frame *wh; 5155 struct ieee80211_rateset *rs; 5156 struct ieee80211_channel *c; 5157 uint8_t *buf, *frm; 5158 uint16_t rxchain, dwell_active, dwell_passive; 5159 uint8_t txant; 5160 int buflen, error, is_active; 5161 5162 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 5163 if (buf == NULL) { 5164 printf("%s: could not allocate buffer for scan command\n", 5165 sc->sc_dev.dv_xname); 5166 return ENOMEM; 5167 } 5168 hdr = (struct iwn_scan_hdr *)buf; 5169 /* 5170 * Move to the next channel if no frames are received within 10ms 5171 * after sending the probe request. 5172 */ 5173 hdr->quiet_time = htole16(10); /* timeout in milliseconds */ 5174 hdr->quiet_threshold = htole16(1); /* min # of packets */ 5175 5176 if (bgscan) { 5177 int bintval; 5178 5179 /* Set maximum off-channel time. */ 5180 hdr->max_out = htole32(200 * 1024); 5181 5182 /* Configure scan pauses which service on-channel traffic. */ 5183 bintval = ic->ic_bss->ni_intval ? ic->ic_bss->ni_intval : 100; 5184 hdr->pause_scan = htole32(((100 / bintval) << 22) | 5185 ((100 % bintval) * 1024)); 5186 } 5187 5188 /* Select antennas for scanning. */ 5189 rxchain = 5190 IWN_RXCHAIN_VALID(sc->rxchainmask) | 5191 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) | 5192 IWN_RXCHAIN_DRIVER_FORCE; 5193 if ((flags & IEEE80211_CHAN_5GHZ) && 5194 sc->hw_type == IWN_HW_REV_TYPE_4965) { 5195 /* 5196 * On 4965 ant A and C must be avoided in 5GHz because of a 5197 * HW bug which causes very weak RSSI values being reported. 5198 */ 5199 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B); 5200 } else /* Use all available RX antennas. */ 5201 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask); 5202 hdr->rxchain = htole16(rxchain); 5203 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON); 5204 5205 tx = (struct iwn_cmd_data *)(hdr + 1); 5206 tx->flags = htole32(IWN_TX_AUTO_SEQ); 5207 tx->id = sc->broadcast_id; 5208 tx->lifetime = htole32(IWN_LIFETIME_INFINITE); 5209 5210 if (flags & IEEE80211_CHAN_5GHZ) { 5211 /* Send probe requests at 6Mbps. */ 5212 tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp; 5213 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; 5214 } else { 5215 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO); 5216 if (bgscan && sc->hw_type == IWN_HW_REV_TYPE_4965 && 5217 sc->rxon.chan > 14) { 5218 /* 5219 * 4965 firmware can crash when sending probe requests 5220 * with CCK rates while associated to a 5GHz AP. 5221 * Send probe requests at 6Mbps OFDM as a workaround. 5222 */ 5223 tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp; 5224 } else { 5225 /* Send probe requests at 1Mbps. */ 5226 tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp; 5227 tx->rflags = IWN_RFLAG_CCK; 5228 } 5229 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; 5230 } 5231 /* Use the first valid TX antenna. 
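 * The probe request rate chosen above determines the rflags here: only
 * the 1 Mb/s CCK case sets IWN_RFLAG_CCK, while the selected antenna
 * bit is OR'd in regardless of the rate.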
*/ 5232 txant = IWN_LSB(sc->txchainmask); 5233 tx->rflags |= IWN_RFLAG_ANT(txant); 5234 5235 /* 5236 * Only do active scanning if we're announcing a probe request 5237 * for a given SSID (or more, if we ever add it to the driver.) 5238 */ 5239 is_active = 0; 5240 5241 /* 5242 * If we're scanning for a specific SSID, add it to the command. 5243 */ 5244 essid = (struct iwn_scan_essid *)(tx + 1); 5245 if (ic->ic_des_esslen != 0) { 5246 essid[0].id = IEEE80211_ELEMID_SSID; 5247 essid[0].len = ic->ic_des_esslen; 5248 memcpy(essid[0].data, ic->ic_des_essid, ic->ic_des_esslen); 5249 5250 is_active = 1; 5251 } 5252 /* 5253 * Build a probe request frame. Most of the following code is a 5254 * copy & paste of what is done in net80211. 5255 */ 5256 wh = (struct ieee80211_frame *)(essid + 20); 5257 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | 5258 IEEE80211_FC0_SUBTYPE_PROBE_REQ; 5259 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; 5260 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr); 5261 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr); 5262 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr); 5263 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ 5264 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ 5265 5266 frm = (uint8_t *)(wh + 1); 5267 frm = ieee80211_add_ssid(frm, NULL, 0); 5268 frm = ieee80211_add_rates(frm, rs); 5269 if (rs->rs_nrates > IEEE80211_RATE_SIZE) 5270 frm = ieee80211_add_xrates(frm, rs); 5271 if (ic->ic_flags & IEEE80211_F_HTON) 5272 frm = ieee80211_add_htcaps(frm, ic); 5273 5274 /* Set length of probe request. */ 5275 tx->len = htole16(frm - (uint8_t *)wh); 5276 5277 /* 5278 * If active scanning is requested but a certain channel is 5279 * marked passive, we can do active scanning if we detect 5280 * transmissions. 5281 * 5282 * There is an issue with some firmware versions that triggers 5283 * a sysassert on a "good CRC threshold" of zero (== disabled), 5284 * on a radar channel even though this means that we should NOT 5285 * send probes. 5286 * 5287 * The "good CRC threshold" is the number of frames that we 5288 * need to receive during our dwell time on a channel before 5289 * sending out probes -- setting this to a huge value will 5290 * mean we never reach it, but at the same time work around 5291 * the aforementioned issue. Thus use IWN_GOOD_CRC_TH_NEVER 5292 * here instead of IWN_GOOD_CRC_TH_DISABLED. 5293 * 5294 * This was fixed in later versions along with some other 5295 * scan changes, and the threshold behaves as a flag in those 5296 * versions. 5297 */ 5298 5299 /* 5300 * If we're doing active scanning, set the crc_threshold 5301 * to a suitable value. This is different to active veruss 5302 * passive scanning depending upon the channel flags; the 5303 * firmware will obey that particular check for us. 5304 */ 5305 if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN) 5306 hdr->crc_threshold = is_active ? 5307 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED; 5308 else 5309 hdr->crc_threshold = is_active ? 
5310 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER; 5311 5312 chan = (struct iwn_scan_chan *)frm; 5313 for (c = &ic->ic_channels[1]; 5314 c <= &ic->ic_channels[IEEE80211_CHAN_MAX]; c++) { 5315 if ((c->ic_flags & flags) != flags) 5316 continue; 5317 5318 chan->chan = htole16(ieee80211_chan2ieee(ic, c)); 5319 DPRINTFN(2, ("adding channel %d\n", chan->chan)); 5320 chan->flags = 0; 5321 if (ic->ic_des_esslen != 0) 5322 chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); 5323 5324 if (c->ic_flags & IEEE80211_CHAN_PASSIVE) 5325 chan->flags |= htole32(IWN_CHAN_PASSIVE); 5326 else 5327 chan->flags |= htole32(IWN_CHAN_ACTIVE); 5328 5329 /* 5330 * Calculate the active/passive dwell times. 5331 */ 5332 5333 dwell_active = iwn_get_active_dwell_time(sc, flags, is_active); 5334 dwell_passive = iwn_get_passive_dwell_time(sc, flags); 5335 5336 /* Make sure they're valid */ 5337 if (dwell_passive <= dwell_active) 5338 dwell_passive = dwell_active + 1; 5339 5340 chan->active = htole16(dwell_active); 5341 chan->passive = htole16(dwell_passive); 5342 5343 chan->dsp_gain = 0x6e; 5344 if (IEEE80211_IS_CHAN_5GHZ(c)) { 5345 chan->rf_gain = 0x3b; 5346 } else { 5347 chan->rf_gain = 0x28; 5348 } 5349 hdr->nchan++; 5350 chan++; 5351 } 5352 5353 buflen = (uint8_t *)chan - buf; 5354 hdr->len = htole16(buflen); 5355 5356 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); 5357 if (error == 0) { 5358 /* 5359 * The current mode might have been fixed during association. 5360 * Ensure all channels get scanned. 5361 */ 5362 if (IFM_MODE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO) 5363 ieee80211_setmode(ic, IEEE80211_MODE_AUTO); 5364 5365 sc->sc_flags |= IWN_FLAG_SCANNING; 5366 if (bgscan) 5367 sc->sc_flags |= IWN_FLAG_BGSCAN; 5368 } 5369 free(buf, M_DEVBUF, IWN_SCAN_MAXSZ); 5370 return error; 5371 } 5372 5373 void 5374 iwn_scan_abort(struct iwn_softc *sc) 5375 { 5376 iwn_cmd(sc, IWN_CMD_SCAN_ABORT, NULL, 0, 1); 5377 5378 /* XXX Cannot wait for status response in interrupt context. */ 5379 DELAY(100); 5380 5381 sc->sc_flags &= ~IWN_FLAG_SCANNING; 5382 sc->sc_flags &= ~IWN_FLAG_BGSCAN; 5383 } 5384 5385 int 5386 iwn_bgscan(struct ieee80211com *ic) 5387 { 5388 struct iwn_softc *sc = ic->ic_softc; 5389 int error; 5390 5391 if (sc->sc_flags & IWN_FLAG_SCANNING) 5392 return 0; 5393 5394 error = iwn_scan(sc, IEEE80211_CHAN_2GHZ, 1); 5395 if (error) 5396 printf("%s: could not initiate background scan\n", 5397 sc->sc_dev.dv_xname); 5398 return error; 5399 } 5400 5401 void 5402 iwn_rxon_configure_ht40(struct ieee80211com *ic, struct ieee80211_node *ni) 5403 { 5404 struct iwn_softc *sc = ic->ic_softc; 5405 uint8_t sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK); 5406 enum ieee80211_htprot htprot = (ni->ni_htop1 & 5407 IEEE80211_HTOP1_PROT_MASK); 5408 5409 sc->rxon.flags &= ~htole32(IWN_RXON_HT_CHANMODE_MIXED2040 | 5410 IWN_RXON_HT_CHANMODE_PURE40 | IWN_RXON_HT_HT40MINUS); 5411 5412 if (ieee80211_node_supports_ht_chan40(ni) && 5413 (sco == IEEE80211_HTOP0_SCO_SCA || 5414 sco == IEEE80211_HTOP0_SCO_SCB)) { 5415 if (sco == IEEE80211_HTOP0_SCO_SCB) 5416 sc->rxon.flags |= htole32(IWN_RXON_HT_HT40MINUS); 5417 if (htprot == IEEE80211_HTPROT_20MHZ) 5418 sc->rxon.flags |= htole32(IWN_RXON_HT_CHANMODE_PURE40); 5419 else 5420 sc->rxon.flags |= htole32( 5421 IWN_RXON_HT_CHANMODE_MIXED2040); 5422 } 5423 } 5424 5425 int 5426 iwn_rxon_ht40_enabled(struct iwn_softc *sc) 5427 { 5428 return ((le32toh(sc->rxon.flags) & IWN_RXON_HT_CHANMODE_MIXED2040) || 5429 (le32toh(sc->rxon.flags) & IWN_RXON_HT_CHANMODE_PURE40)) ? 
1 : 0; 5430 } 5431 5432 int 5433 iwn_auth(struct iwn_softc *sc, int arg) 5434 { 5435 struct iwn_ops *ops = &sc->ops; 5436 struct ieee80211com *ic = &sc->sc_ic; 5437 struct ieee80211_node *ni = ic->ic_bss; 5438 int error, ridx; 5439 int bss_switch = 5440 (!IEEE80211_ADDR_EQ(sc->bss_node_addr, etheranyaddr) && 5441 !IEEE80211_ADDR_EQ(sc->bss_node_addr, ni->ni_macaddr)); 5442 5443 /* Update adapter configuration. */ 5444 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 5445 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan); 5446 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); 5447 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) { 5448 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); 5449 if (ic->ic_flags & IEEE80211_F_USEPROT) 5450 sc->rxon.flags |= htole32(IWN_RXON_TGG_PROT); 5451 DPRINTF(("%s: 2ghz prot 0x%x\n", __func__, 5452 le32toh(sc->rxon.flags))); 5453 } 5454 if (ic->ic_flags & IEEE80211_F_SHSLOT) 5455 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 5456 else 5457 sc->rxon.flags &= ~htole32(IWN_RXON_SHSLOT); 5458 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 5459 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 5460 else 5461 sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE); 5462 switch (ic->ic_curmode) { 5463 case IEEE80211_MODE_11A: 5464 sc->rxon.cck_mask = 0; 5465 sc->rxon.ofdm_mask = 0x15; 5466 break; 5467 case IEEE80211_MODE_11B: 5468 sc->rxon.cck_mask = 0x03; 5469 sc->rxon.ofdm_mask = 0; 5470 break; 5471 default: /* Assume 802.11b/g/n. */ 5472 sc->rxon.cck_mask = 0x0f; 5473 sc->rxon.ofdm_mask = 0x15; 5474 } 5475 /* Configure 40MHz early to avoid problems on 6205 devices. */ 5476 iwn_rxon_configure_ht40(ic, ni); 5477 DPRINTF(("%s: rxon chan %d flags %x cck %x ofdm %x\n", __func__, 5478 sc->rxon.chan, le32toh(sc->rxon.flags), sc->rxon.cck_mask, 5479 sc->rxon.ofdm_mask)); 5480 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 5481 if (error != 0) { 5482 printf("%s: RXON command failed\n", sc->sc_dev.dv_xname); 5483 return error; 5484 } 5485 5486 /* Configuration has changed, set TX power accordingly. */ 5487 if ((error = ops->set_txpower(sc, 1)) != 0) { 5488 printf("%s: could not set TX power\n", sc->sc_dev.dv_xname); 5489 return error; 5490 } 5491 /* 5492 * Reconfiguring RXON clears the firmware nodes table so we must 5493 * add the broadcast node again. 5494 */ 5495 ridx = IEEE80211_IS_CHAN_5GHZ(ni->ni_chan) ? 5496 IWN_RIDX_OFDM6 : IWN_RIDX_CCK1; 5497 if ((error = iwn_add_broadcast_node(sc, 1, ridx)) != 0) { 5498 printf("%s: could not add broadcast node\n", 5499 sc->sc_dev.dv_xname); 5500 return error; 5501 } 5502 5503 /* 5504 * Make sure the firmware gets to see a beacon before we send 5505 * the auth request. Otherwise the Tx attempt can fail due to 5506 * the firmware's built-in regulatory domain enforcement. 5507 * Delaying here for every incoming deauth frame can result in a DoS. 5508 * Don't delay if we're here because of an incoming frame (arg != -1) 5509 * or if we're already waiting for a response (ic_mgt_timer != 0). 5510 * If we are switching APs after a background scan then net80211 has 5511 * just faked the reception of a deauth frame from our old AP, so it 5512 * is safe to delay in that case. 5513 */ 5514 if ((arg == -1 || bss_switch) && ic->ic_mgt_timer == 0) 5515 DELAY(ni->ni_intval * 3 * IEEE80211_DUR_TU); 5516 5517 /* We can now clear the cached address of our previous AP. 
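 * Once bss_node_addr is zeroed it matches etheranyaddr again, so a
 * repeated iwn_auth() is not treated as a BSS switch and the
 * beacon-wait delay above is not re-triggered on that path.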
*/ 5518 memset(sc->bss_node_addr, 0, sizeof(sc->bss_node_addr)); 5519 5520 return 0; 5521 } 5522 5523 int 5524 iwn_run(struct iwn_softc *sc) 5525 { 5526 struct iwn_ops *ops = &sc->ops; 5527 struct ieee80211com *ic = &sc->sc_ic; 5528 struct ieee80211_node *ni = ic->ic_bss; 5529 struct iwn_node *wn = (void *)ni; 5530 struct iwn_node_info node; 5531 int error; 5532 5533 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 5534 /* Link LED blinks while monitoring. */ 5535 iwn_set_led(sc, IWN_LED_LINK, 50, 50); 5536 return 0; 5537 } 5538 if ((error = iwn_set_timing(sc, ni)) != 0) { 5539 printf("%s: could not set timing\n", sc->sc_dev.dv_xname); 5540 return error; 5541 } 5542 5543 /* Update adapter configuration. */ 5544 sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd)); 5545 /* Short preamble and slot time are negotiated when associating. */ 5546 sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT); 5547 if (ic->ic_flags & IEEE80211_F_SHSLOT) 5548 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 5549 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 5550 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 5551 sc->rxon.filter |= htole32(IWN_FILTER_BSS); 5552 5553 /* HT is negotiated when associating. */ 5554 if (ni->ni_flags & IEEE80211_NODE_HT) { 5555 enum ieee80211_htprot htprot = 5556 (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK); 5557 DPRINTF(("%s: htprot = %d\n", __func__, htprot)); 5558 sc->rxon.flags |= htole32(IWN_RXON_HT_PROTMODE(htprot)); 5559 } else 5560 sc->rxon.flags &= ~htole32(IWN_RXON_HT_PROTMODE(3)); 5561 5562 iwn_rxon_configure_ht40(ic, ni); 5563 5564 if (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) { 5565 /* 11a or 11n 5GHz */ 5566 sc->rxon.cck_mask = 0; 5567 sc->rxon.ofdm_mask = 0x15; 5568 } else if (ni->ni_flags & IEEE80211_NODE_HT) { 5569 /* 11n 2GHz */ 5570 sc->rxon.cck_mask = 0x0f; 5571 sc->rxon.ofdm_mask = 0x15; 5572 } else { 5573 if (ni->ni_rates.rs_nrates == 4) { 5574 /* 11b */ 5575 sc->rxon.cck_mask = 0x03; 5576 sc->rxon.ofdm_mask = 0; 5577 } else { 5578 /* assume 11g */ 5579 sc->rxon.cck_mask = 0x0f; 5580 sc->rxon.ofdm_mask = 0x15; 5581 } 5582 } 5583 DPRINTF(("%s: rxon chan %d flags %x cck %x ofdm %x\n", __func__, 5584 sc->rxon.chan, le32toh(sc->rxon.flags), sc->rxon.cck_mask, 5585 sc->rxon.ofdm_mask)); 5586 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1); 5587 if (error != 0) { 5588 printf("%s: could not update configuration\n", 5589 sc->sc_dev.dv_xname); 5590 return error; 5591 } 5592 5593 /* Configuration has changed, set TX power accordingly. */ 5594 if ((error = ops->set_txpower(sc, 1)) != 0) { 5595 printf("%s: could not set TX power\n", sc->sc_dev.dv_xname); 5596 return error; 5597 } 5598 5599 /* Fake a join to initialize the TX rate. */ 5600 ((struct iwn_node *)ni)->id = IWN_ID_BSS; 5601 iwn_newassoc(ic, ni, 1); 5602 5603 /* Add BSS node. 
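 * The firmware keeps its own station table; entry IWN_ID_BSS describes
 * the BSS we are joining. For HT peers the A-MPDU size factor and
 * density are taken from ic_ampdu_params below, and IWN_40MHZ_ENABLE
 * is set whenever RXON already has an HT40 channel mode enabled.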
*/ 5604 memset(&node, 0, sizeof node); 5605 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 5606 node.id = IWN_ID_BSS; 5607 if (ni->ni_flags & IEEE80211_NODE_HT) { 5608 node.htmask = (IWN_AMDPU_SIZE_FACTOR_MASK | 5609 IWN_AMDPU_DENSITY_MASK); 5610 node.htflags = htole32( 5611 IWN_AMDPU_SIZE_FACTOR( 5612 (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_LE)) | 5613 IWN_AMDPU_DENSITY( 5614 (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) >> 2)); 5615 if (iwn_rxon_ht40_enabled(sc)) 5616 node.htflags |= htole32(IWN_40MHZ_ENABLE); 5617 } 5618 DPRINTF(("adding BSS node\n")); 5619 error = ops->add_node(sc, &node, 1); 5620 if (error != 0) { 5621 printf("%s: could not add BSS node\n", sc->sc_dev.dv_xname); 5622 return error; 5623 } 5624 5625 /* Cache address of AP in case it changes after a background scan. */ 5626 IEEE80211_ADDR_COPY(sc->bss_node_addr, ni->ni_macaddr); 5627 5628 DPRINTF(("setting link quality for node %d\n", node.id)); 5629 if ((error = iwn_set_link_quality(sc, ni)) != 0) { 5630 printf("%s: could not setup link quality for node %d\n", 5631 sc->sc_dev.dv_xname, node.id); 5632 return error; 5633 } 5634 5635 if ((error = iwn_init_sensitivity(sc)) != 0) { 5636 printf("%s: could not set sensitivity\n", 5637 sc->sc_dev.dv_xname); 5638 return error; 5639 } 5640 /* Start periodic calibration timer. */ 5641 sc->calib.state = IWN_CALIB_STATE_ASSOC; 5642 sc->calib_cnt = 0; 5643 timeout_add_msec(&sc->calib_to, 500); 5644 5645 ieee80211_ra_node_init(&wn->rn); 5646 5647 /* Link LED always on while associated. */ 5648 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 5649 return 0; 5650 } 5651 5652 /* 5653 * We support CCMP hardware encryption/decryption of unicast frames only. 5654 * HW support for TKIP really sucks. We should let TKIP die anyway. 5655 */ 5656 int 5657 iwn_set_key(struct ieee80211com *ic, struct ieee80211_node *ni, 5658 struct ieee80211_key *k) 5659 { 5660 struct iwn_softc *sc = ic->ic_softc; 5661 struct iwn_ops *ops = &sc->ops; 5662 struct iwn_node *wn = (void *)ni; 5663 struct iwn_node_info node; 5664 uint16_t kflags; 5665 5666 if ((k->k_flags & IEEE80211_KEY_GROUP) || 5667 k->k_cipher != IEEE80211_CIPHER_CCMP) 5668 return ieee80211_set_key(ic, ni, k); 5669 5670 kflags = IWN_KFLAG_CCMP | IWN_KFLAG_MAP | IWN_KFLAG_KID(k->k_id); 5671 if (k->k_flags & IEEE80211_KEY_GROUP) 5672 kflags |= IWN_KFLAG_GROUP; 5673 5674 memset(&node, 0, sizeof node); 5675 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ? 5676 sc->broadcast_id : wn->id; 5677 node.control = IWN_NODE_UPDATE; 5678 node.flags = IWN_FLAG_SET_KEY; 5679 node.kflags = htole16(kflags); 5680 node.kid = k->k_id; 5681 memcpy(node.key, k->k_key, k->k_len); 5682 DPRINTF(("set key id=%d for node %d\n", k->k_id, node.id)); 5683 return ops->add_node(sc, &node, 1); 5684 } 5685 5686 void 5687 iwn_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni, 5688 struct ieee80211_key *k) 5689 { 5690 struct iwn_softc *sc = ic->ic_softc; 5691 struct iwn_ops *ops = &sc->ops; 5692 struct iwn_node *wn = (void *)ni; 5693 struct iwn_node_info node; 5694 5695 if ((k->k_flags & IEEE80211_KEY_GROUP) || 5696 k->k_cipher != IEEE80211_CIPHER_CCMP) { 5697 /* See comment about other ciphers above. */ 5698 ieee80211_delete_key(ic, ni, k); 5699 return; 5700 } 5701 if (ic->ic_state != IEEE80211_S_RUN) 5702 return; /* Nothing to do. */ 5703 memset(&node, 0, sizeof node); 5704 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ? 
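/* Group keys never reach this point; see the early return above. */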
5705 sc->broadcast_id : wn->id; 5706 node.control = IWN_NODE_UPDATE; 5707 node.flags = IWN_FLAG_SET_KEY; 5708 node.kflags = htole16(IWN_KFLAG_INVALID); 5709 node.kid = 0xff; 5710 DPRINTF(("delete keys for node %d\n", node.id)); 5711 (void)ops->add_node(sc, &node, 1); 5712 } 5713 5714 void 5715 iwn_updatechan(struct ieee80211com *ic) 5716 { 5717 struct iwn_softc *sc = ic->ic_softc; 5718 5719 if (ic->ic_state != IEEE80211_S_RUN) 5720 return; 5721 5722 iwn_rxon_configure_ht40(ic, ic->ic_bss); 5723 sc->ops.update_rxon(sc); 5724 iwn_set_link_quality(sc, ic->ic_bss); 5725 } 5726 5727 void 5728 iwn_updateprot(struct ieee80211com *ic) 5729 { 5730 struct iwn_softc *sc = ic->ic_softc; 5731 enum ieee80211_htprot htprot; 5732 5733 if (ic->ic_state != IEEE80211_S_RUN) 5734 return; 5735 5736 /* Update ERP protection setting. */ 5737 if (ic->ic_flags & IEEE80211_F_USEPROT) 5738 sc->rxon.flags |= htole32(IWN_RXON_TGG_PROT); 5739 else 5740 sc->rxon.flags &= ~htole32(IWN_RXON_TGG_PROT); 5741 5742 /* Update HT protection mode setting. */ 5743 htprot = (ic->ic_bss->ni_htop1 & IEEE80211_HTOP1_PROT_MASK) >> 5744 IEEE80211_HTOP1_PROT_SHIFT; 5745 sc->rxon.flags &= ~htole32(IWN_RXON_HT_PROTMODE(3)); 5746 sc->rxon.flags |= htole32(IWN_RXON_HT_PROTMODE(htprot)); 5747 5748 sc->ops.update_rxon(sc); 5749 } 5750 5751 void 5752 iwn_updateslot(struct ieee80211com *ic) 5753 { 5754 struct iwn_softc *sc = ic->ic_softc; 5755 5756 if (ic->ic_state != IEEE80211_S_RUN) 5757 return; 5758 5759 if (ic->ic_flags & IEEE80211_F_SHSLOT) 5760 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT); 5761 else 5762 sc->rxon.flags &= ~htole32(IWN_RXON_SHSLOT); 5763 5764 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 5765 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE); 5766 else 5767 sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE); 5768 5769 sc->ops.update_rxon(sc); 5770 } 5771 5772 void 5773 iwn_update_rxon_restore_power(struct iwn_softc *sc) 5774 { 5775 struct ieee80211com *ic = &sc->sc_ic; 5776 struct iwn_ops *ops = &sc->ops; 5777 int error; 5778 5779 DELAY(100); 5780 5781 /* All RXONs wipe the firmware's txpower table. Restore it. */ 5782 error = ops->set_txpower(sc, 1); 5783 if (error != 0) 5784 printf("%s: could not set TX power\n", sc->sc_dev.dv_xname); 5785 5786 DELAY(100); 5787 5788 /* Restore power saving level */ 5789 if (ic->ic_flags & IEEE80211_F_PMGTON) 5790 error = iwn_set_pslevel(sc, 0, 3, 1); 5791 else 5792 error = iwn_set_pslevel(sc, 0, 0, 1); 5793 if (error != 0) 5794 printf("%s: could not set PS level\n", sc->sc_dev.dv_xname); 5795 } 5796 5797 void 5798 iwn5000_update_rxon(struct iwn_softc *sc) 5799 { 5800 struct iwn_rxon_assoc rxon_assoc; 5801 int s, error; 5802 5803 /* Update RXON config. 
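 * RXON_ASSOC updates only the association-related subset of the RXON
 * state (flags, filter, rate masks, RX chain, acquisition), so unlike
 * the full RXON command used in iwn_auth() and iwn_run() it should not
 * clear the firmware's node table.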
*/ 5804 memset(&rxon_assoc, 0, sizeof(rxon_assoc)); 5805 rxon_assoc.flags = sc->rxon.flags; 5806 rxon_assoc.filter = sc->rxon.filter; 5807 rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask; 5808 rxon_assoc.cck_mask = sc->rxon.cck_mask; 5809 rxon_assoc.ht_single_mask = sc->rxon.ht_single_mask; 5810 rxon_assoc.ht_dual_mask = sc->rxon.ht_dual_mask; 5811 rxon_assoc.ht_triple_mask = sc->rxon.ht_triple_mask; 5812 rxon_assoc.rxchain = sc->rxon.rxchain; 5813 rxon_assoc.acquisition = sc->rxon.acquisition; 5814 5815 s = splnet(); 5816 5817 error = iwn_cmd(sc, IWN_CMD_RXON_ASSOC, &rxon_assoc, 5818 sizeof(rxon_assoc), 1); 5819 if (error != 0) 5820 printf("%s: RXON_ASSOC command failed\n", sc->sc_dev.dv_xname); 5821 5822 iwn_update_rxon_restore_power(sc); 5823 5824 splx(s); 5825 } 5826 5827 void 5828 iwn4965_update_rxon(struct iwn_softc *sc) 5829 { 5830 struct iwn4965_rxon_assoc rxon_assoc; 5831 int s, error; 5832 5833 /* Update RXON config. */ 5834 memset(&rxon_assoc, 0, sizeof(rxon_assoc)); 5835 rxon_assoc.flags = sc->rxon.flags; 5836 rxon_assoc.filter = sc->rxon.filter; 5837 rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask; 5838 rxon_assoc.cck_mask = sc->rxon.cck_mask; 5839 rxon_assoc.ht_single_mask = sc->rxon.ht_single_mask; 5840 rxon_assoc.ht_dual_mask = sc->rxon.ht_dual_mask; 5841 rxon_assoc.rxchain = sc->rxon.rxchain; 5842 5843 s = splnet(); 5844 5845 error = iwn_cmd(sc, IWN_CMD_RXON_ASSOC, &rxon_assoc, 5846 sizeof(rxon_assoc), 1); 5847 if (error != 0) 5848 printf("%s: RXON_ASSOC command failed\n", sc->sc_dev.dv_xname); 5849 5850 iwn_update_rxon_restore_power(sc); 5851 5852 splx(s); 5853 } 5854 5855 /* 5856 * This function is called by upper layer when an ADDBA request is received 5857 * from another STA and before the ADDBA response is sent. 5858 */ 5859 int 5860 iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 5861 uint8_t tid) 5862 { 5863 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid]; 5864 struct iwn_softc *sc = ic->ic_softc; 5865 struct iwn_ops *ops = &sc->ops; 5866 struct iwn_node *wn = (void *)ni; 5867 struct iwn_node_info node; 5868 5869 memset(&node, 0, sizeof node); 5870 node.id = wn->id; 5871 node.control = IWN_NODE_UPDATE; 5872 node.flags = IWN_FLAG_SET_ADDBA; 5873 node.addba_tid = tid; 5874 node.addba_ssn = htole16(ba->ba_winstart); 5875 DPRINTF(("ADDBA RA=%d TID=%d SSN=%d\n", wn->id, tid, 5876 ba->ba_winstart)); 5877 /* XXX async command, so firmware may still fail to add BA agreement */ 5878 return ops->add_node(sc, &node, 1); 5879 } 5880 5881 /* 5882 * This function is called by upper layer on teardown of an HT-immediate 5883 * Block Ack agreement (e.g., upon receipt of a DELBA frame). 5884 */ 5885 void 5886 iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 5887 uint8_t tid) 5888 { 5889 struct iwn_softc *sc = ic->ic_softc; 5890 struct iwn_ops *ops = &sc->ops; 5891 struct iwn_node *wn = (void *)ni; 5892 struct iwn_node_info node; 5893 5894 memset(&node, 0, sizeof node); 5895 node.id = wn->id; 5896 node.control = IWN_NODE_UPDATE; 5897 node.flags = IWN_FLAG_SET_DELBA; 5898 node.delba_tid = tid; 5899 DPRINTF(("DELBA RA=%d TID=%d\n", wn->id, tid)); 5900 (void)ops->add_node(sc, &node, 1); 5901 } 5902 5903 /* 5904 * This function is called by upper layer when an ADDBA response is received 5905 * from another STA. 
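 * The TID is mapped onto one of the aggregation TX queues
 * (qid = sc->first_agg_txq + tid), TX for this RA/TID is re-enabled in
 * the firmware's node entry, and the TX scheduler is programmed with
 * the starting sequence number from the ADDBA request (ba_winstart).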
5906 */ 5907 int 5908 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, 5909 uint8_t tid) 5910 { 5911 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 5912 struct iwn_softc *sc = ic->ic_softc; 5913 struct iwn_ops *ops = &sc->ops; 5914 struct iwn_node *wn = (void *)ni; 5915 struct iwn_node_info node; 5916 int qid = sc->first_agg_txq + tid; 5917 int error; 5918 5919 /* Ensure we can map this TID to an aggregation queue. */ 5920 if (tid >= IWN_NUM_AMPDU_TID || ba->ba_winsize > IWN_SCHED_WINSZ || 5921 qid > sc->ntxqs || (sc->agg_queue_mask & (1 << qid))) 5922 return ENOSPC; 5923 5924 /* Enable TX for the specified RA/TID. */ 5925 wn->disable_tid &= ~(1 << tid); 5926 memset(&node, 0, sizeof node); 5927 node.id = wn->id; 5928 node.control = IWN_NODE_UPDATE; 5929 node.flags = IWN_FLAG_SET_DISABLE_TID; 5930 node.disable_tid = htole16(wn->disable_tid); 5931 error = ops->add_node(sc, &node, 1); 5932 if (error != 0) 5933 return error; 5934 5935 if ((error = iwn_nic_lock(sc)) != 0) 5936 return error; 5937 ops->ampdu_tx_start(sc, ni, tid, ba->ba_winstart); 5938 iwn_nic_unlock(sc); 5939 5940 sc->agg_queue_mask |= (1 << qid); 5941 sc->sc_tx_ba[tid].wn = wn; 5942 ba->ba_bitmap = 0; 5943 5944 return 0; 5945 } 5946 5947 void 5948 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, 5949 uint8_t tid) 5950 { 5951 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid]; 5952 struct iwn_softc *sc = ic->ic_softc; 5953 struct iwn_ops *ops = &sc->ops; 5954 int qid = sc->first_agg_txq + tid; 5955 struct iwn_node *wn = (void *)ni; 5956 struct iwn_node_info node; 5957 5958 /* Discard all frames in the current window. */ 5959 iwn_ampdu_txq_advance(sc, &sc->txq[qid], qid, 5960 IWN_AGG_SSN_TO_TXQ_IDX(ba->ba_winend)); 5961 5962 if (iwn_nic_lock(sc) != 0) 5963 return; 5964 ops->ampdu_tx_stop(sc, tid, ba->ba_winstart); 5965 iwn_nic_unlock(sc); 5966 5967 sc->agg_queue_mask &= ~(1 << qid); 5968 sc->sc_tx_ba[tid].wn = NULL; 5969 ba->ba_bitmap = 0; 5970 5971 /* Disable TX for the specified RA/TID. */ 5972 wn->disable_tid |= (1 << tid); 5973 memset(&node, 0, sizeof node); 5974 node.id = wn->id; 5975 node.control = IWN_NODE_UPDATE; 5976 node.flags = IWN_FLAG_SET_DISABLE_TID; 5977 node.disable_tid = htole16(wn->disable_tid); 5978 ops->add_node(sc, &node, 1); 5979 } 5980 5981 void 5982 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 5983 uint8_t tid, uint16_t ssn) 5984 { 5985 struct iwn_node *wn = (void *)ni; 5986 int qid = IWN4965_FIRST_AGG_TXQUEUE + tid; 5987 uint16_t idx = IWN_AGG_SSN_TO_TXQ_IDX(ssn); 5988 5989 /* Stop TX scheduler while we're changing its configuration. */ 5990 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 5991 IWN4965_TXQ_STATUS_CHGACT); 5992 5993 /* Assign RA/TID translation to the queue. */ 5994 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid), 5995 wn->id << 4 | tid); 5996 5997 /* Enable chain-building mode for the queue. */ 5998 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid); 5999 6000 /* Set starting sequence number from the ADDBA request. */ 6001 sc->txq[qid].cur = sc->txq[qid].read = idx; 6002 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | idx); 6003 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 6004 6005 /* Set scheduler window size. */ 6006 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), 6007 IWN_SCHED_WINSZ); 6008 /* Set scheduler frame limit. 
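 * The frame limit occupies the upper 16 bits of the second word of
 * this queue's scheduler entry (hence the shift); the window size
 * written above sits in the first word.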
*/ 6009 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 6010 IWN_SCHED_LIMIT << 16); 6011 6012 /* Enable interrupts for the queue. */ 6013 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 6014 6015 /* Mark the queue as active. */ 6016 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 6017 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA | 6018 iwn_tid2fifo[tid] << 1); 6019 } 6020 6021 void 6022 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 6023 { 6024 int qid = IWN4965_FIRST_AGG_TXQUEUE + tid; 6025 uint16_t idx = IWN_AGG_SSN_TO_TXQ_IDX(ssn); 6026 6027 /* Stop TX scheduler while we're changing its configuration. */ 6028 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 6029 IWN4965_TXQ_STATUS_CHGACT); 6030 6031 /* Set starting sequence number from the ADDBA request. */ 6032 sc->txq[qid].cur = sc->txq[qid].read = idx; 6033 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | idx); 6034 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); 6035 6036 /* Disable interrupts for the queue. */ 6037 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); 6038 6039 /* Mark the queue as inactive. */ 6040 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 6041 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); 6042 } 6043 6044 void 6045 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 6046 uint8_t tid, uint16_t ssn) 6047 { 6048 int qid = IWN5000_FIRST_AGG_TXQUEUE + tid; 6049 int idx = IWN_AGG_SSN_TO_TXQ_IDX(ssn); 6050 struct iwn_node *wn = (void *)ni; 6051 6052 /* Stop TX scheduler while we're changing its configuration. */ 6053 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 6054 IWN5000_TXQ_STATUS_CHGACT); 6055 6056 /* Assign RA/TID translation to the queue. */ 6057 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), 6058 wn->id << 4 | tid); 6059 6060 /* Enable chain-building mode for the queue. */ 6061 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); 6062 6063 /* Enable aggregation for the queue. */ 6064 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 6065 6066 /* Set starting sequence number from the ADDBA request. */ 6067 sc->txq[qid].cur = sc->txq[qid].read = idx; 6068 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | idx); 6069 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 6070 6071 /* Set scheduler window size and frame limit. */ 6072 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 6073 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 6074 6075 /* Enable interrupts for the queue. */ 6076 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 6077 6078 /* Mark the queue as active. */ 6079 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 6080 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); 6081 } 6082 6083 void 6084 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn) 6085 { 6086 int qid = IWN5000_FIRST_AGG_TXQUEUE + tid; 6087 int idx = IWN_AGG_SSN_TO_TXQ_IDX(ssn); 6088 6089 /* Stop TX scheduler while we're changing its configuration. */ 6090 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 6091 IWN5000_TXQ_STATUS_CHGACT); 6092 6093 /* Disable aggregation for the queue. */ 6094 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 6095 6096 /* Set starting sequence number from the ADDBA request. */ 6097 sc->txq[qid].cur = sc->txq[qid].read = idx; 6098 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | idx); 6099 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 6100 6101 /* Disable interrupts for the queue. 
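 * Each TX queue owns one bit in the scheduler interrupt mask, so only
 * this queue's interrupts are turned off before it is marked inactive.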
*/ 6102 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 6103 6104 /* Mark the queue as inactive. */ 6105 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 6106 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]); 6107 } 6108 6109 /* 6110 * Query calibration tables from the initialization firmware. We do this 6111 * only once at first boot. Called from a process context. 6112 */ 6113 int 6114 iwn5000_query_calibration(struct iwn_softc *sc) 6115 { 6116 struct iwn5000_calib_config cmd; 6117 int error; 6118 6119 memset(&cmd, 0, sizeof cmd); 6120 cmd.ucode.once.enable = 0xffffffff; 6121 cmd.ucode.once.start = 0xffffffff; 6122 cmd.ucode.once.send = 0xffffffff; 6123 cmd.ucode.flags = 0xffffffff; 6124 DPRINTF(("sending calibration query\n")); 6125 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0); 6126 if (error != 0) 6127 return error; 6128 6129 /* Wait at most two seconds for calibration to complete. */ 6130 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) 6131 error = tsleep_nsec(sc, PCATCH, "iwncal", SEC_TO_NSEC(2)); 6132 return error; 6133 } 6134 6135 /* 6136 * Send calibration results to the runtime firmware. These results were 6137 * obtained on first boot from the initialization firmware. 6138 */ 6139 int 6140 iwn5000_send_calibration(struct iwn_softc *sc) 6141 { 6142 int idx, error; 6143 6144 for (idx = 0; idx < 5; idx++) { 6145 if (sc->calibcmd[idx].buf == NULL) 6146 continue; /* No results available. */ 6147 DPRINTF(("send calibration result idx=%d len=%d\n", 6148 idx, sc->calibcmd[idx].len)); 6149 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf, 6150 sc->calibcmd[idx].len, 0); 6151 if (error != 0) { 6152 printf("%s: could not send calibration result\n", 6153 sc->sc_dev.dv_xname); 6154 return error; 6155 } 6156 } 6157 return 0; 6158 } 6159 6160 int 6161 iwn5000_send_wimax_coex(struct iwn_softc *sc) 6162 { 6163 struct iwn5000_wimax_coex wimax; 6164 6165 #ifdef notyet 6166 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 6167 /* Enable WiMAX coexistence for combo adapters. */ 6168 wimax.flags = 6169 IWN_WIMAX_COEX_ASSOC_WA_UNMASK | 6170 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK | 6171 IWN_WIMAX_COEX_STA_TABLE_VALID | 6172 IWN_WIMAX_COEX_ENABLE; 6173 memcpy(wimax.events, iwn6050_wimax_events, 6174 sizeof iwn6050_wimax_events); 6175 } else 6176 #endif 6177 { 6178 /* Disable WiMAX coexistence. 
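 * With the 6050-specific block above disabled ("notyet"), an all-zero
 * command is sent, which simply turns WiMAX coexistence off in the
 * firmware.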
*/ 6179 wimax.flags = 0; 6180 memset(wimax.events, 0, sizeof wimax.events); 6181 } 6182 DPRINTF(("Configuring WiMAX coexistence\n")); 6183 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); 6184 } 6185 6186 int 6187 iwn5000_crystal_calib(struct iwn_softc *sc) 6188 { 6189 struct iwn5000_phy_calib_crystal cmd; 6190 6191 memset(&cmd, 0, sizeof cmd); 6192 cmd.code = IWN5000_PHY_CALIB_CRYSTAL; 6193 cmd.ngroups = 1; 6194 cmd.isvalid = 1; 6195 cmd.cap_pin[0] = letoh32(sc->eeprom_crystal) & 0xff; 6196 cmd.cap_pin[1] = (letoh32(sc->eeprom_crystal) >> 16) & 0xff; 6197 DPRINTF(("sending crystal calibration %d, %d\n", 6198 cmd.cap_pin[0], cmd.cap_pin[1])); 6199 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 6200 } 6201 6202 int 6203 iwn6000_temp_offset_calib(struct iwn_softc *sc) 6204 { 6205 struct iwn6000_phy_calib_temp_offset cmd; 6206 6207 memset(&cmd, 0, sizeof cmd); 6208 cmd.code = IWN6000_PHY_CALIB_TEMP_OFFSET; 6209 cmd.ngroups = 1; 6210 cmd.isvalid = 1; 6211 if (sc->eeprom_temp != 0) 6212 cmd.offset = htole16(sc->eeprom_temp); 6213 else 6214 cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET); 6215 DPRINTF(("setting radio sensor offset to %d\n", letoh16(cmd.offset))); 6216 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 6217 } 6218 6219 int 6220 iwn2000_temp_offset_calib(struct iwn_softc *sc) 6221 { 6222 struct iwn2000_phy_calib_temp_offset cmd; 6223 6224 memset(&cmd, 0, sizeof cmd); 6225 cmd.code = IWN2000_PHY_CALIB_TEMP_OFFSET; 6226 cmd.ngroups = 1; 6227 cmd.isvalid = 1; 6228 if (sc->eeprom_rawtemp != 0) { 6229 cmd.offset_low = htole16(sc->eeprom_rawtemp); 6230 cmd.offset_high = htole16(sc->eeprom_temp); 6231 } else { 6232 cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET); 6233 cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET); 6234 } 6235 cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage); 6236 DPRINTF(("setting radio sensor offset to %d:%d, voltage to %d\n", 6237 letoh16(cmd.offset_low), letoh16(cmd.offset_high), 6238 letoh16(cmd.burnt_voltage_ref))); 6239 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 6240 } 6241 6242 /* 6243 * This function is called after the runtime firmware notifies us of its 6244 * readiness (called in a process context). 6245 */ 6246 int 6247 iwn4965_post_alive(struct iwn_softc *sc) 6248 { 6249 int error, qid; 6250 6251 if ((error = iwn_nic_lock(sc)) != 0) 6252 return error; 6253 6254 /* Clear TX scheduler state in SRAM. */ 6255 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 6256 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0, 6257 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t)); 6258 6259 /* Set physical address of TX scheduler rings (1KB aligned). */ 6260 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 6261 6262 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 6263 6264 /* Disable chain mode for all our 16 queues. */ 6265 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0); 6266 6267 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) { 6268 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0); 6269 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 6270 6271 /* Set scheduler window size. */ 6272 iwn_mem_write(sc, sc->sched_base + 6273 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); 6274 /* Set scheduler frame limit. */ 6275 iwn_mem_write(sc, sc->sched_base + 6276 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 6277 IWN_SCHED_LIMIT << 16); 6278 } 6279 6280 /* Enable interrupts for all our 16 queues. 
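 * One bit per queue: 0xffff covers queues 0-15 (IWN4965_NTXQUEUES).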
*/ 6281 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff); 6282 /* Identify TX FIFO rings (0-7). */ 6283 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff); 6284 6285 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 6286 for (qid = 0; qid < 7; qid++) { 6287 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; 6288 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 6289 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); 6290 } 6291 iwn_nic_unlock(sc); 6292 return 0; 6293 } 6294 6295 /* 6296 * This function is called after the initialization or runtime firmware 6297 * notifies us of its readiness (called in a process context). 6298 */ 6299 int 6300 iwn5000_post_alive(struct iwn_softc *sc) 6301 { 6302 int error, qid; 6303 6304 /* Switch to using ICT interrupt mode. */ 6305 iwn5000_ict_reset(sc); 6306 6307 if ((error = iwn_nic_lock(sc)) != 0) 6308 return error; 6309 6310 /* Clear TX scheduler state in SRAM. */ 6311 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 6312 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, 6313 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t)); 6314 6315 /* Set physical address of TX scheduler rings (1KB aligned). */ 6316 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 6317 6318 /* Disable scheduler chain extension (enabled by default in HW). */ 6319 iwn_prph_write(sc, IWN5000_SCHED_CHAINEXT_EN, 0); 6320 6321 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 6322 6323 /* Enable chain mode for all queues, except command queue. */ 6324 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef); 6325 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); 6326 6327 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { 6328 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); 6329 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 6330 6331 iwn_mem_write(sc, sc->sched_base + 6332 IWN5000_SCHED_QUEUE_OFFSET(qid), 0); 6333 /* Set scheduler window size and frame limit. */ 6334 iwn_mem_write(sc, sc->sched_base + 6335 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 6336 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 6337 } 6338 6339 /* Enable interrupts for all our 20 queues. */ 6340 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); 6341 /* Identify TX FIFO rings (0-7). */ 6342 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); 6343 6344 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 6345 for (qid = 0; qid < 7; qid++) { 6346 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; 6347 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 6348 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 6349 } 6350 iwn_nic_unlock(sc); 6351 6352 /* Configure WiMAX coexistence for combo adapters. */ 6353 error = iwn5000_send_wimax_coex(sc); 6354 if (error != 0) { 6355 printf("%s: could not configure WiMAX coexistence\n", 6356 sc->sc_dev.dv_xname); 6357 return error; 6358 } 6359 if (sc->hw_type != IWN_HW_REV_TYPE_5150) { 6360 /* Perform crystal calibration. */ 6361 error = iwn5000_crystal_calib(sc); 6362 if (error != 0) { 6363 printf("%s: crystal calibration failed\n", 6364 sc->sc_dev.dv_xname); 6365 return error; 6366 } 6367 } 6368 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) { 6369 /* Query calibration from the initialization firmware. */ 6370 if ((error = iwn5000_query_calibration(sc)) != 0) { 6371 printf("%s: could not query calibration\n", 6372 sc->sc_dev.dv_xname); 6373 return error; 6374 } 6375 /* 6376 * We have the calibration results now, reboot with the 6377 * runtime firmware (call ourselves recursively!) 
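 * With IWN_FLAG_CALIB_DONE now set, iwn_hw_init() loads the runtime
 * image and this function runs again, this time taking the "else"
 * branch below to upload the saved calibration results.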
6378 */ 6379 iwn_hw_stop(sc); 6380 error = iwn_hw_init(sc); 6381 } else { 6382 /* Send calibration results to runtime firmware. */ 6383 error = iwn5000_send_calibration(sc); 6384 } 6385 return error; 6386 } 6387 6388 /* 6389 * The firmware boot code is small and is intended to be copied directly into 6390 * the NIC internal memory (no DMA transfer). 6391 */ 6392 int 6393 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size) 6394 { 6395 int error, ntries; 6396 6397 size /= sizeof (uint32_t); 6398 6399 if ((error = iwn_nic_lock(sc)) != 0) 6400 return error; 6401 6402 /* Copy microcode image into NIC memory. */ 6403 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE, 6404 (const uint32_t *)ucode, size); 6405 6406 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0); 6407 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE); 6408 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size); 6409 6410 /* Start boot load now. */ 6411 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START); 6412 6413 /* Wait for transfer to complete. */ 6414 for (ntries = 0; ntries < 1000; ntries++) { 6415 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) & 6416 IWN_BSM_WR_CTRL_START)) 6417 break; 6418 DELAY(10); 6419 } 6420 if (ntries == 1000) { 6421 printf("%s: could not load boot firmware\n", 6422 sc->sc_dev.dv_xname); 6423 iwn_nic_unlock(sc); 6424 return ETIMEDOUT; 6425 } 6426 6427 /* Enable boot after power up. */ 6428 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN); 6429 6430 iwn_nic_unlock(sc); 6431 return 0; 6432 } 6433 6434 int 6435 iwn4965_load_firmware(struct iwn_softc *sc) 6436 { 6437 struct iwn_fw_info *fw = &sc->fw; 6438 struct iwn_dma_info *dma = &sc->fw_dma; 6439 int error; 6440 6441 /* Copy initialization sections into pre-allocated DMA-safe memory. */ 6442 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 6443 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->init.datasz, 6444 BUS_DMASYNC_PREWRITE); 6445 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 6446 fw->init.text, fw->init.textsz); 6447 bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ, 6448 fw->init.textsz, BUS_DMASYNC_PREWRITE); 6449 6450 /* Tell adapter where to find initialization sections. */ 6451 if ((error = iwn_nic_lock(sc)) != 0) 6452 return error; 6453 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 6454 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz); 6455 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 6456 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 6457 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 6458 iwn_nic_unlock(sc); 6459 6460 /* Load firmware boot code. */ 6461 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 6462 if (error != 0) { 6463 printf("%s: could not load boot firmware\n", 6464 sc->sc_dev.dv_xname); 6465 return error; 6466 } 6467 /* Now press "execute". */ 6468 IWN_WRITE(sc, IWN_RESET, 0); 6469 6470 /* Wait at most one second for first alive notification. */ 6471 if ((error = tsleep_nsec(sc, PCATCH, "iwninit", SEC_TO_NSEC(1))) != 0) { 6472 printf("%s: timeout waiting for adapter to initialize\n", 6473 sc->sc_dev.dv_xname); 6474 return error; 6475 } 6476 6477 /* Retrieve current temperature for initial TX power calibration. */ 6478 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; 6479 sc->temp = iwn4965_get_temperature(sc); 6480 6481 /* Copy runtime sections into pre-allocated DMA-safe memory. 
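 * The same bounce buffer layout is used as for the init image above:
 * the data section at offset 0 and the text section at offset
 * IWN4965_FW_DATA_MAXSZ, with the BSM DRAM pointers updated to match.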
*/ 6482 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 6483 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->main.datasz, 6484 BUS_DMASYNC_PREWRITE); 6485 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 6486 fw->main.text, fw->main.textsz); 6487 bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ, 6488 fw->main.textsz, BUS_DMASYNC_PREWRITE); 6489 6490 /* Tell adapter where to find runtime sections. */ 6491 if ((error = iwn_nic_lock(sc)) != 0) 6492 return error; 6493 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 6494 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz); 6495 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 6496 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 6497 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, 6498 IWN_FW_UPDATED | fw->main.textsz); 6499 iwn_nic_unlock(sc); 6500 6501 return 0; 6502 } 6503 6504 int 6505 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst, 6506 const uint8_t *section, int size) 6507 { 6508 struct iwn_dma_info *dma = &sc->fw_dma; 6509 int error; 6510 6511 /* Copy firmware section into pre-allocated DMA-safe memory. */ 6512 memcpy(dma->vaddr, section, size); 6513 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, size, BUS_DMASYNC_PREWRITE); 6514 6515 if ((error = iwn_nic_lock(sc)) != 0) 6516 return error; 6517 6518 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 6519 IWN_FH_TX_CONFIG_DMA_PAUSE); 6520 6521 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst); 6522 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL), 6523 IWN_LOADDR(dma->paddr)); 6524 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL), 6525 IWN_HIADDR(dma->paddr) << 28 | size); 6526 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL), 6527 IWN_FH_TXBUF_STATUS_TBNUM(1) | 6528 IWN_FH_TXBUF_STATUS_TBIDX(1) | 6529 IWN_FH_TXBUF_STATUS_TFBD_VALID); 6530 6531 /* Kick Flow Handler to start DMA transfer. */ 6532 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 6533 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD); 6534 6535 iwn_nic_unlock(sc); 6536 6537 /* Wait at most five seconds for FH DMA transfer to complete. */ 6538 return tsleep_nsec(sc, PCATCH, "iwninit", SEC_TO_NSEC(5)); 6539 } 6540 6541 int 6542 iwn5000_load_firmware(struct iwn_softc *sc) 6543 { 6544 struct iwn_fw_part *fw; 6545 int error; 6546 6547 /* Load the initialization firmware on first boot only. */ 6548 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ? 6549 &sc->fw.main : &sc->fw.init; 6550 6551 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE, 6552 fw->text, fw->textsz); 6553 if (error != 0) { 6554 printf("%s: could not load firmware %s section\n", 6555 sc->sc_dev.dv_xname, ".text"); 6556 return error; 6557 } 6558 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE, 6559 fw->data, fw->datasz); 6560 if (error != 0) { 6561 printf("%s: could not load firmware %s section\n", 6562 sc->sc_dev.dv_xname, ".data"); 6563 return error; 6564 } 6565 6566 /* Now press "execute". */ 6567 IWN_WRITE(sc, IWN_RESET, 0); 6568 return 0; 6569 } 6570 6571 /* 6572 * Extract text and data sections from a legacy firmware image. 6573 */ 6574 int 6575 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw) 6576 { 6577 const uint32_t *ptr; 6578 size_t hdrlen = 24; 6579 uint32_t rev; 6580 6581 ptr = (const uint32_t *)fw->data; 6582 rev = letoh32(*ptr++); 6583 6584 /* Check firmware API version. 
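 * Legacy images begin with a small header: a revision word whose API
 * version is extracted by IWN_FW_API(), an extra build-number word for
 * API versions >= 3, and then the five section sizes parsed below.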
*/ 6585 if (IWN_FW_API(rev) <= 1) { 6586 printf("%s: bad firmware, need API version >=2\n", 6587 sc->sc_dev.dv_xname); 6588 return EINVAL; 6589 } 6590 if (IWN_FW_API(rev) >= 3) { 6591 /* Skip build number (version 2 header). */ 6592 hdrlen += 4; 6593 ptr++; 6594 } 6595 if (fw->size < hdrlen) { 6596 printf("%s: firmware too short: %zu bytes\n", 6597 sc->sc_dev.dv_xname, fw->size); 6598 return EINVAL; 6599 } 6600 fw->main.textsz = letoh32(*ptr++); 6601 fw->main.datasz = letoh32(*ptr++); 6602 fw->init.textsz = letoh32(*ptr++); 6603 fw->init.datasz = letoh32(*ptr++); 6604 fw->boot.textsz = letoh32(*ptr++); 6605 6606 /* Check that all firmware sections fit. */ 6607 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz + 6608 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 6609 printf("%s: firmware too short: %zu bytes\n", 6610 sc->sc_dev.dv_xname, fw->size); 6611 return EINVAL; 6612 } 6613 6614 /* Get pointers to firmware sections. */ 6615 fw->main.text = (const uint8_t *)ptr; 6616 fw->main.data = fw->main.text + fw->main.textsz; 6617 fw->init.text = fw->main.data + fw->main.datasz; 6618 fw->init.data = fw->init.text + fw->init.textsz; 6619 fw->boot.text = fw->init.data + fw->init.datasz; 6620 return 0; 6621 } 6622 6623 /* 6624 * Extract text and data sections from a TLV firmware image. 6625 */ 6626 int 6627 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, 6628 uint16_t alt) 6629 { 6630 const struct iwn_fw_tlv_hdr *hdr; 6631 const struct iwn_fw_tlv *tlv; 6632 const uint8_t *ptr, *end; 6633 uint64_t altmask; 6634 uint32_t len; 6635 6636 if (fw->size < sizeof (*hdr)) { 6637 printf("%s: firmware too short: %zu bytes\n", 6638 sc->sc_dev.dv_xname, fw->size); 6639 return EINVAL; 6640 } 6641 hdr = (const struct iwn_fw_tlv_hdr *)fw->data; 6642 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { 6643 printf("%s: bad firmware signature 0x%08x\n", 6644 sc->sc_dev.dv_xname, letoh32(hdr->signature)); 6645 return EINVAL; 6646 } 6647 DPRINTF(("FW: \"%.64s\", build 0x%x\n", hdr->descr, 6648 letoh32(hdr->build))); 6649 6650 /* 6651 * Select the closest supported alternative that is less than 6652 * or equal to the specified one. 6653 */ 6654 altmask = letoh64(hdr->altmask); 6655 while (alt > 0 && !(altmask & (1ULL << alt))) 6656 alt--; /* Downgrade. */ 6657 DPRINTF(("using alternative %d\n", alt)); 6658 6659 ptr = (const uint8_t *)(hdr + 1); 6660 end = (const uint8_t *)(fw->data + fw->size); 6661 6662 /* Parse type-length-value fields. */ 6663 while (ptr + sizeof (*tlv) <= end) { 6664 tlv = (const struct iwn_fw_tlv *)ptr; 6665 len = letoh32(tlv->len); 6666 6667 ptr += sizeof (*tlv); 6668 if (ptr + len > end) { 6669 printf("%s: firmware too short: %zu bytes\n", 6670 sc->sc_dev.dv_xname, fw->size); 6671 return EINVAL; 6672 } 6673 /* Skip other alternatives. 
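 * An alternative number of zero marks a TLV that applies to every
 * alternative; anything else must match the one selected above.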
*/ 6674 if (tlv->alt != 0 && tlv->alt != htole16(alt)) 6675 goto next; 6676 6677 switch (letoh16(tlv->type)) { 6678 case IWN_FW_TLV_MAIN_TEXT: 6679 fw->main.text = ptr; 6680 fw->main.textsz = len; 6681 break; 6682 case IWN_FW_TLV_MAIN_DATA: 6683 fw->main.data = ptr; 6684 fw->main.datasz = len; 6685 break; 6686 case IWN_FW_TLV_INIT_TEXT: 6687 fw->init.text = ptr; 6688 fw->init.textsz = len; 6689 break; 6690 case IWN_FW_TLV_INIT_DATA: 6691 fw->init.data = ptr; 6692 fw->init.datasz = len; 6693 break; 6694 case IWN_FW_TLV_BOOT_TEXT: 6695 fw->boot.text = ptr; 6696 fw->boot.textsz = len; 6697 break; 6698 case IWN_FW_TLV_ENH_SENS: 6699 if (len != 0) { 6700 printf("%s: TLV type %d has invalid size %u\n", 6701 sc->sc_dev.dv_xname, letoh16(tlv->type), 6702 len); 6703 goto next; 6704 } 6705 sc->sc_flags |= IWN_FLAG_ENH_SENS; 6706 break; 6707 case IWN_FW_TLV_PHY_CALIB: 6708 if (len != sizeof(uint32_t)) { 6709 printf("%s: TLV type %d has invalid size %u\n", 6710 sc->sc_dev.dv_xname, letoh16(tlv->type), 6711 len); 6712 goto next; 6713 } 6714 if (letoh32(*ptr) <= IWN5000_PHY_CALIB_MAX) { 6715 sc->reset_noise_gain = letoh32(*ptr); 6716 sc->noise_gain = letoh32(*ptr) + 1; 6717 } 6718 break; 6719 case IWN_FW_TLV_FLAGS: 6720 if (len < sizeof(uint32_t)) 6721 break; 6722 if (len % sizeof(uint32_t)) 6723 break; 6724 sc->tlv_feature_flags = letoh32(*ptr); 6725 DPRINTF(("feature: 0x%08x\n", sc->tlv_feature_flags)); 6726 break; 6727 default: 6728 DPRINTF(("TLV type %d not handled\n", 6729 letoh16(tlv->type))); 6730 break; 6731 } 6732 next: /* TLV fields are 32-bit aligned. */ 6733 ptr += (len + 3) & ~3; 6734 } 6735 return 0; 6736 } 6737 6738 int 6739 iwn_read_firmware(struct iwn_softc *sc) 6740 { 6741 struct iwn_fw_info *fw = &sc->fw; 6742 int error; 6743 6744 /* 6745 * Some PHY calibration commands are firmware-dependent; these 6746 * are the default values that will be overridden if 6747 * necessary. 6748 */ 6749 sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; 6750 sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN; 6751 6752 memset(fw, 0, sizeof (*fw)); 6753 6754 /* Read firmware image from filesystem. */ 6755 if ((error = loadfirmware(sc->fwname, &fw->data, &fw->size)) != 0) { 6756 printf("%s: could not read firmware %s (error %d)\n", 6757 sc->sc_dev.dv_xname, sc->fwname, error); 6758 return error; 6759 } 6760 if (fw->size < sizeof (uint32_t)) { 6761 printf("%s: firmware too short: %zu bytes\n", 6762 sc->sc_dev.dv_xname, fw->size); 6763 free(fw->data, M_DEVBUF, fw->size); 6764 return EINVAL; 6765 } 6766 6767 /* Retrieve text and data sections. */ 6768 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */ 6769 error = iwn_read_firmware_leg(sc, fw); 6770 else 6771 error = iwn_read_firmware_tlv(sc, fw, 1); 6772 if (error != 0) { 6773 printf("%s: could not read firmware sections\n", 6774 sc->sc_dev.dv_xname); 6775 free(fw->data, M_DEVBUF, fw->size); 6776 return error; 6777 } 6778 6779 /* Make sure text and data sections fit in hardware memory. */ 6780 if (fw->main.textsz > sc->fw_text_maxsz || 6781 fw->main.datasz > sc->fw_data_maxsz || 6782 fw->init.textsz > sc->fw_text_maxsz || 6783 fw->init.datasz > sc->fw_data_maxsz || 6784 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ || 6785 (fw->boot.textsz & 3) != 0) { 6786 printf("%s: firmware sections too large\n", 6787 sc->sc_dev.dv_xname); 6788 free(fw->data, M_DEVBUF, fw->size); 6789 return EINVAL; 6790 } 6791 6792 /* We can proceed with loading the firmware. 
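 * The image is kept in fw->data for now; iwn_init() frees it once
 * iwn_hw_init() has uploaded all sections.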
*/ 6793 return 0; 6794 } 6795 6796 int 6797 iwn_clock_wait(struct iwn_softc *sc) 6798 { 6799 int ntries; 6800 6801 /* Set "initialization complete" bit. */ 6802 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 6803 6804 /* Wait for clock stabilization. */ 6805 for (ntries = 0; ntries < 2500; ntries++) { 6806 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY) 6807 return 0; 6808 DELAY(10); 6809 } 6810 printf("%s: timeout waiting for clock stabilization\n", 6811 sc->sc_dev.dv_xname); 6812 return ETIMEDOUT; 6813 } 6814 6815 int 6816 iwn_apm_init(struct iwn_softc *sc) 6817 { 6818 pcireg_t reg; 6819 int error; 6820 6821 /* Disable L0s exit timer (NMI bug workaround). */ 6822 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); 6823 /* Don't wait for ICH L0s (ICH bug workaround). */ 6824 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); 6825 6826 /* Set FH wait threshold to max (HW bug under stress workaround). */ 6827 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000); 6828 6829 /* Enable HAP INTA to move adapter from L1a to L0s. */ 6830 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A); 6831 6832 /* Retrieve PCIe Active State Power Management (ASPM). */ 6833 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 6834 sc->sc_cap_off + PCI_PCIE_LCSR); 6835 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 6836 if (reg & PCI_PCIE_LCSR_ASPM_L1) /* L1 Entry enabled. */ 6837 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 6838 else 6839 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 6840 6841 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 6842 sc->hw_type <= IWN_HW_REV_TYPE_1000) 6843 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT); 6844 6845 /* Wait for clock stabilization before accessing prph. */ 6846 if ((error = iwn_clock_wait(sc)) != 0) 6847 return error; 6848 6849 if ((error = iwn_nic_lock(sc)) != 0) 6850 return error; 6851 if (sc->hw_type == IWN_HW_REV_TYPE_4965) { 6852 /* Enable DMA and BSM (Bootstrap State Machine). */ 6853 iwn_prph_write(sc, IWN_APMG_CLK_EN, 6854 IWN_APMG_CLK_CTRL_DMA_CLK_RQT | 6855 IWN_APMG_CLK_CTRL_BSM_CLK_RQT); 6856 } else { 6857 /* Enable DMA. */ 6858 iwn_prph_write(sc, IWN_APMG_CLK_EN, 6859 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 6860 } 6861 DELAY(20); 6862 /* Disable L1-Active. */ 6863 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); 6864 iwn_nic_unlock(sc); 6865 6866 return 0; 6867 } 6868 6869 void 6870 iwn_apm_stop_master(struct iwn_softc *sc) 6871 { 6872 int ntries; 6873 6874 /* Stop busmaster DMA activity. */ 6875 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER); 6876 for (ntries = 0; ntries < 100; ntries++) { 6877 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED) 6878 return; 6879 DELAY(10); 6880 } 6881 printf("%s: timeout waiting for master\n", sc->sc_dev.dv_xname); 6882 } 6883 6884 void 6885 iwn_apm_stop(struct iwn_softc *sc) 6886 { 6887 iwn_apm_stop_master(sc); 6888 6889 /* Reset the entire device. */ 6890 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW); 6891 DELAY(10); 6892 /* Clear "initialization complete" bit. */ 6893 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 6894 } 6895 6896 int 6897 iwn4965_nic_config(struct iwn_softc *sc) 6898 { 6899 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) { 6900 /* 6901 * I don't believe this to be correct but this is what the 6902 * vendor driver is doing. Probably the bits should not be 6903 * shifted in IWN_RFCFG_*. 
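 * (The EEPROM radio configuration word encodes the radio type, step
 * and dash revision; the IWN_RFCFG_* macros extract those fields
 * before they are OR'd into IWN_HW_IF_CONFIG, here and in
 * iwn5000_nic_config().)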
6904 */ 6905 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6906 IWN_RFCFG_TYPE(sc->rfcfg) | 6907 IWN_RFCFG_STEP(sc->rfcfg) | 6908 IWN_RFCFG_DASH(sc->rfcfg)); 6909 } 6910 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6911 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 6912 return 0; 6913 } 6914 6915 int 6916 iwn5000_nic_config(struct iwn_softc *sc) 6917 { 6918 uint32_t tmp; 6919 int error; 6920 6921 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) { 6922 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6923 IWN_RFCFG_TYPE(sc->rfcfg) | 6924 IWN_RFCFG_STEP(sc->rfcfg) | 6925 IWN_RFCFG_DASH(sc->rfcfg)); 6926 } 6927 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 6928 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 6929 6930 if ((error = iwn_nic_lock(sc)) != 0) 6931 return error; 6932 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS); 6933 6934 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 6935 /* 6936 * Select first Switching Voltage Regulator (1.32V) to 6937 * solve a stability issue related to noisy DC2DC line 6938 * in the silicon of 1000 Series. 6939 */ 6940 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR); 6941 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK; 6942 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32; 6943 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp); 6944 } 6945 iwn_nic_unlock(sc); 6946 6947 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) { 6948 /* Use internal power amplifier only. */ 6949 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA); 6950 } 6951 if ((sc->hw_type == IWN_HW_REV_TYPE_6050 || 6952 sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) { 6953 /* Indicate that ROM calibration version is >=6. */ 6954 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6); 6955 } 6956 if (sc->hw_type == IWN_HW_REV_TYPE_6005) 6957 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2); 6958 if (sc->hw_type == IWN_HW_REV_TYPE_2030 || 6959 sc->hw_type == IWN_HW_REV_TYPE_2000 || 6960 sc->hw_type == IWN_HW_REV_TYPE_135 || 6961 sc->hw_type == IWN_HW_REV_TYPE_105) 6962 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_IQ_INVERT); 6963 return 0; 6964 } 6965 6966 /* 6967 * Take NIC ownership over Intel Active Management Technology (AMT). 6968 */ 6969 int 6970 iwn_hw_prepare(struct iwn_softc *sc) 6971 { 6972 int ntries; 6973 6974 /* Check if hardware is ready. */ 6975 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 6976 for (ntries = 0; ntries < 5; ntries++) { 6977 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 6978 IWN_HW_IF_CONFIG_NIC_READY) 6979 return 0; 6980 DELAY(10); 6981 } 6982 6983 /* Hardware not ready, force into ready state. */ 6984 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE); 6985 for (ntries = 0; ntries < 15000; ntries++) { 6986 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) & 6987 IWN_HW_IF_CONFIG_PREPARE_DONE)) 6988 break; 6989 DELAY(10); 6990 } 6991 if (ntries == 15000) 6992 return ETIMEDOUT; 6993 6994 /* Hardware should be ready now. */ 6995 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 6996 for (ntries = 0; ntries < 5; ntries++) { 6997 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 6998 IWN_HW_IF_CONFIG_NIC_READY) 6999 return 0; 7000 DELAY(10); 7001 } 7002 return ETIMEDOUT; 7003 } 7004 7005 int 7006 iwn_hw_init(struct iwn_softc *sc) 7007 { 7008 struct iwn_ops *ops = &sc->ops; 7009 int error, chnl, qid; 7010 7011 /* Clear pending interrupts. */ 7012 IWN_WRITE(sc, IWN_INT, 0xffffffff); 7013 7014 if ((error = iwn_apm_init(sc)) != 0) { 7015 printf("%s: could not power on adapter\n", 7016 sc->sc_dev.dv_xname); 7017 return error; 7018 } 7019 7020 /* Select VMAIN power source. 
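 * Clearing the power-source bits in the APMG power-state register
 * selects VMAIN, the main supply, rather than the auxiliary VAUX rail.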
*/ 7021 if ((error = iwn_nic_lock(sc)) != 0) 7022 return error; 7023 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK); 7024 iwn_nic_unlock(sc); 7025 7026 /* Perform adapter-specific initialization. */ 7027 if ((error = ops->nic_config(sc)) != 0) 7028 return error; 7029 7030 /* Initialize RX ring. */ 7031 if ((error = iwn_nic_lock(sc)) != 0) 7032 return error; 7033 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 7034 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0); 7035 /* Set physical address of RX ring (256-byte aligned). */ 7036 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8); 7037 /* Set physical address of RX status (16-byte aligned). */ 7038 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4); 7039 /* Enable RX. */ 7040 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 7041 IWN_FH_RX_CONFIG_ENA | 7042 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */ 7043 IWN_FH_RX_CONFIG_IRQ_DST_HOST | 7044 IWN_FH_RX_CONFIG_SINGLE_FRAME | 7045 IWN_FH_RX_CONFIG_RB_TIMEOUT(0x11) | /* about 1/2 msec */ 7046 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG)); 7047 iwn_nic_unlock(sc); 7048 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7); 7049 7050 if ((error = iwn_nic_lock(sc)) != 0) 7051 return error; 7052 7053 /* Initialize TX scheduler. */ 7054 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 7055 7056 /* Set physical address of "keep warm" page (16-byte aligned). */ 7057 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4); 7058 7059 /* Initialize TX rings. */ 7060 for (qid = 0; qid < sc->ntxqs; qid++) { 7061 struct iwn_tx_ring *txq = &sc->txq[qid]; 7062 7063 /* Set physical address of TX ring (256-byte aligned). */ 7064 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid), 7065 txq->desc_dma.paddr >> 8); 7066 } 7067 iwn_nic_unlock(sc); 7068 7069 /* Enable DMA channels. */ 7070 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 7071 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 7072 IWN_FH_TX_CONFIG_DMA_ENA | 7073 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA); 7074 } 7075 7076 /* Clear "radio off" and "commands blocked" bits. */ 7077 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 7078 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED); 7079 7080 /* Clear pending interrupts. */ 7081 IWN_WRITE(sc, IWN_INT, 0xffffffff); 7082 /* Enable interrupt coalescing. */ 7083 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8); 7084 /* Enable interrupts. */ 7085 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 7086 7087 /* _Really_ make sure "radio off" bit is cleared! */ 7088 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 7089 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 7090 7091 /* Enable shadow registers. */ 7092 if (sc->hw_type >= IWN_HW_REV_TYPE_6000) 7093 IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff); 7094 7095 if ((error = ops->load_firmware(sc)) != 0) { 7096 printf("%s: could not load firmware\n", sc->sc_dev.dv_xname); 7097 return error; 7098 } 7099 /* Wait at most one second for firmware alive notification. */ 7100 if ((error = tsleep_nsec(sc, PCATCH, "iwninit", SEC_TO_NSEC(1))) != 0) { 7101 printf("%s: timeout waiting for adapter to initialize\n", 7102 sc->sc_dev.dv_xname); 7103 return error; 7104 } 7105 /* Do post-firmware initialization. */ 7106 return ops->post_alive(sc); 7107 } 7108 7109 void 7110 iwn_hw_stop(struct iwn_softc *sc) 7111 { 7112 int chnl, qid, ntries; 7113 7114 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO); 7115 7116 /* Disable interrupts. 
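 * Mask everything, then acknowledge any interrupts still pending in
 * both the main and the flow-handler interrupt status registers.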
*/ 7117 IWN_WRITE(sc, IWN_INT_MASK, 0); 7118 IWN_WRITE(sc, IWN_INT, 0xffffffff); 7119 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff); 7120 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 7121 7122 /* Make sure we no longer hold the NIC lock. */ 7123 iwn_nic_unlock(sc); 7124 7125 /* Stop TX scheduler. */ 7126 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 7127 7128 /* Stop all DMA channels. */ 7129 if (iwn_nic_lock(sc) == 0) { 7130 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 7131 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0); 7132 for (ntries = 0; ntries < 200; ntries++) { 7133 if (IWN_READ(sc, IWN_FH_TX_STATUS) & 7134 IWN_FH_TX_STATUS_IDLE(chnl)) 7135 break; 7136 DELAY(10); 7137 } 7138 } 7139 iwn_nic_unlock(sc); 7140 } 7141 7142 /* Stop RX ring. */ 7143 iwn_reset_rx_ring(sc, &sc->rxq); 7144 7145 /* Reset all TX rings. */ 7146 for (qid = 0; qid < sc->ntxqs; qid++) 7147 iwn_reset_tx_ring(sc, &sc->txq[qid]); 7148 7149 if (iwn_nic_lock(sc) == 0) { 7150 iwn_prph_write(sc, IWN_APMG_CLK_DIS, 7151 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 7152 iwn_nic_unlock(sc); 7153 } 7154 DELAY(5); 7155 /* Power OFF adapter. */ 7156 iwn_apm_stop(sc); 7157 } 7158 7159 int 7160 iwn_init(struct ifnet *ifp) 7161 { 7162 struct iwn_softc *sc = ifp->if_softc; 7163 struct ieee80211com *ic = &sc->sc_ic; 7164 int error; 7165 7166 memset(sc->bss_node_addr, 0, sizeof(sc->bss_node_addr)); 7167 sc->agg_queue_mask = 0; 7168 memset(sc->sc_tx_ba, 0, sizeof(sc->sc_tx_ba)); 7169 7170 if ((error = iwn_hw_prepare(sc)) != 0) { 7171 printf("%s: hardware not ready\n", sc->sc_dev.dv_xname); 7172 goto fail; 7173 } 7174 7175 /* Initialize interrupt mask to default value. */ 7176 sc->int_mask = IWN_INT_MASK_DEF; 7177 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 7178 7179 /* Check that the radio is not disabled by hardware switch. */ 7180 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) { 7181 printf("%s: radio is disabled by hardware switch\n", 7182 sc->sc_dev.dv_xname); 7183 error = EPERM; /* :-) */ 7184 /* Re-enable interrupts. */ 7185 IWN_WRITE(sc, IWN_INT, 0xffffffff); 7186 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 7187 return error; 7188 } 7189 7190 /* Read firmware images from the filesystem. */ 7191 if ((error = iwn_read_firmware(sc)) != 0) { 7192 printf("%s: could not read firmware\n", sc->sc_dev.dv_xname); 7193 goto fail; 7194 } 7195 7196 /* Initialize hardware and upload firmware. */ 7197 error = iwn_hw_init(sc); 7198 free(sc->fw.data, M_DEVBUF, sc->fw.size); 7199 if (error != 0) { 7200 printf("%s: could not initialize hardware\n", 7201 sc->sc_dev.dv_xname); 7202 goto fail; 7203 } 7204 7205 /* Configure adapter now that it is ready. */ 7206 if ((error = iwn_config(sc)) != 0) { 7207 printf("%s: could not configure device\n", 7208 sc->sc_dev.dv_xname); 7209 goto fail; 7210 } 7211 7212 ifq_clr_oactive(&ifp->if_snd); 7213 ifp->if_flags |= IFF_RUNNING; 7214 7215 if (ic->ic_opmode != IEEE80211_M_MONITOR) 7216 ieee80211_begin_scan(ifp); 7217 else 7218 ieee80211_new_state(ic, IEEE80211_S_RUN, -1); 7219 7220 return 0; 7221 7222 fail: iwn_stop(ifp); 7223 return error; 7224 } 7225 7226 void 7227 iwn_stop(struct ifnet *ifp) 7228 { 7229 struct iwn_softc *sc = ifp->if_softc; 7230 struct ieee80211com *ic = &sc->sc_ic; 7231 7232 timeout_del(&sc->calib_to); 7233 ifp->if_timer = sc->sc_tx_timer = 0; 7234 ifp->if_flags &= ~IFF_RUNNING; 7235 ifq_clr_oactive(&ifp->if_snd); 7236 7237 ieee80211_new_state(ic, IEEE80211_S_INIT, -1); 7238 7239 /* Power OFF hardware. */ 7240 iwn_hw_stop(sc); 7241 } 7242