/*
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * Modified for iPXE, July 2009, by Joshua Oreman <oremanj@rwcr.net>
 * Original from Linux kernel 2.6.30.
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */

FILE_LICENCE ( BSD3 );

#include <stdlib.h>
#include <ipxe/malloc.h>
#include <ipxe/timer.h>
#include <ipxe/netdevice.h>
#include <ipxe/pci.h>
#include <ipxe/pci_io.h>

#include "base.h"
#include "reg.h"

#define ATH5K_CALIB_INTERVAL	10 /* Calibrate PHY every 10 seconds */
#define ATH5K_RETRIES		4  /* Number of times to retry packet sends */
#define ATH5K_DESC_ALIGN	16 /* Alignment for TX/RX descriptors */

/******************\
* Internal defines *
\******************/

/* Known PCI ids */
static struct pci_device_id ath5k_nics[] = {
	PCI_ROM(0x168c, 0x0207, "ath5210e", "Atheros 5210 early", AR5K_AR5210),
	PCI_ROM(0x168c, 0x0007, "ath5210", "Atheros 5210", AR5K_AR5210),
	PCI_ROM(0x168c, 0x0011, "ath5311", "Atheros 5311 (AHB)", AR5K_AR5211),
	PCI_ROM(0x168c, 0x0012, "ath5211", "Atheros 5211", AR5K_AR5211),
	PCI_ROM(0x168c, 0x0013, "ath5212", "Atheros 5212", AR5K_AR5212),
	PCI_ROM(0xa727, 0x0013, "ath5212c", "3com Ath 5212", AR5K_AR5212),
	PCI_ROM(0x10b7, 0x0013, "rdag675", "3com 3CRDAG675", AR5K_AR5212),
	PCI_ROM(0x168c, 0x1014, "ath5212m", "Ath 5212 miniPCI", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0014, "ath5212x14", "Atheros 5212 x14", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0015, "ath5212x15", "Atheros 5212 x15", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0016, "ath5212x16", "Atheros 5212 x16", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0017, "ath5212x17", "Atheros 5212 x17", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0018, "ath5212x18", "Atheros 5212 x18", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0019, "ath5212x19", "Atheros 5212 x19", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001a, "ath2413", "Atheros 2413 Griffin", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001b, "ath5413", "Atheros 5413 Eagle", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001c, "ath5212e", "Atheros 5212 PCI-E", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001d, "ath2417", "Atheros 2417 Nala", AR5K_AR5212),
};

#define ATH5K_SPMBL_NO   1
#define ATH5K_SPMBL_YES  2
#define ATH5K_SPMBL_BOTH 3

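/*
 * Bitrate table: rates are given in units of 100 kbps (so 10 => 1 Mb/s).
 * Each entry records whether the rate may be used with a short preamble,
 * a long preamble, or both, along with the hardware rate code to use.
 */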
static const struct {
	u16 bitrate;
	u8 short_pmbl;
	u8 hw_code;
} ath5k_rates[] = {
	{ 10, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_1M },
	{ 20, ATH5K_SPMBL_NO, ATH5K_RATE_CODE_2M },
	{ 55, ATH5K_SPMBL_NO, ATH5K_RATE_CODE_5_5M },
	{ 110, ATH5K_SPMBL_NO, ATH5K_RATE_CODE_11M },
	{ 60, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_6M },
	{ 90, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_9M },
	{ 120, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_12M },
	{ 180, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_18M },
	{ 240, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_24M },
	{ 360, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_36M },
	{ 480, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_48M },
	{ 540, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_54M },
	{ 20, ATH5K_SPMBL_YES, ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE },
	{ 55, ATH5K_SPMBL_YES, ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE },
	{ 110, ATH5K_SPMBL_YES, ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE },
	{ 0, 0, 0 },
};

#define ATH5K_NR_RATES 15

/*
 * Prototypes - PCI stack related functions
 */
static int ath5k_probe(struct pci_device *pdev);
static void ath5k_remove(struct pci_device *pdev);

struct pci_driver ath5k_pci_driver __pci_driver = {
	.ids = ath5k_nics,
	.id_count = sizeof(ath5k_nics) / sizeof(ath5k_nics[0]),
	.probe = ath5k_probe,
	.remove = ath5k_remove,
};



/*
 * Prototypes - MAC 802.11 stack related functions
 */
static int ath5k_tx(struct net80211_device *dev, struct io_buffer *iob);
static int ath5k_reset(struct ath5k_softc *sc, struct net80211_channel *chan);
static int ath5k_reset_wake(struct ath5k_softc *sc);
static int ath5k_start(struct net80211_device *dev);
static void ath5k_stop(struct net80211_device *dev);
static int ath5k_config(struct net80211_device *dev, int changed);
static void ath5k_poll(struct net80211_device *dev);
static void ath5k_irq(struct net80211_device *dev, int enable);

static struct net80211_device_operations ath5k_ops = {
	.open = ath5k_start,
	.close = ath5k_stop,
	.transmit = ath5k_tx,
	.poll = ath5k_poll,
	.irq = ath5k_irq,
	.config = ath5k_config,
};

/*
 * Prototypes - Internal functions
 */
/* Attach detach */
static int ath5k_attach(struct net80211_device *dev);
static void ath5k_detach(struct net80211_device *dev);
/* Channel/mode setup */
static unsigned int ath5k_copy_channels(struct ath5k_hw *ah,
					struct net80211_channel *channels,
					unsigned int mode,
					unsigned int max);
static int ath5k_setup_bands(struct net80211_device *dev);
static int ath5k_chan_set(struct ath5k_softc *sc,
			  struct net80211_channel *chan);
static void ath5k_setcurmode(struct ath5k_softc *sc,
			     unsigned int mode);
static void ath5k_mode_setup(struct ath5k_softc *sc);

/* Descriptor setup */
static int ath5k_desc_alloc(struct ath5k_softc *sc);
static void ath5k_desc_free(struct ath5k_softc *sc);
/* Buffers setup */
static int ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf);
static int ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf);

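/* Release the io_buffer attached to a TX slot, reporting the send as
 * cancelled to the net80211 layer. */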
static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
				    struct ath5k_buf *bf)
{
	if (!bf->iob)
		return;

	net80211_tx_complete(sc->dev, bf->iob, 0, ECANCELED);
	bf->iob = NULL;
}

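/* Free the io_buffer attached to an RX slot. */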
static inline void ath5k_rxbuf_free(struct ath5k_softc *sc __unused,
				    struct ath5k_buf *bf)
{
	free_iob(bf->iob);
	bf->iob = NULL;
}

/* Queues setup */
static int ath5k_txq_setup(struct ath5k_softc *sc,
			   int qtype, int subtype);
static void ath5k_txq_drainq(struct ath5k_softc *sc,
			     struct ath5k_txq *txq);
static void ath5k_txq_cleanup(struct ath5k_softc *sc);
static void ath5k_txq_release(struct ath5k_softc *sc);
/* Rx handling */
static int ath5k_rx_start(struct ath5k_softc *sc);
static void ath5k_rx_stop(struct ath5k_softc *sc);
/* Tx handling */
static void ath5k_tx_processq(struct ath5k_softc *sc,
			      struct ath5k_txq *txq);

/* Interrupt handling */
static int ath5k_init(struct ath5k_softc *sc);
static int ath5k_stop_hw(struct ath5k_softc *sc);

static void ath5k_calibrate(struct ath5k_softc *sc);

/* Filter */
static void ath5k_configure_filter(struct ath5k_softc *sc);

/********************\
* PCI Initialization *
\********************/

#if DBGLVL_MAX
static const char *
ath5k_chip_name(enum ath5k_srev_type type, u16 val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;

		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}
#endif

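/* Probe routine: map the PCI device, allocate the net80211 device and
 * driver-private state, and bring the hardware up far enough to
 * identify the chip and register with the 802.11 stack. */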
static int ath5k_probe(struct pci_device *pdev)
{
	void *mem;
	struct ath5k_softc *sc;
	struct net80211_device *dev;
	int ret;
	u8 csz;

	adjust_pci_device(pdev);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
	if (csz == 0) {
		/*
		 * We must have this setup properly for rx buffer
		 * DMA to work so force a reasonable value here if it
		 * comes up zero.
		 */
		csz = 16;
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
	}
	/*
	 * The default setting of latency timer yields poor results,
	 * set it to the value used by other systems. It may be worth
	 * tweaking this setting more.
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);

	/*
	 * Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config_byte(pdev, 0x41, 0);

	mem = ioremap(pdev->membase, 0x10000);
	if (!mem) {
		DBG("ath5k: cannot remap PCI memory region\n");
		ret = -EIO;
		goto err;
	}

	/*
	 * Allocate dev (net80211 main struct)
	 * and dev->priv (driver private data)
	 */
	dev = net80211_alloc(sizeof(*sc));
	if (!dev) {
		DBG("ath5k: cannot allocate 802.11 device\n");
		ret = -ENOMEM;
		goto err_map;
	}

	/* Initialize driver private data */
	sc = dev->priv;
	sc->dev = dev;
	sc->pdev = pdev;

	sc->hwinfo = zalloc(sizeof(*sc->hwinfo));
	if (!sc->hwinfo) {
		DBG("ath5k: cannot allocate 802.11 hardware info structure\n");
		ret = -ENOMEM;
		goto err_free;
	}

	sc->hwinfo->flags = NET80211_HW_RX_HAS_FCS;
	sc->hwinfo->signal_type = NET80211_SIGNAL_DB;
	sc->hwinfo->signal_max = 40; /* 35dB should give perfect 54Mbps */
	sc->hwinfo->channel_change_time = 5000;

	/* Avoid working with the device until setup is complete */
	sc->status |= ATH_STAT_INVALID;

	sc->iobase = mem;
	sc->cachelsz = csz * 4; /* convert to bytes */

	DBG("ath5k: register base at %p (%08lx)\n", sc->iobase, pdev->membase);
	DBG("ath5k: cache line size %d\n", sc->cachelsz);

	/* Set private data */
	pci_set_drvdata(pdev, dev);
	dev->netdev->dev = (struct device *)pdev;

	/* Initialize device */
	ret = ath5k_hw_attach(sc, pdev->id->driver_data, &sc->ah);
	if (ret)
		goto err_free_hwinfo;

	/* Finish private driver data initialization */
	ret = ath5k_attach(dev);
	if (ret)
		goto err_ah;

#if DBGLVL_MAX
	DBG("Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
	    ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
	    sc->ah->ah_mac_srev, sc->ah->ah_phy_revision);

	if (!sc->ah->ah_single_chip) {
		/* Single chip radio (!RF5111) */
		if (sc->ah->ah_radio_5ghz_revision &&
		    !sc->ah->ah_radio_2ghz_revision) {
			/* No 5GHz support -> report 2GHz radio */
			if (!(sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11A)) {
				DBG("RF%s 2GHz radio found (0x%x)\n",
				    ath5k_chip_name(AR5K_VERSION_RAD,
						    sc->ah->ah_radio_5ghz_revision),
				    sc->ah->ah_radio_5ghz_revision);
			/* No 2GHz support (5110 and some
			 * 5GHz only cards) -> report 5GHz radio */
			} else if (!(sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11B)) {
				DBG("RF%s 5GHz radio found (0x%x)\n",
				    ath5k_chip_name(AR5K_VERSION_RAD,
						    sc->ah->ah_radio_5ghz_revision),
				    sc->ah->ah_radio_5ghz_revision);
			/* Multiband radio */
			} else {
				DBG("RF%s multiband radio found (0x%x)\n",
				    ath5k_chip_name(AR5K_VERSION_RAD,
						    sc->ah->ah_radio_5ghz_revision),
				    sc->ah->ah_radio_5ghz_revision);
			}
		}
		/* Multi chip radio (RF5111 - RF2111) ->
		 * report both 2GHz/5GHz radios */
		else if (sc->ah->ah_radio_5ghz_revision &&
			 sc->ah->ah_radio_2ghz_revision) {
			DBG("RF%s 5GHz radio found (0x%x)\n",
			    ath5k_chip_name(AR5K_VERSION_RAD,
					    sc->ah->ah_radio_5ghz_revision),
			    sc->ah->ah_radio_5ghz_revision);
			DBG("RF%s 2GHz radio found (0x%x)\n",
			    ath5k_chip_name(AR5K_VERSION_RAD,
					    sc->ah->ah_radio_2ghz_revision),
			    sc->ah->ah_radio_2ghz_revision);
		}
	}
#endif

	/* Ready to go */
	sc->status &= ~ATH_STAT_INVALID;

	return 0;
err_ah:
	ath5k_hw_detach(sc->ah);
err_free_hwinfo:
	free(sc->hwinfo);
err_free:
	net80211_free(dev);
err_map:
	iounmap(mem);
err:
	return ret;
}

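/* Remove routine: undo everything ath5k_probe() set up. */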
static void ath5k_remove(struct pci_device *pdev)
{
	struct net80211_device *dev = pci_get_drvdata(pdev);
	struct ath5k_softc *sc = dev->priv;

	ath5k_detach(dev);
	ath5k_hw_detach(sc->ah);
	iounmap(sc->iobase);
	free(sc->hwinfo);
	net80211_free(dev);
}


/***********************\
* Driver Initialization *
\***********************/

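/* Set up driver-private state (bands, descriptors, TX queue, MAC
 * address) and register the device with the net80211 stack. */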
static int
ath5k_attach(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;
	int ret;

	/*
	 * Collect the channel list. The 802.11 layer
	 * is responsible for filtering this list based
	 * on settings like the phy mode and regulatory
	 * domain restrictions.
	 */
	ret = ath5k_setup_bands(dev);
	if (ret) {
		DBG("ath5k: can't get channels\n");
		goto err;
	}

	/* NB: setup here so ath5k_rate_update is happy */
	if (ah->ah_modes & AR5K_MODE_BIT_11A)
		ath5k_setcurmode(sc, AR5K_MODE_11A);
	else
		ath5k_setcurmode(sc, AR5K_MODE_11B);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	ret = ath5k_desc_alloc(sc);
	if (ret) {
		DBG("ath5k: can't allocate descriptors\n");
		goto err;
	}

	/*
	 * Allocate hardware transmit queues. Note that hw functions
	 * handle resetting these queues at the needed time.
	 */
	ret = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
	if (ret) {
		DBG("ath5k: can't setup xmit queue\n");
		goto err_desc;
	}

	sc->last_calib_ticks = currticks();

	ret = ath5k_eeprom_read_mac(ah, sc->hwinfo->hwaddr);
	if (ret) {
		DBG("ath5k: unable to read address from EEPROM: 0x%04x\n",
		    sc->pdev->device);
		goto err_queues;
	}

	memset(sc->bssidmask, 0xff, ETH_ALEN);
	ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);

	ret = net80211_register(sc->dev, &ath5k_ops, sc->hwinfo);
	if (ret) {
		DBG("ath5k: can't register ieee80211 hw\n");
		goto err_queues;
	}

	return 0;
err_queues:
	ath5k_txq_release(sc);
err_desc:
	ath5k_desc_free(sc);
err:
	return ret;
}

static void
ath5k_detach(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;

	net80211_unregister(dev);
	ath5k_desc_free(sc);
	ath5k_txq_release(sc);
}




/********************\
* Channel/mode setup *
\********************/

/*
 * Convert IEEE channel number to MHz frequency.
 */
static inline short
ath5k_ieee2mhz(short chan)
{
	if (chan < 14)
		return 2407 + 5 * chan;
	if (chan == 14)
		return 2484;
	if (chan < 27)
		return 2212 + 20 * chan;
	return 5000 + 5 * chan;
}

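/*
 * Fill in up to @max channel structures for the given mode, keeping
 * only channels the chipset actually supports. Returns the number of
 * channels written.
 */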
static unsigned int
ath5k_copy_channels(struct ath5k_hw *ah,
		    struct net80211_channel *channels,
		    unsigned int mode, unsigned int max)
{
	unsigned int i, count, size, chfreq, freq, ch;

	if (!(ah->ah_modes & (1 << mode)))
		return 0;

	switch (mode) {
	case AR5K_MODE_11A:
	case AR5K_MODE_11A_TURBO:
		/* 1..220, but 2GHz frequencies are filtered by
		 * ath5k_channel_ok() */
		size = 220;
		chfreq = CHANNEL_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
	case AR5K_MODE_11G_TURBO:
		size = 26;
		chfreq = CHANNEL_2GHZ;
		break;
	default:
		return 0;
	}

	for (i = 0, count = 0; i < size && max > 0; i++) {
		ch = i + 1;
		freq = ath5k_ieee2mhz(ch);

		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, freq, chfreq))
			continue;

		/* Write channel info and increment counter */
		channels[count].center_freq = freq;
		channels[count].maxpower = 0; /* use regulatory */
		channels[count].band = (chfreq == CHANNEL_2GHZ) ?
			NET80211_BAND_2GHZ : NET80211_BAND_5GHZ;
		switch (mode) {
		case AR5K_MODE_11A:
		case AR5K_MODE_11G:
			channels[count].hw_value = chfreq | CHANNEL_OFDM;
			break;
		case AR5K_MODE_11A_TURBO:
		case AR5K_MODE_11G_TURBO:
			channels[count].hw_value = chfreq |
				CHANNEL_OFDM | CHANNEL_TURBO;
			break;
		case AR5K_MODE_11B:
			channels[count].hw_value = CHANNEL_B;
		}

		count++;
		max--;
	}

	return count;
}

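/*
 * Build the 2GHz and/or 5GHz band information (rates and channels)
 * advertised to the net80211 stack, based on hardware capabilities.
 */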
static int
ath5k_setup_bands(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;
	int max_c, count_c = 0;
	int i;
	int band;

	max_c = sizeof(sc->hwinfo->channels) / sizeof(sc->hwinfo->channels[0]);

	/* 2GHz band */
	if (sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11G) {
		/* G mode */
		band = NET80211_BAND_2GHZ;
		sc->hwinfo->bands = NET80211_BAND_BIT_2GHZ;
		sc->hwinfo->modes = (NET80211_MODE_G | NET80211_MODE_B);

		for (i = 0; i < 12; i++)
			sc->hwinfo->rates[band][i] = ath5k_rates[i].bitrate;
		sc->hwinfo->nr_rates[band] = 12;

		sc->hwinfo->nr_channels =
			ath5k_copy_channels(ah, sc->hwinfo->channels,
					    AR5K_MODE_11G, max_c);
		count_c = sc->hwinfo->nr_channels;
		max_c -= count_c;
	} else if (sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11B) {
		/* B mode */
		band = NET80211_BAND_2GHZ;
		sc->hwinfo->bands = NET80211_BAND_BIT_2GHZ;
		sc->hwinfo->modes = NET80211_MODE_B;

		for (i = 0; i < 4; i++)
			sc->hwinfo->rates[band][i] = ath5k_rates[i].bitrate;
		sc->hwinfo->nr_rates[band] = 4;

		sc->hwinfo->nr_channels =
			ath5k_copy_channels(ah, sc->hwinfo->channels,
					    AR5K_MODE_11B, max_c);
		count_c = sc->hwinfo->nr_channels;
		max_c -= count_c;
	}

	/* 5GHz band, A mode */
	if (sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11A) {
		band = NET80211_BAND_5GHZ;
		sc->hwinfo->bands |= NET80211_BAND_BIT_5GHZ;
		sc->hwinfo->modes |= NET80211_MODE_A;

		for (i = 0; i < 8; i++)
			sc->hwinfo->rates[band][i] = ath5k_rates[i+4].bitrate;
		sc->hwinfo->nr_rates[band] = 8;

		sc->hwinfo->nr_channels =
			ath5k_copy_channels(ah, sc->hwinfo->channels,
					    AR5K_MODE_11A, max_c);
		count_c = sc->hwinfo->nr_channels;
		max_c -= count_c;
	}

	return 0;
}

/*
 * Set/change channels. If the channel is really being changed,
 * it's done by resetting the chip. To accomplish this we must
 * first cleanup any pending DMA, then restart things again, a la
 * ath5k_init.
 */
static int
ath5k_chan_set(struct ath5k_softc *sc, struct net80211_channel *chan)
{
	if (chan->center_freq != sc->curchan->center_freq ||
	    chan->hw_value != sc->curchan->hw_value) {
		/*
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		DBG2("ath5k: resetting for channel change (%d -> %d MHz)\n",
		     sc->curchan->center_freq, chan->center_freq);
		return ath5k_reset(sc, chan);
	}

	return 0;
}

static void
ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
{
	sc->curmode = mode;

	if (mode == AR5K_MODE_11A) {
		sc->curband = NET80211_BAND_5GHZ;
	} else {
		sc->curband = NET80211_BAND_2GHZ;
	}
}

static void
ath5k_mode_setup(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	u32 rfilt;

	/* configure rx filter */
	rfilt = sc->filter_flags;
	ath5k_hw_set_rx_filter(ah, rfilt);

	if (ath5k_hw_hasbssidmask(ah))
		ath5k_hw_set_bssid_mask(ah, sc->bssidmask);

	/* configure operational mode */
	ath5k_hw_set_opmode(ah);

	ath5k_hw_set_mcast_filter(ah, 0, 0);
}

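/*
 * Translate between the hardware rate codes used in descriptors and
 * the bitrate values (in 100 kbps units) used by the net80211 layer.
 * Both lookups fall back to the lowest rate on an unknown value.
 */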
static inline int
ath5k_hw_rix_to_bitrate(int hw_rix)
{
	int i;

	for (i = 0; i < ATH5K_NR_RATES; i++) {
		if (ath5k_rates[i].hw_code == hw_rix)
			return ath5k_rates[i].bitrate;
	}

	DBG("ath5k: invalid rix %02x\n", hw_rix);
	return 10; /* use lowest rate */
}

int ath5k_bitrate_to_hw_rix(int bitrate)
{
	int i;

	for (i = 0; i < ATH5K_NR_RATES; i++) {
		if (ath5k_rates[i].bitrate == bitrate)
			return ath5k_rates[i].hw_code;
	}

	DBG("ath5k: invalid bitrate %d\n", bitrate);
	return ATH5K_RATE_CODE_1M; /* use lowest rate */
}

/***************\
* Buffers setup *
\***************/

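/* Allocate a receive io_buffer, aligned to the cache line size, and
 * return its bus address for use in an RX descriptor. */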
static struct io_buffer *
ath5k_rx_iob_alloc(struct ath5k_softc *sc, u32 *iob_addr)
{
	struct io_buffer *iob;
	unsigned int off;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
	 */
	iob = alloc_iob(sc->rxbufsize + sc->cachelsz - 1);

	if (!iob) {
		DBG("ath5k: can't alloc iobuf of size %d\n",
		    sc->rxbufsize + sc->cachelsz - 1);
		return NULL;
	}

	*iob_addr = virt_to_bus(iob->data);

	/*
	 * Cache-line-align. This is important (for the
	 * 5210 at least) as not doing so causes bogus data
	 * in rx'd frames.
	 */
	off = *iob_addr % sc->cachelsz;
	if (off != 0) {
		iob_reserve(iob, sc->cachelsz - off);
		*iob_addr += sc->cachelsz - off;
	}

	return iob;
}

static int
ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct io_buffer *iob = bf->iob;
	struct ath5k_desc *ds;

	if (!iob) {
		iob = ath5k_rx_iob_alloc(sc, &bf->iobaddr);
		if (!iob)
			return -ENOMEM;
		bf->iob = iob;
	}

	/*
	 * Setup descriptors. For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end. As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally. This should be safe even
	 * if DMA is happening. When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list. This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->iobaddr;
	if (ah->ah_setup_rx_desc(ah, ds,
				 iob_tailroom(iob),	/* buffer size */
				 0) != 0) {
		DBG("ath5k: error setting up RX descriptor for %zd bytes\n",
		    iob_tailroom(iob));
		return -EINVAL;
	}

	if (sc->rxlink != NULL)
		*sc->rxlink = bf->daddr;
	sc->rxlink = &ds->ds_link;
	return 0;
}

static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq *txq = &sc->txq;
	struct ath5k_desc *ds = bf->desc;
	struct io_buffer *iob = bf->iob;
	unsigned int pktlen, flags;
	int ret;
	u16 duration = 0;
	u16 cts_rate = 0;

	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
	bf->iobaddr = virt_to_bus(iob->data);
	pktlen = iob_len(iob);

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (sc->dev->phy_flags & NET80211_PHY_USE_PROTECTION) {
		struct net80211_device *dev = sc->dev;

		flags |= AR5K_TXDESC_CTSENA;
		cts_rate = sc->hw_rtscts_rate;
		duration = net80211_cts_duration(dev, pktlen);
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
				   IEEE80211_TYP_FRAME_HEADER_LEN,
				   AR5K_PKT_TYPE_NORMAL, sc->power_level * 2,
				   sc->hw_rate, ATH5K_RETRIES,
				   AR5K_TXKEYIX_INVALID, 0, flags,
				   cts_rate, duration);
	if (ret)
		return ret;

	ds->ds_link = 0;
	ds->ds_data = bf->iobaddr;

	list_add_tail(&bf->list, &txq->q);
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;

	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mb();

	return 0;
}

/*******************\
* Descriptors setup *
\*******************/

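/*
 * Allocate one contiguous DMA-coherent block of descriptors and an
 * array of buffer pointers, then split them between the RX and TX
 * free lists.
 */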
static int
ath5k_desc_alloc(struct ath5k_softc *sc)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	u32 da;
	unsigned int i;
	int ret;

	/* allocate descriptors */
	sc->desc_len = sizeof(struct ath5k_desc) * (ATH_TXBUF + ATH_RXBUF + 1);
	sc->desc = malloc_dma(sc->desc_len, ATH5K_DESC_ALIGN);
	if (sc->desc == NULL) {
		DBG("ath5k: can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	memset(sc->desc, 0, sc->desc_len);
	sc->desc_daddr = virt_to_bus(sc->desc);

	ds = sc->desc;
	da = sc->desc_daddr;

	bf = calloc(ATH_TXBUF + ATH_RXBUF + 1, sizeof(struct ath5k_buf));
	if (bf == NULL) {
		DBG("ath5k: can't allocate buffer pointers\n");
		ret = -ENOMEM;
		goto err_free;
	}
	sc->bufptr = bf;

	INIT_LIST_HEAD(&sc->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->rxbuf);
	}

	INIT_LIST_HEAD(&sc->txbuf);
	sc->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->txbuf);
	}

	return 0;

err_free:
	free_dma(sc->desc, sc->desc_len);
err:
	sc->desc = NULL;
	return ret;
}

static void
ath5k_desc_free(struct ath5k_softc *sc)
{
	struct ath5k_buf *bf;

	list_for_each_entry(bf, &sc->txbuf, list)
		ath5k_txbuf_free(sc, bf);
	list_for_each_entry(bf, &sc->rxbuf, list)
		ath5k_rxbuf_free(sc, bf);

	/* Free memory associated with all descriptors */
	free_dma(sc->desc, sc->desc_len);

	free(sc->bufptr);
	sc->bufptr = NULL;
}





/**************\
* Queues setup *
\**************/

static int
ath5k_txq_setup(struct ath5k_softc *sc, int qtype, int subtype)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
		.tqi_aifs = AR5K_TXQ_USEDEFAULT,
		.tqi_cw_min = AR5K_TXQ_USEDEFAULT,
		.tqi_cw_max = AR5K_TXQ_USEDEFAULT
	};
	int qnum;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
		       AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		DBG("ath5k: can't set up a TX queue\n");
		return -EIO;
	}

	txq = &sc->txq;
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		txq->setup = 1;
	}
	return 0;
}

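/* Return every buffer on a TX queue to the free list, cancelling any
 * packets still attached. */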
static void
ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
{
	struct ath5k_buf *bf, *bf0;

	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
		ath5k_txbuf_free(sc, bf);

		list_del(&bf->list);
		list_add_tail(&bf->list, &sc->txbuf);
		sc->txbuf_len++;
	}
	txq->link = NULL;
}

/*
 * Drain the transmit queues and reclaim resources.
 */
static void
ath5k_txq_cleanup(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	if (!(sc->status & ATH_STAT_INVALID)) {
		/* don't touch the hardware if marked invalid */
		if (sc->txq.setup) {
			ath5k_hw_stop_tx_dma(ah, sc->txq.qnum);
			DBG("ath5k: txq [%d] %x, link %p\n",
			    sc->txq.qnum,
			    ath5k_hw_get_txdp(ah, sc->txq.qnum),
			    sc->txq.link);
		}
	}

	if (sc->txq.setup)
		ath5k_txq_drainq(sc, &sc->txq);
}

static void
ath5k_txq_release(struct ath5k_softc *sc)
{
	if (sc->txq.setup) {
		ath5k_hw_release_tx_queue(sc->ah);
		sc->txq.setup = 0;
	}
}




/*************\
* RX Handling *
\*************/

/*
 * Enable the receive h/w following a reset.
 */
static int
ath5k_rx_start(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_buf *bf;
	int ret;

	sc->rxbufsize = IEEE80211_MAX_LEN;
	if (sc->rxbufsize % sc->cachelsz != 0)
		sc->rxbufsize += sc->cachelsz - (sc->rxbufsize % sc->cachelsz);

	sc->rxlink = NULL;

	list_for_each_entry(bf, &sc->rxbuf, list) {
		ret = ath5k_rxbuf_setup(sc, bf);
		if (ret != 0)
			return ret;
	}

	bf = list_entry(sc->rxbuf.next, struct ath5k_buf, list);

	ath5k_hw_set_rxdp(ah, bf->daddr);
	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
	ath5k_mode_setup(sc);		/* set filters, etc. */
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */

	return 0;
}

/*
 * Disable the receive h/w in preparation for a reset.
 */
static void
ath5k_rx_stop(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */
	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_dma(ah);	/* disable DMA engine */

	sc->rxlink = NULL;		/* just in case */
}

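/*
 * Process all completed RX descriptors: hand good frames to the
 * net80211 layer, recycle each buffer onto the end of the RX list,
 * and re-arm its descriptor.
 */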
static void
ath5k_handle_rx(struct ath5k_softc *sc)
{
	struct ath5k_rx_status rs;
	struct io_buffer *iob, *next_iob;
	u32 next_iob_addr;
	struct ath5k_buf *bf, *bf_last;
	struct ath5k_desc *ds;
	int ret;

	memset(&rs, 0, sizeof(rs));

	if (list_empty(&sc->rxbuf)) {
		DBG("ath5k: empty rx buf pool\n");
		return;
	}

	bf_last = list_entry(sc->rxbuf.prev, struct ath5k_buf, list);

	do {
		bf = list_entry(sc->rxbuf.next, struct ath5k_buf, list);
		assert(bf->iob != NULL);
		iob = bf->iob;
		ds = bf->desc;

		/*
		 * The last buffer must not be freed to ensure proper
		 * hardware function. Once the hardware has also finished
		 * the packet following it, we know it no longer uses this
		 * buffer and we can go on.
		 */
		if (bf_last == bf)
			bf->flags |= 1;
		if (bf->flags) {
			struct ath5k_buf *bf_next = list_entry(bf->list.next,
							       struct ath5k_buf, list);
			ret = sc->ah->ah_proc_rx_desc(sc->ah, bf_next->desc,
						      &rs);
			if (ret)
				break;
			bf->flags &= ~1;
			/* skip the overwritten one (even status is martian) */
			goto next;
		}

		ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
		if (ret) {
			if (ret != -EINPROGRESS) {
				DBG("ath5k: error in processing rx desc: %s\n",
				    strerror(ret));
				net80211_rx_err(sc->dev, NULL, -ret);
			} else {
				/* normal return, reached end of
				   available descriptors */
			}
			return;
		}

		if (rs.rs_more) {
			DBG("ath5k: unsupported fragmented rx\n");
			goto next;
		}

		if (rs.rs_status) {
			if (rs.rs_status & AR5K_RXERR_PHY) {
				/* These are uncommon, and may indicate a real problem. */
				net80211_rx_err(sc->dev, NULL, EIO);
				goto next;
			}
			if (rs.rs_status & AR5K_RXERR_CRC) {
				/* These occur *all the time*. */
				goto next;
			}
			if (rs.rs_status & AR5K_RXERR_DECRYPT) {
				/*
				 * Decrypt error. If the error occurred
				 * because there was no hardware key, then
				 * let the frame through so the upper layers
				 * can process it. This is necessary for 5210
				 * parts which have no way to setup a ``clear''
				 * key cache entry.
				 *
				 * XXX do key cache faulting
				 */
				if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
				    !(rs.rs_status & AR5K_RXERR_CRC))
					goto accept;
			}

			/* any other error, unhandled */
			DBG("ath5k: packet rx status %x\n", rs.rs_status);
			goto next;
		}
accept:
		next_iob = ath5k_rx_iob_alloc(sc, &next_iob_addr);

		/*
		 * If we can't replace bf->iob with a new iob under memory
		 * pressure, just skip this packet
		 */
		if (!next_iob) {
			DBG("ath5k: dropping packet under memory pressure\n");
			goto next;
		}

		iob_put(iob, rs.rs_datalen);

		/* The MAC header is padded to have 32-bit boundary if the
		 * packet payload is non-zero. However, iPXE only
		 * supports standard 802.11 packets with 24-byte
		 * header, so no padding correction should be needed.
		 */

		DBG2("ath5k: rx %d bytes, signal %d\n", rs.rs_datalen,
		     rs.rs_rssi);

		net80211_rx(sc->dev, iob, rs.rs_rssi,
			    ath5k_hw_rix_to_bitrate(rs.rs_rate));

		bf->iob = next_iob;
		bf->iobaddr = next_iob_addr;
next:
		list_del(&bf->list);
		list_add_tail(&bf->list, &sc->rxbuf);
	} while (ath5k_rxbuf_setup(sc, bf) == 0);
}




/*************\
* TX Handling *
\*************/

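/*
 * Reap completed TX descriptors: report each finished frame to the
 * net80211 layer and return its buffer to the free list.
 */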
static void
ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
{
	struct ath5k_tx_status ts;
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct io_buffer *iob;
	int ret;

	memset(&ts, 0, sizeof(ts));

	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
		ds = bf->desc;

		ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
		if (ret) {
			if (ret != -EINPROGRESS) {
				DBG("ath5k: error in processing tx desc: %s\n",
				    strerror(ret));
			} else {
				/* normal return, reached end of tx completions */
			}
			break;
		}

		iob = bf->iob;
		bf->iob = NULL;

		DBG2("ath5k: tx %zd bytes complete, %d retries\n",
		     iob_len(iob), ts.ts_retry[0]);

		net80211_tx_complete(sc->dev, iob, ts.ts_retry[0],
				     ts.ts_status ? EIO : 0);

		list_del(&bf->list);
		list_add_tail(&bf->list, &sc->txbuf);
		sc->txbuf_len++;
	}

	if (list_empty(&txq->q))
		txq->link = NULL;
}

static void
ath5k_handle_tx(struct ath5k_softc *sc)
{
	ath5k_tx_processq(sc, &sc->txq);
}


/********************\
* Interrupt handling *
\********************/

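/* Enable or disable interrupt generation by the card, recording the
 * requested state so that a later reset can restore it. */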
static void
ath5k_irq(struct net80211_device *dev, int enable)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;

	sc->irq_ena = enable;
	ah->ah_ier = enable ? AR5K_IER_ENABLE : AR5K_IER_DISABLE;

	ath5k_hw_reg_write(ah, ah->ah_ier, AR5K_IER);
	ath5k_hw_set_imr(ah, sc->imask);
}

static int
ath5k_init(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	int ret, i;

	/*
	 * Stop anything previously setup. This is safe whether or not
	 * this is the first time through.
	 */
	ath5k_stop_hw(sc);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	sc->curchan = sc->dev->channels + sc->dev->channel;
	sc->curband = sc->curchan->band;
	sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
		AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
		AR5K_INT_FATAL | AR5K_INT_GLOBAL;
	ret = ath5k_reset(sc, NULL);
	if (ret)
		goto done;

	ath5k_rfkill_hw_start(ah);

	/*
	 * Reset the key cache since some parts do not reset the
	 * contents on initial power up or resume from suspend.
	 */
	for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
		ath5k_hw_reset_key(ah, i);

	/* Set ack to be sent at low bit-rates */
	ath5k_hw_set_ack_bitrate_high(ah, 0);

	ret = 0;
done:
	mb();
	return ret;
}

static int
ath5k_stop_hw(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    disable interrupts
	 *    turn off timers
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */

	if (!(sc->status & ATH_STAT_INVALID)) {
		ath5k_hw_set_imr(ah, 0);
	}
	ath5k_txq_cleanup(sc);
	if (!(sc->status & ATH_STAT_INVALID)) {
		ath5k_rx_stop(sc);
		ath5k_hw_phy_disable(ah);
	} else
		sc->rxlink = NULL;

	ath5k_rfkill_hw_stop(sc->ah);

	return 0;
}

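/*
 * Poll routine: run periodic calibration if due, then service any
 * pending interrupt conditions (errors, RX and TX completions).
 */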
static void
ath5k_poll(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;
	enum ath5k_int status;
	unsigned int counter = 1000;

	if (currticks() - sc->last_calib_ticks >
	    ATH5K_CALIB_INTERVAL * TICKS_PER_SEC) {
		ath5k_calibrate(sc);
		sc->last_calib_ticks = currticks();
	}

	if ((sc->status & ATH_STAT_INVALID) ||
	    (sc->irq_ena && !ath5k_hw_is_intr_pending(ah)))
		return;

	do {
		ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
		DBGP("ath5k: status %#x/%#x\n", status, sc->imask);
		if (status & AR5K_INT_FATAL) {
			/*
			 * Fatal errors are unrecoverable.
			 * Typically these are caused by DMA errors.
			 */
			DBG("ath5k: fatal error, resetting\n");
			ath5k_reset_wake(sc);
		} else if (status & AR5K_INT_RXORN) {
			DBG("ath5k: rx overrun, resetting\n");
			ath5k_reset_wake(sc);
		} else {
			if (status & AR5K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link when
				 *     RXE bit is written, but it doesn't work at
				 *     least on older hardware revs.
				 */
				DBG("ath5k: rx EOL\n");
				sc->rxlink = NULL;
			}
			if (status & AR5K_INT_TXURN) {
				/* bump tx trigger level */
				DBG("ath5k: tx underrun\n");
				ath5k_hw_update_tx_triglevel(ah, 1);
			}
			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
				ath5k_handle_rx(sc);
			if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
				      | AR5K_INT_TXERR | AR5K_INT_TXEOL))
				ath5k_handle_tx(sc);
		}
	} while (ath5k_hw_is_intr_pending(ah) && counter-- > 0);

	if (!counter)
		DBG("ath5k: too many interrupts, giving up for now\n");
}

/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 */
static void
ath5k_calibrate(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
		/*
		 * Rfgain is out of bounds, reset the chip
		 * to load new gain values.
		 */
		DBG("ath5k: resetting for calibration\n");
		ath5k_reset_wake(sc);
	}
	if (ath5k_hw_phy_calibrate(ah, sc->curchan))
		DBG("ath5k: calibration of channel %d failed\n",
		    sc->curchan->channel_nr);
}


/********************\
* Net80211 functions *
\********************/

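/*
 * Transmit entry point: take a free TX buffer, attach the io_buffer
 * to it, and queue it to the hardware.
 */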
static int
ath5k_tx(struct net80211_device *dev, struct io_buffer *iob)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_buf *bf;
	int rc;

	/*
	 * The hardware expects the header padded to 4 byte boundaries.
	 * iPXE only ever sends 24-byte headers, so no action necessary.
	 */

	if (list_empty(&sc->txbuf)) {
		DBG("ath5k: dropping packet because no tx bufs available\n");
		return -ENOBUFS;
	}

	bf = list_entry(sc->txbuf.next, struct ath5k_buf, list);
	list_del(&bf->list);
	sc->txbuf_len--;

	bf->iob = iob;

	if ((rc = ath5k_txbuf_setup(sc, bf)) != 0) {
		bf->iob = NULL;
		list_add_tail(&bf->list, &sc->txbuf);
		sc->txbuf_len++;
		return rc;
	}
	return 0;
}

/*
 * Reset the hardware. If chan is not NULL, then also pause rx/tx
 * and change to the given channel.
 */
static int
ath5k_reset(struct ath5k_softc *sc, struct net80211_channel *chan)
{
	struct ath5k_hw *ah = sc->ah;
	int ret;

	if (chan) {
		ath5k_hw_set_imr(ah, 0);
		ath5k_txq_cleanup(sc);
		ath5k_rx_stop(sc);

		sc->curchan = chan;
		sc->curband = chan->band;
	}

	ret = ath5k_hw_reset(ah, sc->curchan, 1);
	if (ret) {
		DBG("ath5k: can't reset hardware: %s\n", strerror(ret));
		return ret;
	}

	ret = ath5k_rx_start(sc);
	if (ret) {
		DBG("ath5k: can't start rx logic: %s\n", strerror(ret));
		return ret;
	}

	/*
	 * Change channels and update the h/w rate map if we're switching;
	 * e.g. 11a to 11b/g.
	 *
	 * We may be doing a reset in response to an ioctl that changes the
	 * channel so update any state that might change as a result.
	 *
	 * XXX needed?
	 */
	/* ath5k_chan_change(sc, c); */

	/* Reenable interrupts if necessary */
	ath5k_irq(sc->dev, sc->irq_ena);

	return 0;
}

static int ath5k_reset_wake(struct ath5k_softc *sc)
{
	return ath5k_reset(sc, sc->curchan);
}

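/* Open callback: bring the hardware up, clear association state, and
 * program our MAC address. */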
static int ath5k_start(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	int ret;

	if ((ret = ath5k_init(sc)) != 0)
		return ret;

	sc->assoc = 0;
	ath5k_configure_filter(sc);
	ath5k_hw_set_lladdr(sc->ah, dev->netdev->ll_addr);

	return 0;
}

static void ath5k_stop(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	u8 mac[ETH_ALEN] = {};

	ath5k_hw_set_lladdr(sc->ah, mac);

	ath5k_stop_hw(sc);
}

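/*
 * Config callback: apply channel, rate, and association changes
 * requested by the net80211 layer to the hardware.
 */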
static int
ath5k_config(struct net80211_device *dev, int changed)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;
	struct net80211_channel *chan = &dev->channels[dev->channel];
	int ret;

	if (changed & NET80211_CFG_CHANNEL) {
		sc->power_level = chan->maxpower;
		if ((ret = ath5k_chan_set(sc, chan)) != 0)
			return ret;
	}

	if ((changed & NET80211_CFG_RATE) ||
	    (changed & NET80211_CFG_PHY_PARAMS)) {
		int spmbl = ATH5K_SPMBL_NO;
		u16 rate = dev->rates[dev->rate];
		u16 slowrate = dev->rates[dev->rtscts_rate];
		int i;

		if (dev->phy_flags & NET80211_PHY_USE_SHORT_PREAMBLE)
			spmbl = ATH5K_SPMBL_YES;

		for (i = 0; i < ATH5K_NR_RATES; i++) {
			if (ath5k_rates[i].bitrate == rate &&
			    (ath5k_rates[i].short_pmbl & spmbl))
				sc->hw_rate = ath5k_rates[i].hw_code;

			if (ath5k_rates[i].bitrate == slowrate &&
			    (ath5k_rates[i].short_pmbl & spmbl))
				sc->hw_rtscts_rate = ath5k_rates[i].hw_code;
		}
	}

	if (changed & NET80211_CFG_ASSOC) {
		sc->assoc = !!(dev->state & NET80211_ASSOCIATED);
		if (sc->assoc) {
			memcpy(ah->ah_bssid, dev->bssid, ETH_ALEN);
		} else {
			memset(ah->ah_bssid, 0xff, ETH_ALEN);
		}
		ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
	}

	return 0;
}

/*
 * o always accept unicast, broadcast, and multicast traffic
 * o multicast traffic for all BSSIDs will be enabled if mac80211
 *   says it should be
 * o maintain current state of phy ofdm or phy cck error reception.
 *   If the hardware detects any of these type of errors then
 *   ath5k_hw_get_rx_filter() will pass to us the respective
 *   hardware filters to be able to receive these type of frames.
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when scanning
 */
static void ath5k_configure_filter(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	u32 mfilt[2], rfilt;

	/* Enable all multicast */
	mfilt[0] = ~0;
	mfilt[1] = ~0;

	/* Enable data frames and beacons */
	rfilt = (AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
		 AR5K_RX_FILTER_MCAST | AR5K_RX_FILTER_BEACON);

	/* Set filters */
	ath5k_hw_set_rx_filter(ah, rfilt);

	/* Set multicast bits */
	ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);

	/* Set the cached hw filter flags; these will later actually
	 * be set in hw */
	sc->filter_flags = rfilt;
}
