1 /* $OpenBSD: if_rtwn.c,v 1.6 2015/08/28 00:03:53 deraadt Exp $ */
2
3 /*-
4 * Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr>
5 * Copyright (c) 2015 Stefan Sperling <stsp@openbsd.org>
6 * Copyright (c) 2016 Andriy Voskoboinyk <avos@FreeBSD.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include <sys/cdefs.h>
22 #include "opt_wlan.h"
23
24 #include <sys/param.h>
25 #include <sys/lock.h>
26 #include <sys/mutex.h>
27 #include <sys/mbuf.h>
28 #include <sys/kernel.h>
29 #include <sys/socket.h>
30 #include <sys/systm.h>
31 #include <sys/malloc.h>
32 #include <sys/queue.h>
33 #include <sys/taskqueue.h>
34 #include <sys/bus.h>
35 #include <sys/endian.h>
36
37 #include <machine/bus.h>
38 #include <machine/resource.h>
39 #include <sys/rman.h>
40
41 #include <net/if.h>
42 #include <net/ethernet.h>
43 #include <net/if_media.h>
44
45 #include <net80211/ieee80211_var.h>
46
47 #include <dev/rtwn/if_rtwnreg.h>
48 #include <dev/rtwn/if_rtwnvar.h>
49 #include <dev/rtwn/if_rtwn_debug.h>
50 #include <dev/rtwn/if_rtwn_rx.h>
51 #include <dev/rtwn/if_rtwn_task.h>
52 #include <dev/rtwn/if_rtwn_tx.h>
53
54 #include <dev/rtwn/pci/rtwn_pci_var.h>
55 #include <dev/rtwn/pci/rtwn_pci_rx.h>
56
57 void
rtwn_pci_dma_map_addr(void * arg,bus_dma_segment_t * segs,int nsegs,int error)58 rtwn_pci_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
59 int error)
60 {
61
62 if (error != 0)
63 return;
64 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
65 *(bus_addr_t *)arg = segs[0].ds_addr;
66 }
67
/*
 * (Re)initialize one Rx descriptor: store the buffer length and DMA
 * address, then hand the descriptor to the hardware by setting the OWN
 * bit.  The last descriptor in the ring (idx == count - 1) also gets
 * the EOR flag so the chip wraps back to the first slot.
 */
void
rtwn_pci_setup_rx_desc(struct rtwn_pci_softc *pc,
    struct rtwn_rx_stat_pci *desc, bus_addr_t addr, size_t len, int idx)
{

	memset(desc, 0, sizeof(*desc));
	desc->rxdw0 = htole32(SM(RTWN_RXDW0_PKTLEN, len) |
	    ((idx == RTWN_PCI_RX_LIST_COUNT - 1) ? RTWN_RXDW0_EOR : 0));
	desc->rxbufaddr = htole32(addr);
	/*
	 * Write barrier: the length/address stores above must be visible
	 * to the device before it can observe the OWN bit below.
	 */
	bus_space_barrier(pc->pc_st, pc->pc_sh, 0, pc->pc_mapsize,
	    BUS_SPACE_BARRIER_WRITE);
	desc->rxdw0 |= htole32(RTWN_RXDW0_OWN);
}
81
82 static void
rtwn_pci_rx_frame(struct rtwn_pci_softc * pc)83 rtwn_pci_rx_frame(struct rtwn_pci_softc *pc)
84 {
85 struct rtwn_softc *sc = &pc->pc_sc;
86 struct rtwn_rx_ring *ring = &pc->rx_ring;
87 struct rtwn_rx_stat_pci *rx_desc = &ring->desc[ring->cur];
88 struct rtwn_rx_data *rx_data = &ring->rx_data[ring->cur];
89 struct ieee80211com *ic = &sc->sc_ic;
90 struct ieee80211_node *ni;
91 uint32_t rxdw0;
92 struct mbuf *m, *m1;
93 int infosz, pktlen, shift, error;
94
95 /* Dump Rx descriptor. */
96 RTWN_DPRINTF(sc, RTWN_DEBUG_RECV_DESC,
97 "%s: dw: 0 %08X, 1 %08X, 2 %08X, 3 %08X, 4 %08X, tsfl %08X, "
98 "addr: %08X (64: %08X)\n",
99 __func__, le32toh(rx_desc->rxdw0), le32toh(rx_desc->rxdw1),
100 le32toh(rx_desc->rxdw2), le32toh(rx_desc->rxdw3),
101 le32toh(rx_desc->rxdw4), le32toh(rx_desc->tsf_low),
102 le32toh(rx_desc->rxbufaddr), le32toh(rx_desc->rxbufaddr64));
103
104 rxdw0 = le32toh(rx_desc->rxdw0);
105 if (__predict_false(rxdw0 & (RTWN_RXDW0_CRCERR | RTWN_RXDW0_ICVERR))) {
106 /*
107 * This should not happen since we setup our Rx filter
108 * to not receive these frames.
109 */
110 RTWN_DPRINTF(sc, RTWN_DEBUG_RECV,
111 "%s: RX flags error (%s)\n", __func__,
112 rxdw0 & RTWN_RXDW0_CRCERR ? "CRC" : "ICV");
113 goto fail;
114 }
115
116 pktlen = MS(rxdw0, RTWN_RXDW0_PKTLEN);
117 if (__predict_false(pktlen < sizeof(struct ieee80211_frame_ack) ||
118 pktlen > MJUMPAGESIZE)) {
119 RTWN_DPRINTF(sc, RTWN_DEBUG_RECV,
120 "%s: frame is too short/long: %d\n", __func__, pktlen);
121 goto fail;
122 }
123
124 infosz = MS(rxdw0, RTWN_RXDW0_INFOSZ) * 8;
125 shift = MS(rxdw0, RTWN_RXDW0_SHIFT);
126
127 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
128 if (__predict_false(m1 == NULL)) {
129 device_printf(sc->sc_dev, "%s: could not allocate RX mbuf\n",
130 __func__);
131 goto fail;
132 }
133 bus_dmamap_sync(ring->data_dmat, rx_data->map, BUS_DMASYNC_POSTREAD);
134 bus_dmamap_unload(ring->data_dmat, rx_data->map);
135
136 error = bus_dmamap_load(ring->data_dmat, rx_data->map, mtod(m1, void *),
137 MJUMPAGESIZE, rtwn_pci_dma_map_addr, &rx_data->paddr, 0);
138 if (error != 0) {
139 m_freem(m1);
140
141 error = bus_dmamap_load(ring->data_dmat, rx_data->map,
142 mtod(rx_data->m, void *), MJUMPAGESIZE,
143 rtwn_pci_dma_map_addr, &rx_data->paddr, BUS_DMA_NOWAIT);
144 if (error != 0)
145 panic("%s: could not load old RX mbuf",
146 device_get_name(sc->sc_dev));
147
148 goto fail;
149 }
150
151 /* Finalize mbuf. */
152 m = rx_data->m;
153 rx_data->m = m1;
154 m->m_pkthdr.len = m->m_len = pktlen + infosz + shift;
155
156 ni = rtwn_rx_common(sc, m, rx_desc);
157
158 RTWN_DPRINTF(sc, RTWN_DEBUG_RECV,
159 "%s: Rx frame len %d, infosz %d, shift %d\n",
160 __func__, pktlen, infosz, shift);
161
162 /* Send the frame to the 802.11 layer. */
163 RTWN_UNLOCK(sc);
164 if (ni != NULL) {
165 (void)ieee80211_input_mimo(ni, m);
166 /* Node is no longer needed. */
167 ieee80211_free_node(ni);
168 } else
169 (void)ieee80211_input_mimo_all(ic, m);
170
171 RTWN_LOCK(sc);
172
173 return;
174
175 fail:
176 counter_u64_add(ic->ic_ierrors, 1);
177 }
178
179 static int
rtwn_pci_rx_buf_copy(struct rtwn_pci_softc * pc)180 rtwn_pci_rx_buf_copy(struct rtwn_pci_softc *pc)
181 {
182 struct rtwn_rx_ring *ring = &pc->rx_ring;
183 struct rtwn_rx_stat_pci *rx_desc = &ring->desc[ring->cur];
184 struct rtwn_rx_data *rx_data = &ring->rx_data[ring->cur];
185 uint32_t rxdw0;
186 int desc_size, pktlen;
187
188 /*
189 * NB: tx_report() / c2h_report() expects to see USB Rx
190 * descriptor - same as for PCIe, but without rxbufaddr* fields.
191 */
192 desc_size = sizeof(struct rtwn_rx_stat_common);
193 KASSERT(sizeof(pc->pc_rx_buf) >= desc_size,
194 ("adjust size for PCIe Rx buffer!"));
195
196 memcpy(pc->pc_rx_buf, rx_desc, desc_size);
197
198 rxdw0 = le32toh(rx_desc->rxdw0);
199 pktlen = MS(rxdw0, RTWN_RXDW0_PKTLEN);
200
201 if (pktlen > sizeof(pc->pc_rx_buf) - desc_size)
202 {
203 /* Looks like an ordinary Rx frame. */
204 return (desc_size);
205 }
206
207 bus_dmamap_sync(ring->data_dmat, rx_data->map, BUS_DMASYNC_POSTREAD);
208 memcpy(pc->pc_rx_buf + desc_size, mtod(rx_data->m, void *), pktlen);
209
210 return (desc_size + pktlen);
211 }
212
/*
 * Handle a hardware Tx report (per-frame rate control feedback) that
 * arrived on the Rx ring.  Only expected while net80211 rate control
 * is in use; anything else indicates a firmware/driver mismatch.
 */
static void
rtwn_pci_tx_report(struct rtwn_pci_softc *pc, int len)
{
	struct rtwn_softc *sc = &pc->pc_sc;

	if (sc->sc_ratectl != RTWN_RATECTL_NET80211) {
		/* shouldn't happen */
		device_printf(sc->sc_dev,
		    "%s called while ratectl = %d!\n",
		    __func__, sc->sc_ratectl);
		return;
	}

	/* Node-table lock protects the per-node ratectl state updates. */
	RTWN_NT_LOCK(sc);
	rtwn_handle_tx_report(sc, pc->pc_rx_buf, len);
	RTWN_NT_UNLOCK(sc);

#ifdef IEEE80211_SUPPORT_SUPERG
	/*
	 * NB: this will be executed only when the 'report' bit is set.
	 */
	if (sc->sc_tx_n_active > 0 && --sc->sc_tx_n_active <= 1)
		rtwn_cmd_sleepable(sc, NULL, 0, rtwn_ff_flush_all);
#endif
}
238
239 static void
rtwn_pci_c2h_report(struct rtwn_pci_softc * pc,int len)240 rtwn_pci_c2h_report(struct rtwn_pci_softc *pc, int len)
241 {
242 rtwn_handle_c2h_report(&pc->pc_sc, pc->pc_rx_buf, len);
243 }
244
/*
 * Reclaim completed Tx descriptors on queue 'qid': free or complete
 * the mbufs the hardware has finished with, credit the queue count,
 * and resume transmission if the queue had been marked full.
 */
static void
rtwn_pci_tx_done(struct rtwn_softc *sc, int qid)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_tx_ring *ring = &pc->tx_ring[qid];
	struct rtwn_tx_desc_common *desc;
	struct rtwn_tx_data *data;

	RTWN_DPRINTF(sc, RTWN_DEBUG_INTR, "%s: qid %d, last %d, cur %d\n",
	    __func__, qid, ring->last, ring->cur);

	bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Walk from the oldest outstanding slot toward the producer index. */
	while(ring->last != ring->cur) {
		data = &ring->tx_data[ring->last];
		/* Descriptors are variable-sized; index by txdesc_len. */
		desc = (struct rtwn_tx_desc_common *)
		    ((uint8_t *)ring->desc + sc->txdesc_len * ring->last);

		KASSERT(data->m != NULL, ("no mbuf"));

		/* Stop at the first descriptor still owned by the device. */
		if (desc->flags0 & RTWN_FLAGS0_OWN)
			break;

		/* Unmap and free mbuf. */
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);

		if (data->ni != NULL) {	/* not a beacon frame */
			/* Releases the node reference and frees the mbuf. */
			ieee80211_tx_complete(data->ni, data->m, 0);

			data->ni = NULL;
			ring->queued--;
			KASSERT(ring->queued >= 0,
			    ("ring->queued (qid %d) underflow!\n", qid));
		} else
			m_freem(data->m);

		data->m = NULL;
		ring->last = (ring->last + 1) % RTWN_PCI_TX_LIST_COUNT;
#ifndef D4054
		/* Re-arm the Tx watchdog while work remains, else clear it. */
		if (ring->queued > 0)
			sc->sc_tx_timer = 5;
		else
			sc->sc_tx_timer = 0;
#endif
	}

	/* Queue has room again: clear the full bit and restart output. */
	if ((sc->qfullmsk & (1 << qid)) != 0 &&
	    ring->queued < (RTWN_PCI_TX_LIST_COUNT - 1)) {
		sc->qfullmsk &= ~(1 << qid);
		rtwn_start(sc);
	}

#ifdef IEEE80211_SUPPORT_SUPERG
	/*
	 * If the TX active queue drops below a certain
	 * threshold, ensure we age fast-frames out so they're
	 * transmitted.
	 */
	if (sc->sc_ratectl != RTWN_RATECTL_NET80211 && ring->queued <= 1) {
		/*
		 * XXX TODO: just make this a callout timer schedule
		 * so we can flush the FF staging queue if we're
		 * approaching idle.
		 */
		rtwn_cmd_sleepable(sc, NULL, 0, rtwn_ff_flush_all);
	}
#endif
}
316
/*
 * Drain the Rx ring: for each descriptor the hardware has released,
 * classify its contents and dispatch it as a data frame, a Tx report
 * or a C2H event, then return the descriptor to the hardware.
 */
static void
rtwn_pci_rx_done(struct rtwn_softc *sc)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_rx_ring *ring = &pc->rx_ring;
	struct rtwn_rx_stat_pci *rx_desc;
	struct rtwn_rx_data *rx_data;
	int len;

	bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTREAD);

	for (;;) {
		rx_desc = &ring->desc[ring->cur];
		rx_data = &ring->rx_data[ring->cur];

		/* Stop at the first descriptor still owned by the device. */
		if (le32toh(rx_desc->rxdw0) & RTWN_RXDW0_OWN)
			break;

		/* Snapshot descriptor (+ small payload) into pc_rx_buf. */
		len = rtwn_pci_rx_buf_copy(pc);

		switch (rtwn_classify_intr(sc, pc->pc_rx_buf, len)) {
		case RTWN_RX_DATA:
			rtwn_pci_rx_frame(pc);
			break;
		case RTWN_RX_TX_REPORT:
			rtwn_pci_tx_report(pc, len);
			break;
		case RTWN_RX_OTHER:
			rtwn_pci_c2h_report(pc, len);
			break;
		default:
			/* NOTREACHED */
			KASSERT(0, ("unknown Rx classification code"));
			break;
		}

		/* Update / reset RX descriptor (and set OWN bit). */
		rtwn_pci_setup_rx_desc(pc, rx_desc, rx_data->paddr,
		    MJUMPAGESIZE, ring->cur);

		/* The dispatch above may have stopped the interface. */
		if (!(sc->sc_flags & RTWN_RUNNING))
			return;

		/* NB: device can reuse current descriptor. */
		bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
		    BUS_DMASYNC_POSTREAD);

		/* Advance only if the device has not refilled this slot. */
		if (le32toh(rx_desc->rxdw0) & RTWN_RXDW0_OWN)
			ring->cur = (ring->cur + 1) % RTWN_PCI_RX_LIST_COUNT;
	}
}
368
369 void
rtwn_pci_intr(void * arg)370 rtwn_pci_intr(void *arg)
371 {
372 struct rtwn_softc *sc = arg;
373 struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
374 int i, status, tx_rings;
375
376 RTWN_LOCK(sc);
377 status = rtwn_pci_get_intr_status(pc, &tx_rings);
378 RTWN_DPRINTF(sc, RTWN_DEBUG_INTR, "%s: status %08X, tx_rings %08X\n",
379 __func__, status, tx_rings);
380 if (status == 0 && tx_rings == 0)
381 goto unlock;
382
383 if (status & (RTWN_PCI_INTR_RX | RTWN_PCI_INTR_TX_REPORT)) {
384 rtwn_pci_rx_done(sc);
385 if (!(sc->sc_flags & RTWN_RUNNING))
386 goto unlock;
387 }
388
389 if (tx_rings != 0)
390 for (i = 0; i < RTWN_PCI_NTXQUEUES; i++)
391 if (tx_rings & (1 << i))
392 rtwn_pci_tx_done(sc, i);
393
394 if (sc->sc_flags & RTWN_RUNNING)
395 rtwn_pci_enable_intr(pc);
396 unlock:
397 RTWN_UNLOCK(sc);
398 }
399