/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"

#ifdef	IEEE80211_SUPPORT_SUPERG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/endian.h>

#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/ethernet.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_input.h>
#include <netproto/802_11/ieee80211_phy.h>
#include <netproto/802_11/ieee80211_superg.h>

/*
 * Atheros fast-frame encapsulation format.
 * FF max payload:
 * 802.2 + FFHDR + HPAD + 802.3 + 802.2 + 1500 + SPAD + 802.3 + 802.2 + 1500:
 *   8   +   4   +  4   +   14  +   8   + 1500 +  6   +   14  +   8   + 1500
 * = 3066
 */
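/*
 * Illustrative expansion of the sum above (a sketch only; the encap and
 * decap code below is authoritative).  Reading the components in order:
 *
 *   802.2  (8)    LLC/SNAP tunnel header, ether type ATH_FF_ETH_TYPE
 *   FFHDR  (4)    32-bit fast-frame header (bit layout defined below)
 *   HPAD   (4)    header alignment padding
 *   802.3  (14)   Ethernet header of the first subframe
 *   802.2  (8)    LLC/SNAP header of the first subframe
 *   1500          payload of the first subframe
 *   SPAD   (6)    separator padding before the second subframe
 *   802.3  (14)   Ethernet header of the second subframe
 *   802.2  (8)    LLC/SNAP header of the second subframe
 *   1500          payload of the second subframe
 */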
/* fast frame header is 32-bits */
#define	ATH_FF_PROTO	0x0000003f	/* protocol */
#define	ATH_FF_PROTO_S	0
#define	ATH_FF_FTYPE	0x000000c0	/* frame type */
#define	ATH_FF_FTYPE_S	6
#define	ATH_FF_HLEN32	0x00000300	/* optional hdr length */
#define	ATH_FF_HLEN32_S	8
#define	ATH_FF_SEQNUM	0x001ffc00	/* sequence number */
#define	ATH_FF_SEQNUM_S	10
#define	ATH_FF_OFFSET	0xffe00000	/* offset to 2nd payload */
#define	ATH_FF_OFFSET_S	21
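/*
 * Example (not compiled): given the 32-bit fast-frame header word in
 * host order, each field is recovered by masking and shifting, e.g.
 *
 *	proto  = (ff_hdr & ATH_FF_PROTO)  >> ATH_FF_PROTO_S;
 *	seqnum = (ff_hdr & ATH_FF_SEQNUM) >> ATH_FF_SEQNUM_S;
 *
 * which is exactly what the local MS() macro in ieee80211_ff_decap()
 * does; "ff_hdr" here is just a placeholder name for the word read
 * from the frame.
 */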

#define	ATH_FF_MAX_HDR_PAD	4
#define	ATH_FF_MAX_SEP_PAD	6
#define	ATH_FF_MAX_HDR		30

#define	ATH_FF_PROTO_L2TUNNEL	0	/* L2 tunnel protocol */
#define	ATH_FF_ETH_TYPE		0x88bd	/* Ether type for encapsulated frames */
#define	ATH_FF_SNAP_ORGCODE_0	0x00
#define	ATH_FF_SNAP_ORGCODE_1	0x03
#define	ATH_FF_SNAP_ORGCODE_2	0x7f

#define	ATH_FF_TXQMIN	2		/* min txq depth for staging */
#define	ATH_FF_TXQMAX	50		/* maximum # of queued frames allowed */
#define	ATH_FF_STAGEMAX	5		/* max waiting period for staged frame */

#define	ETHER_HEADER_COPY(dst, src) \
	memcpy(dst, src, sizeof(struct ether_header))

static	int ieee80211_ffppsmin = 2;	/* pps threshold for ff aggregation */
SYSCTL_INT(_net_wlan, OID_AUTO, ffppsmin, CTLFLAG_RW,
	&ieee80211_ffppsmin, 0, "min packet rate before fast-frame staging");
static	int ieee80211_ffagemax = -1;	/* max time frames held on stage q */
SYSCTL_PROC(_net_wlan, OID_AUTO, ffagemax, CTLTYPE_INT | CTLFLAG_RW,
	&ieee80211_ffagemax, 0, ieee80211_sysctl_msecs_ticks, "I",
	"max hold time for fast-frame staging (ms)");
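/*
 * Both knobs are exported under net.wlan (a sketch of expected usage,
 * not verified output): "sysctl net.wlan.ffppsmin" reads or sets the
 * packets-per-second threshold above which staging kicks in, and
 * "sysctl net.wlan.ffagemax" reads or sets the maximum staging hold
 * time in milliseconds (converted to/from ticks by
 * ieee80211_sysctl_msecs_ticks).
 */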

void
ieee80211_superg_attach(struct ieee80211com *ic)
{
	struct ieee80211_superg *sg;

#if defined(__DragonFly__)
	sg = (struct ieee80211_superg *) kmalloc(
		sizeof(struct ieee80211_superg), M_80211_VAP,
		M_INTWAIT | M_ZERO);
#else
	sg = (struct ieee80211_superg *) IEEE80211_MALLOC(
		sizeof(struct ieee80211_superg), M_80211_VAP,
		IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
#endif
	if (sg == NULL) {
		kprintf("%s: cannot allocate SuperG state block\n",
		    __func__);
		return;
	}
	ic->ic_superg = sg;

	/*
	 * Default to not being so aggressive for FF/AMSDU
	 * aging, otherwise we may hold a frame around
	 * for way too long before we expire it out.
	 */
	ieee80211_ffagemax = msecs_to_ticks(2);
}

void
ieee80211_superg_detach(struct ieee80211com *ic)
{
	if (ic->ic_superg != NULL) {
		IEEE80211_FREE(ic->ic_superg, M_80211_VAP);
		ic->ic_superg = NULL;
	}
}

void
ieee80211_superg_vattach(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;

	if (ic->ic_superg == NULL)	/* NB: can't do fast-frames w/o state */
		vap->iv_caps &= ~IEEE80211_C_FF;
	if (vap->iv_caps & IEEE80211_C_FF)
		vap->iv_flags |= IEEE80211_F_FF;
	/* NB: we only implement sta mode */
	if (vap->iv_opmode == IEEE80211_M_STA &&
	    (vap->iv_caps & IEEE80211_C_TURBOP))
		vap->iv_flags |= IEEE80211_F_TURBOP;
}

void
ieee80211_superg_vdetach(struct ieee80211vap *vap)
{
}

#define	ATH_OUI_BYTES		0x00, 0x03, 0x7f
/*
 * Add an Atheros vendor-specific (SuperG) information element to a frame.
 */
uint8_t *
ieee80211_add_ath(uint8_t *frm, uint8_t caps, ieee80211_keyix defkeyix)
{
	static const struct ieee80211_ath_ie info = {
		.ath_id		= IEEE80211_ELEMID_VENDOR,
		.ath_len	= sizeof(struct ieee80211_ath_ie) - 2,
		.ath_oui	= { ATH_OUI_BYTES },
		.ath_oui_type	= ATH_OUI_TYPE,
		.ath_oui_subtype= ATH_OUI_SUBTYPE,
		.ath_version	= ATH_OUI_VERSION,
	};
	struct ieee80211_ath_ie *ath = (struct ieee80211_ath_ie *) frm;

	memcpy(frm, &info, sizeof(info));
	ath->ath_capability = caps;
	if (defkeyix != IEEE80211_KEYIX_NONE) {
		ath->ath_defkeyix[0] = (defkeyix & 0xff);
		ath->ath_defkeyix[1] = ((defkeyix >> 8) & 0xff);
	} else {
		ath->ath_defkeyix[0] = 0xff;
		ath->ath_defkeyix[1] = 0x7f;
	}
	return frm + sizeof(info);
}
#undef ATH_OUI_BYTES
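/*
 * Resulting element layout, as implied by the initializer above (a
 * sketch; struct ieee80211_ath_ie is the authoritative definition):
 *
 *	[ath_id = IEEE80211_ELEMID_VENDOR][ath_len]
 *	[ath_oui = 00:03:7f][ath_oui_type][ath_oui_subtype][ath_version]
 *	[ath_capability][ath_defkeyix[0]][ath_defkeyix[1]]
 *
 * The default key index is stored little-endian (see le16dec() in
 * ieee80211_parse_ath() below); the encoded value 0x7fff means "none".
 */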

uint8_t *
ieee80211_add_athcaps(uint8_t *frm, const struct ieee80211_node *bss)
{
	const struct ieee80211vap *vap = bss->ni_vap;

	return ieee80211_add_ath(frm,
	    vap->iv_flags & IEEE80211_F_ATHEROS,
	    ((vap->iv_flags & IEEE80211_F_WPA) == 0 &&
	    bss->ni_authmode != IEEE80211_AUTH_8021X) ?
	    vap->iv_def_txkey : IEEE80211_KEYIX_NONE);
}

void
ieee80211_parse_ath(struct ieee80211_node *ni, uint8_t *ie)
{
	const struct ieee80211_ath_ie *ath =
		(const struct ieee80211_ath_ie *) ie;

	ni->ni_ath_flags = ath->ath_capability;
	ni->ni_ath_defkeyix = le16dec(&ath->ath_defkeyix);
}

int
ieee80211_parse_athparams(struct ieee80211_node *ni, uint8_t *frm,
	const struct ieee80211_frame *wh)
{
	struct ieee80211vap *vap = ni->ni_vap;
	const struct ieee80211_ath_ie *ath;
	u_int len = frm[1];
	int capschanged;
	uint16_t defkeyix;

	if (len < sizeof(struct ieee80211_ath_ie)-2) {
		IEEE80211_DISCARD_IE(vap,
		    IEEE80211_MSG_ELEMID | IEEE80211_MSG_SUPERG,
		    wh, "Atheros", "too short, len %u", len);
		return -1;
	}
	ath = (const struct ieee80211_ath_ie *)frm;
	capschanged = (ni->ni_ath_flags != ath->ath_capability);
	defkeyix = le16dec(ath->ath_defkeyix);
	if (capschanged || defkeyix != ni->ni_ath_defkeyix) {
		ni->ni_ath_flags = ath->ath_capability;
		ni->ni_ath_defkeyix = defkeyix;
		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "ath ie change: new caps 0x%x defkeyix 0x%x",
		    ni->ni_ath_flags, ni->ni_ath_defkeyix);
	}
	if (IEEE80211_ATH_CAP(vap, ni, ATHEROS_CAP_TURBO_PRIME)) {
		uint16_t curflags, newflags;

		/*
		 * Check for turbo mode switch.  Calculate flags
		 * for the new mode and effect the switch.
		 */
		newflags = curflags = vap->iv_ic->ic_bsschan->ic_flags;
		/* NB: BOOST is not in ic_flags, so get it from the ie */
		if (ath->ath_capability & ATHEROS_CAP_BOOST)
			newflags |= IEEE80211_CHAN_TURBO;
		else
			newflags &= ~IEEE80211_CHAN_TURBO;
		if (newflags != curflags)
			ieee80211_dturbo_switch(vap, newflags);
	}
	return capschanged;
}

/*
 * Decap the encapsulated frame pair and dispatch the first
 * for delivery.  The second frame is returned for delivery
 * via the normal path.
 */
struct mbuf *
ieee80211_ff_decap(struct ieee80211_node *ni, struct mbuf *m)
{
#define	FF_LLC_SIZE	(sizeof(struct ether_header) + sizeof(struct llc))
#define	MS(x,f)	(((x) & f) >> f##_S)
	struct ieee80211vap *vap = ni->ni_vap;
	struct llc *llc;
	uint32_t ath;
	struct mbuf *n;
	int framelen;

	/* NB: we assume caller does this check for us */
	KASSERT(IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF),
	    ("ff not negotiated"));
	/*
	 * Check for fast-frame tunnel encapsulation.
	 */
	if (m->m_pkthdr.len < 3*FF_LLC_SIZE)
		return m;
	if (m->m_len < FF_LLC_SIZE &&
	    (m = m_pullup(m, FF_LLC_SIZE)) == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame",
		    "%s", "m_pullup(llc) failed");
		vap->iv_stats.is_rx_tooshort++;
		return NULL;
	}
	llc = (struct llc *)(mtod(m, uint8_t *) +
	    sizeof(struct ether_header));
	if (llc->llc_snap.ether_type != htons(ATH_FF_ETH_TYPE))
		return m;
	m_adj(m, FF_LLC_SIZE);
	m_copydata(m, 0, sizeof(uint32_t), (caddr_t) &ath);
	if (MS(ath, ATH_FF_PROTO) != ATH_FF_PROTO_L2TUNNEL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame",
		    "unsupported tunnel protocol, header 0x%x", ath);
		vap->iv_stats.is_ff_badhdr++;
		m_freem(m);
		return NULL;
	}
	/* NB: skip header and alignment padding */
	m_adj(m, roundup(sizeof(uint32_t) - 2, 4) + 2);

	vap->iv_stats.is_ff_decap++;

	/*
	 * Decap the first frame, bust it apart from the
	 * second and deliver; then decap the second frame
	 * and return it to the caller for normal delivery.
	 */
	m = ieee80211_decap1(m, &framelen);
	if (m == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame", "%s", "first decap failed");
		vap->iv_stats.is_ff_tooshort++;
		return NULL;
	}
	n = m_split(m, framelen, M_NOWAIT);
	if (n == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame",
		    "%s", "unable to split encapsulated frames");
		vap->iv_stats.is_ff_split++;
		m_freem(m);			/* NB: must reclaim */
		return NULL;
	}
	/* XXX not right for WDS */
	vap->iv_deliver_data(vap, ni, m);	/* 1st of pair */

	/*
	 * Decap second frame.
	 */
	m_adj(n, roundup2(framelen, 4) - framelen);	/* padding */
	n = ieee80211_decap1(n, &framelen);
	if (n == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame", "%s", "second decap failed");
		vap->iv_stats.is_ff_tooshort++;
	}
	/* XXX verify framelen against mbuf contents */
	return n;				/* 2nd delivered by caller */
#undef MS
#undef FF_LLC_SIZE
}

/*
 * Fast frame encapsulation.  There must be two packets
 * chained with m_nextpkt.  We do header adjustment for
 * each, add the tunnel encapsulation, and then concatenate
 * the mbuf chains to form a single frame for transmission.
 */
struct mbuf *
ieee80211_ff_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
	struct ieee80211_key *key)
{
	struct mbuf *m2;
	struct ether_header eh1, eh2;
	struct llc *llc;
	struct mbuf *m;
	int pad;

	m2 = m1->m_nextpkt;
	if (m2 == NULL) {
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: only one frame\n", __func__);
		goto bad;
	}
	m1->m_nextpkt = NULL;

	/*
	 * Adjust to include 802.11 header requirement.
	 */
	KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!"));
	ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t));
	m1 = ieee80211_mbuf_adjust(vap, hdrspace, key, m1);
	if (m1 == NULL) {
		kprintf("%s: failed initial mbuf_adjust\n", __func__);
		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
		m_freem(m2);
		goto bad;
	}

	/*
	 * Copy the second frame's Ethernet header out of line
	 * and adjust for possible padding in case there isn't room
	 * at the end of the first frame.
	 */
	KASSERT(m2->m_len >= sizeof(eh2), ("no ethernet header!"));
	ETHER_HEADER_COPY(&eh2, mtod(m2, caddr_t));
	m2 = ieee80211_mbuf_adjust(vap, 4, NULL, m2);
	if (m2 == NULL) {
		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
		kprintf("%s: failed second mbuf_adjust\n", __func__);
		goto bad;
	}

	/*
	 * Now do tunnel encapsulation.  First, each
	 * frame gets a standard encapsulation.
	 */
	m1 = ieee80211_ff_encap1(vap, m1, &eh1);
	if (m1 == NULL)
		goto bad;
	m2 = ieee80211_ff_encap1(vap, m2, &eh2);
	if (m2 == NULL)
		goto bad;

	/*
	 * Pad leading frame to a 4-byte boundary.  If there
	 * is space at the end of the first frame, put it
	 * there; otherwise prepend to the front of the second
	 * frame.  We know doing the second will always work
	 * because we reserve space above.  We prefer appending
	 * as this typically has better DMA alignment properties.
	 */
	for (m = m1; m->m_next != NULL; m = m->m_next)
		;
	pad = roundup2(m1->m_pkthdr.len, 4) - m1->m_pkthdr.len;
	if (pad) {
		if (M_TRAILINGSPACE(m) < pad) {		/* prepend to second */
			m2->m_data -= pad;
			m2->m_len += pad;
			m2->m_pkthdr.len += pad;
		} else {				/* append to first */
			m->m_len += pad;
			m1->m_pkthdr.len += pad;
		}
	}

	/*
	 * A-MSDU's are just appended; the "I'm A-MSDU!" bit is in the
	 * QoS header.
	 *
	 * XXX optimize by prepending together
	 */
	m->m_next = m2;			/* NB: last mbuf from above */
	m1->m_pkthdr.len += m2->m_pkthdr.len;
	M_PREPEND(m1, sizeof(uint32_t)+2, M_NOWAIT);
	if (m1 == NULL) {		/* XXX cannot happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no space for tunnel header\n", __func__);
		vap->iv_stats.is_tx_nobuf++;
		return NULL;
	}
	memset(mtod(m1, void *), 0, sizeof(uint32_t)+2);

	M_PREPEND(m1, sizeof(struct llc), M_NOWAIT);
	if (m1 == NULL) {		/* XXX cannot happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no space for llc header\n", __func__);
		vap->iv_stats.is_tx_nobuf++;
		return NULL;
	}
	llc = mtod(m1, struct llc *);
	llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
	llc->llc_control = LLC_UI;
	llc->llc_snap.org_code[0] = ATH_FF_SNAP_ORGCODE_0;
	llc->llc_snap.org_code[1] = ATH_FF_SNAP_ORGCODE_1;
	llc->llc_snap.org_code[2] = ATH_FF_SNAP_ORGCODE_2;
	llc->llc_snap.ether_type = htons(ATH_FF_ETH_TYPE);

	vap->iv_stats.is_ff_encap++;

	return m1;
bad:
	vap->iv_stats.is_ff_encapfail++;
	if (m1 != NULL)
		m_freem(m1);
	if (m2 != NULL)
		m_freem(m2);
	return NULL;
}
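
/*
 * A sketch of how this is expected to be used (descriptive only, based
 * on the code in this file): the transmit path hands each frame to
 * ieee80211_ff_check(), which either stages it, returns it unchanged,
 * or returns a previously staged frame with the new frame chained via
 * m_nextpkt and M_FF set.  Frames marked M_FF are then combined by
 * ieee80211_ff_encap() (or ieee80211_amsdu_encap()) during normal
 * output encapsulation, and staged frames that age out are pushed
 * through ff_transmit() by ieee80211_ff_age().
 */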

/*
 * A-MSDU encapsulation.
 *
 * This assumes just two frames for now, since we're borrowing the
 * same queuing code and infrastructure as fast-frames.
 *
 * There must be two packets chained with m_nextpkt.
 * We do header adjustment for each, and then concatenate the mbuf chains
 * to form a single frame for transmission.
 */
struct mbuf *
ieee80211_amsdu_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
	struct ieee80211_key *key)
{
	struct mbuf *m2;
	struct ether_header eh1, eh2;
	struct mbuf *m;
	int pad;

	m2 = m1->m_nextpkt;
	if (m2 == NULL) {
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: only one frame\n", __func__);
		goto bad;
	}
	m1->m_nextpkt = NULL;

	/*
	 * Include A-MSDU header in adjusting header layout.
	 */
	KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!"));
	ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t));
	m1 = ieee80211_mbuf_adjust(vap,
		hdrspace + sizeof(struct llc) + sizeof(uint32_t) +
		    sizeof(struct ether_header),
		key, m1);
	if (m1 == NULL) {
		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
		m_freem(m2);
		goto bad;
	}

	/*
	 * Copy the second frame's Ethernet header out of line
	 * and adjust for encapsulation headers.  Note that
	 * we make room for padding in case there isn't room
	 * at the end of the first frame.
	 */
	KASSERT(m2->m_len >= sizeof(eh2), ("no ethernet header!"));
	ETHER_HEADER_COPY(&eh2, mtod(m2, caddr_t));
	m2 = ieee80211_mbuf_adjust(vap, 4, NULL, m2);
	if (m2 == NULL) {
		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
		goto bad;
	}

	/*
	 * Now do tunnel encapsulation.  First, each
	 * frame gets a standard encapsulation.
	 */
	m1 = ieee80211_ff_encap1(vap, m1, &eh1);
	if (m1 == NULL)
		goto bad;
	m2 = ieee80211_ff_encap1(vap, m2, &eh2);
	if (m2 == NULL)
		goto bad;

	/*
	 * Pad leading frame to a 4-byte boundary.  If there
	 * is space at the end of the first frame, put it
	 * there; otherwise prepend to the front of the second
	 * frame.  We know doing the second will always work
	 * because we reserve space above.  We prefer appending
	 * as this typically has better DMA alignment properties.
	 */
	for (m = m1; m->m_next != NULL; m = m->m_next)
		;
	pad = roundup2(m1->m_pkthdr.len, 4) - m1->m_pkthdr.len;
	if (pad) {
		if (M_TRAILINGSPACE(m) < pad) {		/* prepend to second */
			m2->m_data -= pad;
			m2->m_len += pad;
			m2->m_pkthdr.len += pad;
		} else {				/* append to first */
			m->m_len += pad;
			m1->m_pkthdr.len += pad;
		}
	}

	/*
	 * Now, stick 'em together.
	 */
	m->m_next = m2;			/* NB: last mbuf from above */
	m1->m_pkthdr.len += m2->m_pkthdr.len;

	vap->iv_stats.is_amsdu_encap++;

	return m1;
bad:
	vap->iv_stats.is_amsdu_encapfail++;
	if (m1 != NULL)
		m_freem(m1);
	if (m2 != NULL)
		m_freem(m2);
	return NULL;
}


static void
ff_transmit(struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	int error;

	IEEE80211_TX_LOCK_ASSERT(vap->iv_ic);

	/* encap and xmit */
	m = ieee80211_encap(vap, ni, m);
	if (m != NULL) {
		struct ifnet *ifp = vap->iv_ifp;

		error = ieee80211_parent_xmitpkt(ic, m);
		if (!error)
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	} else
		ieee80211_free_node(ni);
}

/*
 * Flush frames to the device; note we reuse the linked list
 * the frames were stored on and stop at the sentinel (unchanged),
 * which may be non-NULL.
 */
static void
ff_flush(struct mbuf *head, struct mbuf *last)
{
	struct mbuf *m, *next;
	struct ieee80211_node *ni;
	struct ieee80211vap *vap;

	for (m = head; m != last; m = next) {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;

		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		vap = ni->ni_vap;

		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: flush frame, age %u", __func__, M_AGE_GET(m));
		vap->iv_stats.is_ff_flush++;

		ff_transmit(ni, m);
	}
}

/*
 * Age frames on the staging queue.
 *
 * This is called without the comlock held, but it does all its work
 * behind the comlock.  Because of this, the staging queue may be
 * serviced between the caller's check and the moment this function
 * takes the lock; thus simply asserting that the queue has work in
 * it may fail.
 *
 * See PR kern/174283 for more details.
 */
void
ieee80211_ff_age(struct ieee80211com *ic, struct ieee80211_stageq *sq,
    int quanta)
{
	struct mbuf *m, *head;
	struct ieee80211_node *ni;

#if 0
	KASSERT(sq->head != NULL, ("stageq empty"));
#endif

	IEEE80211_LOCK(ic);
	head = sq->head;
	while ((m = sq->head) != NULL && M_AGE_GET(m) < quanta) {
		int tid = WME_AC_TO_TID(M_WME_GETAC(m));

		/* clear staging ref to frame */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni->ni_tx_superg[tid] == m, ("staging queue empty"));
		ni->ni_tx_superg[tid] = NULL;

		sq->head = m->m_nextpkt;
		sq->depth--;
	}
	if (m == NULL)
		sq->tail = NULL;
	else
		M_AGE_SUB(m, quanta);
	IEEE80211_UNLOCK(ic);

	IEEE80211_TX_LOCK(ic);
	ff_flush(head, m);
	IEEE80211_TX_UNLOCK(ic);
}

static void
stageq_add(struct ieee80211com *ic, struct ieee80211_stageq *sq, struct mbuf *m)
{
	int age = ieee80211_ffagemax;

	IEEE80211_LOCK_ASSERT(ic);

	if (sq->tail != NULL) {
		sq->tail->m_nextpkt = m;
		age -= M_AGE_GET(sq->head);
	} else
		sq->head = m;
	KASSERT(age >= 0, ("age %d", age));
	M_AGE_SET(m, age);
	m->m_nextpkt = NULL;
	sq->tail = m;
	sq->depth++;
}

static void
stageq_remove(struct ieee80211com *ic, struct ieee80211_stageq *sq, struct mbuf *mstaged)
{
	struct mbuf *m, *mprev;

	IEEE80211_LOCK_ASSERT(ic);

	mprev = NULL;
	for (m = sq->head; m != NULL; m = m->m_nextpkt) {
		if (m == mstaged) {
			if (mprev == NULL)
				sq->head = m->m_nextpkt;
			else
				mprev->m_nextpkt = m->m_nextpkt;
			if (sq->tail == m)
				sq->tail = mprev;
			sq->depth--;
			return;
		}
		mprev = m;
	}
	kprintf("%s: packet not found\n", __func__);
}

static uint32_t
ff_approx_txtime(struct ieee80211_node *ni,
	const struct mbuf *m1, const struct mbuf *m2)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211vap *vap = ni->ni_vap;
	uint32_t framelen;
	uint32_t frame_time;

	/*
	 * Approximate the frame length to be transmitted.  A swag that
	 * adds the following maximal values to the mbuf payload:
	 *   - 32: 802.11 encap + CRC
	 *   - 24: encryption overhead (if wep bit)
	 *   - 4 + 6: fast-frame header and padding
	 *   - 16: 2 LLC FF tunnel headers
	 *   - 14: 1 802.3 FF tunnel header (mbuf already accounts for 2nd)
	 */
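	/*
	 * Illustrative example: for m1->m_pkthdr.len == 1500 this gives
	 * 1500 + 32 + 4 + 6 + 30 = 1572 bytes, plus 24 if privacy is
	 * enabled and plus m2->m_pkthdr.len for the partner frame.
	 */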
	framelen = m1->m_pkthdr.len + 32 +
	    ATH_FF_MAX_HDR_PAD + ATH_FF_MAX_SEP_PAD + ATH_FF_MAX_HDR;
	if (vap->iv_flags & IEEE80211_F_PRIVACY)
		framelen += 24;
	if (m2 != NULL)
		framelen += m2->m_pkthdr.len;

	/*
	 * For now, we assume non-shortgi, 20MHz, just because I want to
	 * at least test 802.11n.
	 */
	if (ni->ni_txrate & IEEE80211_RATE_MCS)
		frame_time = ieee80211_compute_duration_ht(framelen,
		    ni->ni_txrate,
		    IEEE80211_HT_RC_2_STREAMS(ni->ni_txrate),
		    0, /* isht40 */
		    0); /* isshortgi */
	else
		frame_time = ieee80211_compute_duration(ic->ic_rt, framelen,
			    ni->ni_txrate, 0);
	return (frame_time);
}

/*
 * Check if the supplied frame can be partnered with an existing
 * or pending frame.  Return a reference to any frame that should be
 * sent on return; otherwise return NULL.
 */
struct mbuf *
ieee80211_ff_check(struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211_superg *sg = ic->ic_superg;
	const int pri = M_WME_GETAC(m);
	struct ieee80211_stageq *sq;
	struct ieee80211_tx_ampdu *tap;
	struct mbuf *mstaged;
	uint32_t txtime, limit;

	IEEE80211_TX_UNLOCK_ASSERT(ic);

	/*
	 * Check if the supplied frame can be aggregated.
	 *
	 * NB: we allow EAPOL frames to be aggregated with other unicast
	 *     traffic.  Do 802.1x EAPOL frames go out in the clear?  If
	 *     so, they could not be aggregated with other frame types
	 *     when encryption is enabled.
	 */
	IEEE80211_LOCK(ic);
	tap = &ni->ni_tx_ampdu[WME_AC_TO_TID(pri)];
	mstaged = ni->ni_tx_superg[WME_AC_TO_TID(pri)];
	/* XXX NOTE: reusing packet counter state from A-MPDU */
	/*
	 * XXX NOTE: this means we're double-counting; it should just
	 * be done in ieee80211_output.c once for both superg and A-MPDU.
	 */
	ieee80211_txampdu_count_packet(tap);

	/*
	 * When not in station mode never aggregate a multicast
	 * frame; this ensures, for example, that a combined frame
	 * does not require multiple encryption keys.
	 */
	if (vap->iv_opmode != IEEE80211_M_STA &&
	    ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost)) {
		/* XXX flush staged frame? */
		IEEE80211_UNLOCK(ic);
		return m;
	}
	/*
	 * If there is no frame to combine with and the pps is
	 * too low, do not attempt to aggregate this frame.
	 */
	if (mstaged == NULL &&
	    ieee80211_txampdu_getpps(tap) < ieee80211_ffppsmin) {
		IEEE80211_UNLOCK(ic);
		return m;
	}
	sq = &sg->ff_stageq[pri];
	/*
	 * Check the txop limit to ensure the aggregate fits.
	 */
	limit = IEEE80211_TXOP_TO_US(
		ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit);
	if (limit != 0 &&
	    (txtime = ff_approx_txtime(ni, m, mstaged)) > limit) {
		/*
		 * Aggregate too long, return to the caller for direct
		 * transmission.  In addition, flush any pending frame
		 * before sending this one.
		 */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: txtime %u exceeds txop limit %u\n",
		    __func__, txtime, limit);

		ni->ni_tx_superg[WME_AC_TO_TID(pri)] = NULL;
		if (mstaged != NULL)
			stageq_remove(ic, sq, mstaged);
		IEEE80211_UNLOCK(ic);

		if (mstaged != NULL) {
			IEEE80211_TX_LOCK(ic);
			IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
			    "%s: flush staged frame", __func__);
			/* encap and xmit */
			ff_transmit(ni, mstaged);
			IEEE80211_TX_UNLOCK(ic);
		}
		return m;		/* NB: original frame */
	}
	/*
	 * An aggregation candidate.  If there's a frame to partner
	 * with then combine and return for processing.  Otherwise
	 * save this frame and wait for a partner to show up (or
	 * the frame to be flushed).  Note that staged frames also
	 * hold their node reference.
	 */
	if (mstaged != NULL) {
		ni->ni_tx_superg[WME_AC_TO_TID(pri)] = NULL;
		stageq_remove(ic, sq, mstaged);
		IEEE80211_UNLOCK(ic);

		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: aggregate fast-frame", __func__);
		/*
		 * Release the node reference; we only need
		 * the one already in mstaged.
		 */
		KASSERT(mstaged->m_pkthdr.rcvif == (void *)ni,
		    ("rcvif %p ni %p", mstaged->m_pkthdr.rcvif, ni));
		ieee80211_free_node(ni);

		m->m_nextpkt = NULL;
		mstaged->m_nextpkt = m;
		mstaged->m_flags |= M_FF; /* NB: mark for encap work */
	} else {
		KASSERT(ni->ni_tx_superg[WME_AC_TO_TID(pri)] == NULL,
		    ("ni_tx_superg[]: %p",
		    ni->ni_tx_superg[WME_AC_TO_TID(pri)]));
		ni->ni_tx_superg[WME_AC_TO_TID(pri)] = m;

		stageq_add(ic, sq, m);
		IEEE80211_UNLOCK(ic);

		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: stage frame, %u queued", __func__, sq->depth);
		/* NB: mstaged is NULL */
	}
	return mstaged;
}
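
/*
 * Summary of ieee80211_ff_check() return values, as implemented above:
 * NULL means the frame was placed on the staging queue (and keeps its
 * node reference); the original mbuf means no aggregation should be
 * attempted; and a previously staged mbuf with M_FF set carries the
 * new frame chained on m_nextpkt, ready for fast-frame encapsulation.
 */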

struct mbuf *
ieee80211_amsdu_check(struct ieee80211_node *ni, struct mbuf *m)
{
	/*
	 * XXX TODO: actually enforce the node support
	 * and HTCAP requirements for the maximum A-MSDU
	 * size.
	 */

	/* First: software A-MSDU transmit? */
	if (! ieee80211_amsdu_tx_ok(ni))
		return (m);

	/* Next - EAPOL? Nope, don't aggregate; we don't QoS encap them */
	if (m->m_flags & (M_EAPOL | M_MCAST | M_BCAST))
		return (m);

	/* Next - needs to be a data frame, non-broadcast, etc */
	if (ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost))
		return (m);

	return (ieee80211_ff_check(ni, m));
}

void
ieee80211_ff_node_init(struct ieee80211_node *ni)
{
	/*
	 * Clean FF state on re-associate.  This handles the case
	 * where a station leaves w/o notifying us and then returns
	 * before node is reaped for inactivity.
	 */
	ieee80211_ff_node_cleanup(ni);
}

void
ieee80211_ff_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211_superg *sg = ic->ic_superg;
	struct mbuf *m, *next_m, *head;
	int tid;

	IEEE80211_LOCK(ic);
	head = NULL;
	for (tid = 0; tid < WME_NUM_TID; tid++) {
		int ac = TID_TO_WME_AC(tid);
		/*
		 * XXX Initialise the packet counter.
		 *
		 * This may be double-work for 11n stations;
		 * but without it we never setup things.
		 */
		ieee80211_txampdu_init_pps(&ni->ni_tx_ampdu[tid]);
		m = ni->ni_tx_superg[tid];
		if (m != NULL) {
			ni->ni_tx_superg[tid] = NULL;
			stageq_remove(ic, &sg->ff_stageq[ac], m);
			m->m_nextpkt = head;
			head = m;
		}
	}
	IEEE80211_UNLOCK(ic);

	/*
	 * Free mbufs, taking care to not dereference the mbuf after
	 * we free it (hence grabbing m_nextpkt before we free it.)
	 */
	m = head;
	while (m != NULL) {
		next_m = m->m_nextpkt;
		m_freem(m);
		ieee80211_free_node(ni);
		m = next_m;
	}
}

/*
 * Switch between turbo and non-turbo operating modes.
 * Use the specified channel flags to locate the new
 * channel, update 802.11 state, and then call back into
 * the driver to effect the change.
 */
void
ieee80211_dturbo_switch(struct ieee80211vap *vap, int newflags)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_channel *chan;

	chan = ieee80211_find_channel(ic, ic->ic_bsschan->ic_freq, newflags);
	if (chan == NULL) {		/* XXX should not happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no channel with freq %u flags 0x%x\n",
		    __func__, ic->ic_bsschan->ic_freq, newflags);
		return;
	}

	IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
	    "%s: %s -> %s (freq %u flags 0x%x)\n", __func__,
	    ieee80211_phymode_name[ieee80211_chan2mode(ic->ic_bsschan)],
	    ieee80211_phymode_name[ieee80211_chan2mode(chan)],
	    chan->ic_freq, chan->ic_flags);

	ic->ic_bsschan = chan;
	ic->ic_prevchan = ic->ic_curchan;
	ic->ic_curchan = chan;
	ic->ic_rt = ieee80211_get_ratetable(chan);
	ic->ic_set_channel(ic);
	ieee80211_radiotap_chan_change(ic);
	/* NB: do not need to reset ERP state 'cuz we're in sta mode */
}

/*
 * Return the current ``state'' of an Atheros capability.
 * If associated in station mode, report the negotiated
 * setting.  Otherwise report the current setting.
 */
static int
getathcap(struct ieee80211vap *vap, int cap)
{
	if (vap->iv_opmode == IEEE80211_M_STA &&
	    vap->iv_state == IEEE80211_S_RUN)
		return IEEE80211_ATH_CAP(vap, vap->iv_bss, cap) != 0;
	else
		return (vap->iv_flags & cap) != 0;
}

static int
superg_ioctl_get80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
	switch (ireq->i_type) {
	case IEEE80211_IOC_FF:
		ireq->i_val = getathcap(vap, IEEE80211_F_FF);
		break;
	case IEEE80211_IOC_TURBOP:
		ireq->i_val = getathcap(vap, IEEE80211_F_TURBOP);
		break;
	default:
		return ENOSYS;
	}
	return 0;
}
IEEE80211_IOCTL_GET(superg, superg_ioctl_get80211);

static int
superg_ioctl_set80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
	switch (ireq->i_type) {
	case IEEE80211_IOC_FF:
		if (ireq->i_val) {
			if ((vap->iv_caps & IEEE80211_C_FF) == 0)
				return EOPNOTSUPP;
			vap->iv_flags |= IEEE80211_F_FF;
		} else
			vap->iv_flags &= ~IEEE80211_F_FF;
		return ENETRESET;
	case IEEE80211_IOC_TURBOP:
		if (ireq->i_val) {
			if ((vap->iv_caps & IEEE80211_C_TURBOP) == 0)
				return EOPNOTSUPP;
			vap->iv_flags |= IEEE80211_F_TURBOP;
		} else
			vap->iv_flags &= ~IEEE80211_F_TURBOP;
		return ENETRESET;
	default:
		return ENOSYS;
	}
	return 0;
}
IEEE80211_IOCTL_SET(superg, superg_ioctl_set80211);

#endif	/* IEEE80211_SUPPORT_SUPERG */