/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"

#ifdef	IEEE80211_SUPPORT_SUPERG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/endian.h>

#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/ethernet.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_input.h>
#include <netproto/802_11/ieee80211_phy.h>
#include <netproto/802_11/ieee80211_superg.h>

/*
 * Atheros fast-frame encapsulation format.
 * FF max payload:
 * 802.2 + FFHDR + HPAD + 802.3 + 802.2 + 1500 + SPAD + 802.3 + 802.2 + 1500:
 *   8   +   4   +  4   +   14  +   8   + 1500 +  6   +   14  +   8   + 1500
 * = 3066
 */
/* fast frame header is 32 bits */
#define	ATH_FF_PROTO	0x0000003f	/* protocol */
#define	ATH_FF_PROTO_S	0
#define	ATH_FF_FTYPE	0x000000c0	/* frame type */
#define	ATH_FF_FTYPE_S	6
#define	ATH_FF_HLEN32	0x00000300	/* optional hdr length */
#define	ATH_FF_HLEN32_S	8
#define	ATH_FF_SEQNUM	0x001ffc00	/* sequence number */
#define	ATH_FF_SEQNUM_S	10
#define	ATH_FF_OFFSET	0xffe00000	/* offset to 2nd payload */
#define	ATH_FF_OFFSET_S	21
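
/*
 * Each field is recovered as (hdr & MASK) >> MASK_S; see the local MS()
 * macro in ieee80211_ff_decap below.  Illustrative worked example, not
 * from the original source: a header word of 0x0c801400 decodes as
 * proto 0 (L2 tunnel), ftype 0, hlen32 0, seqnum 5 and a 2nd-payload
 * offset of 100, since (0x0c801400 & ATH_FF_SEQNUM) >> ATH_FF_SEQNUM_S == 5
 * and (0x0c801400 & ATH_FF_OFFSET) >> ATH_FF_OFFSET_S == 100.
 */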

#define	ATH_FF_MAX_HDR_PAD	4
#define	ATH_FF_MAX_SEP_PAD	6
#define	ATH_FF_MAX_HDR		30

#define	ATH_FF_PROTO_L2TUNNEL	0	/* L2 tunnel protocol */
#define	ATH_FF_ETH_TYPE		0x88bd	/* Ether type for encapsulated frames */
#define	ATH_FF_SNAP_ORGCODE_0	0x00
#define	ATH_FF_SNAP_ORGCODE_1	0x03
#define	ATH_FF_SNAP_ORGCODE_2	0x7f

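/*
 * Together with LLC_SNAP_LSAP (0xaa) and LLC_UI (0x03), these values form
 * the tunnel LLC/SNAP header written by ieee80211_ff_encap below; as an
 * illustration (not from the original source), the eight header bytes on
 * the wire are aa aa 03 00 03 7f 88 bd.
 */
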
#define	ATH_FF_TXQMIN	2		/* min txq depth for staging */
#define	ATH_FF_TXQMAX	50		/* maximum # of queued frames allowed */
#define	ATH_FF_STAGEMAX	5		/* max waiting period for staged frame */

#define	ETHER_HEADER_COPY(dst, src) \
	memcpy(dst, src, sizeof(struct ether_header))

static	int ieee80211_ffppsmin = 2;	/* pps threshold for ff aggregation */
SYSCTL_INT(_net_wlan, OID_AUTO, ffppsmin, CTLFLAG_RW,
	&ieee80211_ffppsmin, 0, "min packet rate before fast-frame staging");
static	int ieee80211_ffagemax = -1;	/* max time frames held on stage q */
SYSCTL_PROC(_net_wlan, OID_AUTO, ffagemax, CTLTYPE_INT | CTLFLAG_RW,
	&ieee80211_ffagemax, 0, ieee80211_sysctl_msecs_ticks, "I",
	"max hold time for fast-frame staging (ms)");
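
/*
 * Both knobs are exported under net.wlan and can be tuned at runtime;
 * illustrative usage (not from the original source):
 *
 *	# sysctl net.wlan.ffppsmin=4
 *	# sysctl net.wlan.ffagemax=5
 *
 * ffagemax is expressed in milliseconds and converted to ticks by the
 * ieee80211_sysctl_msecs_ticks handler above.
 */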

void
ieee80211_superg_attach(struct ieee80211com *ic)
{
	struct ieee80211_superg *sg;

#if defined(__DragonFly__)
	sg = (struct ieee80211_superg *) kmalloc(
		sizeof(struct ieee80211_superg), M_80211_VAP,
		M_INTWAIT | M_ZERO);
#else
	sg = (struct ieee80211_superg *) IEEE80211_MALLOC(
		sizeof(struct ieee80211_superg), M_80211_VAP,
		IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
#endif
	if (sg == NULL) {
		kprintf("%s: cannot allocate SuperG state block\n",
		    __func__);
		return;
	}
	ic->ic_superg = sg;

	/*
	 * Default to not being so aggressive for FF/AMSDU
	 * aging, otherwise we may hold a frame around
	 * for way too long before we expire it out.
	 */
	ieee80211_ffagemax = msecs_to_ticks(2);
}

void
ieee80211_superg_detach(struct ieee80211com *ic)
{
	if (ic->ic_superg != NULL) {
		IEEE80211_FREE(ic->ic_superg, M_80211_VAP);
		ic->ic_superg = NULL;
	}
}

void
ieee80211_superg_vattach(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;

	if (ic->ic_superg == NULL)	/* NB: can't do fast-frames w/o state */
		vap->iv_caps &= ~IEEE80211_C_FF;
	if (vap->iv_caps & IEEE80211_C_FF)
		vap->iv_flags |= IEEE80211_F_FF;
	/* NB: we only implement sta mode */
	if (vap->iv_opmode == IEEE80211_M_STA &&
	    (vap->iv_caps & IEEE80211_C_TURBOP))
		vap->iv_flags |= IEEE80211_F_TURBOP;
}

void
ieee80211_superg_vdetach(struct ieee80211vap *vap)
{
}

#define	ATH_OUI_BYTES		0x00, 0x03, 0x7f
/*
 * Add an Atheros vendor-specific information element to a frame.
 */
uint8_t *
ieee80211_add_ath(uint8_t *frm, uint8_t caps, ieee80211_keyix defkeyix)
{
	static const struct ieee80211_ath_ie info = {
		.ath_id		= IEEE80211_ELEMID_VENDOR,
		.ath_len	= sizeof(struct ieee80211_ath_ie) - 2,
		.ath_oui	= { ATH_OUI_BYTES },
		.ath_oui_type	= ATH_OUI_TYPE,
		.ath_oui_subtype= ATH_OUI_SUBTYPE,
		.ath_version	= ATH_OUI_VERSION,
	};
	struct ieee80211_ath_ie *ath = (struct ieee80211_ath_ie *) frm;

	memcpy(frm, &info, sizeof(info));
	ath->ath_capability = caps;
	if (defkeyix != IEEE80211_KEYIX_NONE) {
		ath->ath_defkeyix[0] = (defkeyix & 0xff);
		ath->ath_defkeyix[1] = ((defkeyix >> 8) & 0xff);
	} else {
		ath->ath_defkeyix[0] = 0xff;
		ath->ath_defkeyix[1] = 0x7f;
	}
	return frm + sizeof(info);
}
#undef ATH_OUI_BYTES
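
/*
 * Illustrative sketch (not from the original source) of how a frame
 * constructor consumes this helper: the caller passes a cursor into the
 * frame body and continues appending at the returned pointer, e.g.
 *
 *	frm = ieee80211_add_ath(frm, vap->iv_flags & IEEE80211_F_ATHEROS,
 *	    IEEE80211_KEYIX_NONE);
 *
 * ieee80211_add_athcaps below wraps exactly this call, adding the
 * WPA/802.1x-aware choice of default transmit key index.
 */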

uint8_t *
ieee80211_add_athcaps(uint8_t *frm, const struct ieee80211_node *bss)
{
	const struct ieee80211vap *vap = bss->ni_vap;

	return ieee80211_add_ath(frm,
	    vap->iv_flags & IEEE80211_F_ATHEROS,
	    ((vap->iv_flags & IEEE80211_F_WPA) == 0 &&
	    bss->ni_authmode != IEEE80211_AUTH_8021X) ?
	    vap->iv_def_txkey : IEEE80211_KEYIX_NONE);
}

void
ieee80211_parse_ath(struct ieee80211_node *ni, uint8_t *ie)
{
	const struct ieee80211_ath_ie *ath =
		(const struct ieee80211_ath_ie *) ie;

	ni->ni_ath_flags = ath->ath_capability;
	ni->ni_ath_defkeyix = le16dec(&ath->ath_defkeyix);
}

int
ieee80211_parse_athparams(struct ieee80211_node *ni, uint8_t *frm,
	const struct ieee80211_frame *wh)
{
	struct ieee80211vap *vap = ni->ni_vap;
	const struct ieee80211_ath_ie *ath;
	u_int len = frm[1];
	int capschanged;
	uint16_t defkeyix;

	if (len < sizeof(struct ieee80211_ath_ie)-2) {
		IEEE80211_DISCARD_IE(vap,
		    IEEE80211_MSG_ELEMID | IEEE80211_MSG_SUPERG,
		    wh, "Atheros", "too short, len %u", len);
		return -1;
	}
	ath = (const struct ieee80211_ath_ie *)frm;
	capschanged = (ni->ni_ath_flags != ath->ath_capability);
	defkeyix = le16dec(ath->ath_defkeyix);
	if (capschanged || defkeyix != ni->ni_ath_defkeyix) {
		ni->ni_ath_flags = ath->ath_capability;
		ni->ni_ath_defkeyix = defkeyix;
		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "ath ie change: new caps 0x%x defkeyix 0x%x",
		    ni->ni_ath_flags, ni->ni_ath_defkeyix);
	}
	if (IEEE80211_ATH_CAP(vap, ni, ATHEROS_CAP_TURBO_PRIME)) {
		uint16_t curflags, newflags;

		/*
		 * Check for turbo mode switch.  Calculate flags
		 * for the new mode and effect the switch.
		 */
		newflags = curflags = vap->iv_ic->ic_bsschan->ic_flags;
		/* NB: BOOST is not in ic_flags, so get it from the ie */
		if (ath->ath_capability & ATHEROS_CAP_BOOST)
			newflags |= IEEE80211_CHAN_TURBO;
		else
			newflags &= ~IEEE80211_CHAN_TURBO;
		if (newflags != curflags)
			ieee80211_dturbo_switch(vap, newflags);
	}
	return capschanged;
}

/*
 * Decap the encapsulated frame pair and dispatch the first
 * for delivery.  The second frame is returned for delivery
 * via the normal path.
 */
struct mbuf *
ieee80211_ff_decap(struct ieee80211_node *ni, struct mbuf *m)
{
#define	FF_LLC_SIZE	(sizeof(struct ether_header) + sizeof(struct llc))
#define	MS(x,f)	(((x) & f) >> f##_S)
	struct ieee80211vap *vap = ni->ni_vap;
	struct llc *llc;
	uint32_t ath;
	struct mbuf *n;
	int framelen;

	/* NB: we assume caller does this check for us */
	KASSERT(IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF),
	    ("ff not negotiated"));
	/*
	 * Check for fast-frame tunnel encapsulation.
	 */
	if (m->m_pkthdr.len < 3*FF_LLC_SIZE)
		return m;
	if (m->m_len < FF_LLC_SIZE &&
	    (m = m_pullup(m, FF_LLC_SIZE)) == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame",
		    "%s", "m_pullup(llc) failed");
		vap->iv_stats.is_rx_tooshort++;
		return NULL;
	}
	llc = (struct llc *)(mtod(m, uint8_t *) +
	    sizeof(struct ether_header));
	if (llc->llc_snap.ether_type != htons(ATH_FF_ETH_TYPE))
		return m;
	m_adj(m, FF_LLC_SIZE);
	m_copydata(m, 0, sizeof(uint32_t), (caddr_t) &ath);
	if (MS(ath, ATH_FF_PROTO) != ATH_FF_PROTO_L2TUNNEL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame",
		    "unsupported tunnel protocol, header 0x%x", ath);
		vap->iv_stats.is_ff_badhdr++;
		m_freem(m);
		return NULL;
	}
	/* NB: skip header and alignment padding */
	m_adj(m, roundup(sizeof(uint32_t) - 2, 4) + 2);

	vap->iv_stats.is_ff_decap++;

	/*
	 * Decap the first frame, bust it apart from the
	 * second and deliver; then decap the second frame
	 * and return it to the caller for normal delivery.
	 */
	m = ieee80211_decap1(m, &framelen);
	if (m == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame", "%s", "first decap failed");
		vap->iv_stats.is_ff_tooshort++;
		return NULL;
	}
	n = m_split(m, framelen, M_NOWAIT);
	if (n == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame",
		    "%s", "unable to split encapsulated frames");
		vap->iv_stats.is_ff_split++;
		m_freem(m);			/* NB: must reclaim */
		return NULL;
	}
	/* XXX not right for WDS */
	vap->iv_deliver_data(vap, ni, m);	/* 1st of pair */

	/*
	 * Decap second frame.
	 */
	m_adj(n, roundup2(framelen, 4) - framelen);	/* padding */
	n = ieee80211_decap1(n, &framelen);
	if (n == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame", "%s", "second decap failed");
		vap->iv_stats.is_ff_tooshort++;
	}
	/* XXX verify framelen against mbuf contents */
	return n;				/* 2nd delivered by caller */
#undef MS
#undef FF_LLC_SIZE
}

/*
 * Fast frame encapsulation.  There must be two packets
 * chained with m_nextpkt.  We do header adjustment for
 * each, add the tunnel encapsulation, and then concatenate
 * the mbuf chains to form a single frame for transmission.
 */
struct mbuf *
ieee80211_ff_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
	struct ieee80211_key *key)
{
	struct mbuf *m2;
	struct ether_header eh1, eh2;
	struct llc *llc;
	struct mbuf *m;
	int pad;

	m2 = m1->m_nextpkt;
	if (m2 == NULL) {
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: only one frame\n", __func__);
		goto bad;
	}
	m1->m_nextpkt = NULL;

	/*
	 * Adjust to include 802.11 header requirement.
	 */
	KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!"));
	ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t));
	m1 = ieee80211_mbuf_adjust(vap, hdrspace, key, m1);
	if (m1 == NULL) {
		kprintf("%s: failed initial mbuf_adjust\n", __func__);
		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
		m_freem(m2);
		goto bad;
	}

	/*
	 * Copy second frame's Ethernet header out of line
	 * and adjust for possible padding in case there isn't room
	 * at the end of first frame.
	 */
	KASSERT(m2->m_len >= sizeof(eh2), ("no ethernet header!"));
	ETHER_HEADER_COPY(&eh2, mtod(m2, caddr_t));
	m2 = ieee80211_mbuf_adjust(vap, 4, NULL, m2);
	if (m2 == NULL) {
		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
		kprintf("%s: failed second mbuf_adjust\n", __func__);
		goto bad;
	}

	/*
	 * Now do tunnel encapsulation.  First, each
	 * frame gets a standard encapsulation.
	 */
	m1 = ieee80211_ff_encap1(vap, m1, &eh1);
	if (m1 == NULL)
		goto bad;
	m2 = ieee80211_ff_encap1(vap, m2, &eh2);
	if (m2 == NULL)
		goto bad;

	/*
	 * Pad leading frame to a 4-byte boundary.  If there
	 * is space at the end of the first frame, put it
	 * there; otherwise prepend to the front of the second
	 * frame.  We know doing the second will always work
	 * because we reserve space above.  We prefer appending
	 * as this typically has better DMA alignment properties.
	 */
	for (m = m1; m->m_next != NULL; m = m->m_next)
		;
	pad = roundup2(m1->m_pkthdr.len, 4) - m1->m_pkthdr.len;
	if (pad) {
		if (M_TRAILINGSPACE(m) < pad) {		/* prepend to second */
			m2->m_data -= pad;
			m2->m_len += pad;
			m2->m_pkthdr.len += pad;
		} else {				/* append to first */
			m->m_len += pad;
			m1->m_pkthdr.len += pad;
		}
	}

	/*
	 * A-MSDU's are just appended; the "I'm A-MSDU!" bit is in the
	 * QoS header.
	 *
	 * XXX optimize by prepending together
	 */
	m->m_next = m2;			/* NB: last mbuf from above */
	m1->m_pkthdr.len += m2->m_pkthdr.len;
	M_PREPEND(m1, sizeof(uint32_t)+2, M_NOWAIT);
	if (m1 == NULL) {		/* XXX cannot happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no space for tunnel header\n", __func__);
		vap->iv_stats.is_tx_nobuf++;
		return NULL;
	}
	memset(mtod(m1, void *), 0, sizeof(uint32_t)+2);

	M_PREPEND(m1, sizeof(struct llc), M_NOWAIT);
	if (m1 == NULL) {		/* XXX cannot happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no space for llc header\n", __func__);
		vap->iv_stats.is_tx_nobuf++;
		return NULL;
	}
	llc = mtod(m1, struct llc *);
	llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
	llc->llc_control = LLC_UI;
	llc->llc_snap.org_code[0] = ATH_FF_SNAP_ORGCODE_0;
	llc->llc_snap.org_code[1] = ATH_FF_SNAP_ORGCODE_1;
	llc->llc_snap.org_code[2] = ATH_FF_SNAP_ORGCODE_2;
	llc->llc_snap.ether_type = htons(ATH_FF_ETH_TYPE);

	vap->iv_stats.is_ff_encap++;

	return m1;
bad:
	vap->iv_stats.is_ff_encapfail++;
	if (m1 != NULL)
		m_freem(m1);
	if (m2 != NULL)
		m_freem(m2);
	return NULL;
}

/*
 * A-MSDU encapsulation.
 *
 * This assumes just two frames for now, since we're borrowing the
 * same queuing code and infrastructure as fast-frames.
 *
 * There must be two packets chained with m_nextpkt.
 * We do header adjustment for each, and then concatenate the mbuf chains
 * to form a single frame for transmission.
 */
struct mbuf *
ieee80211_amsdu_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
	struct ieee80211_key *key)
{
	struct mbuf *m2;
	struct ether_header eh1, eh2;
	struct mbuf *m;
	int pad;

	m2 = m1->m_nextpkt;
	if (m2 == NULL) {
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: only one frame\n", __func__);
		goto bad;
	}
	m1->m_nextpkt = NULL;

	/*
	 * Include A-MSDU header in adjusting header layout.
	 */
	KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!"));
	ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t));
	m1 = ieee80211_mbuf_adjust(vap,
		hdrspace + sizeof(struct llc) + sizeof(uint32_t) +
		    sizeof(struct ether_header),
		key, m1);
	if (m1 == NULL) {
		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
		m_freem(m2);
		goto bad;
	}

	/*
	 * Copy second frame's Ethernet header out of line
	 * and adjust for encapsulation headers.  Note that
	 * we make room for padding in case there isn't room
	 * at the end of first frame.
	 */
	KASSERT(m2->m_len >= sizeof(eh2), ("no ethernet header!"));
	ETHER_HEADER_COPY(&eh2, mtod(m2, caddr_t));
	m2 = ieee80211_mbuf_adjust(vap, 4, NULL, m2);
	if (m2 == NULL) {
		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
		goto bad;
	}

	/*
	 * Now do tunnel encapsulation.  First, each
	 * frame gets a standard encapsulation.
	 */
	m1 = ieee80211_ff_encap1(vap, m1, &eh1);
	if (m1 == NULL)
		goto bad;
	m2 = ieee80211_ff_encap1(vap, m2, &eh2);
	if (m2 == NULL)
		goto bad;

	/*
	 * Pad leading frame to a 4-byte boundary.  If there
	 * is space at the end of the first frame, put it
	 * there; otherwise prepend to the front of the second
	 * frame.  We know doing the second will always work
	 * because we reserve space above.  We prefer appending
	 * as this typically has better DMA alignment properties.
	 */
	for (m = m1; m->m_next != NULL; m = m->m_next)
		;
	pad = roundup2(m1->m_pkthdr.len, 4) - m1->m_pkthdr.len;
	if (pad) {
		if (M_TRAILINGSPACE(m) < pad) {		/* prepend to second */
			m2->m_data -= pad;
			m2->m_len += pad;
			m2->m_pkthdr.len += pad;
		} else {				/* append to first */
			m->m_len += pad;
			m1->m_pkthdr.len += pad;
		}
	}

	/*
	 * Now, stick 'em together.
	 */
	m->m_next = m2;			/* NB: last mbuf from above */
	m1->m_pkthdr.len += m2->m_pkthdr.len;

	vap->iv_stats.is_amsdu_encap++;

	return m1;
bad:
	vap->iv_stats.is_amsdu_encapfail++;
	if (m1 != NULL)
		m_freem(m1);
	if (m2 != NULL)
		m_freem(m2);
	return NULL;
}


static void
ff_transmit(struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	int error;

	IEEE80211_TX_LOCK_ASSERT(vap->iv_ic);

	/* encap and xmit */
	m = ieee80211_encap(vap, ni, m);
	if (m != NULL) {
		struct ifnet *ifp = vap->iv_ifp;

		error = ieee80211_parent_xmitpkt(ic, m);
		if (!error)
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	} else
		ieee80211_free_node(ni);
}

/*
 * Flush frames to device; note we re-use the linked list
 * the frames were stored on and use the sentinel (unchanged)
 * which may be non-NULL.
 */
static void
ff_flush(struct mbuf *head, struct mbuf *last)
{
	struct mbuf *m, *next;
	struct ieee80211_node *ni;
	struct ieee80211vap *vap;

	for (m = head; m != last; m = next) {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;

		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		vap = ni->ni_vap;

		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: flush frame, age %u", __func__, M_AGE_GET(m));
		vap->iv_stats.is_ff_flush++;

		ff_transmit(ni, m);
	}
}

/*
 * Age frames on the staging queue.
 *
 * This is called without the comlock held, but it does all its work
 * behind the comlock.  Because of this, it's possible that the
 * staging queue will be serviced between the function which called
 * it and now; thus simply checking that the queue has work in it
 * may fail.
 *
 * See PR kern/174283 for more details.
 */
void
ieee80211_ff_age(struct ieee80211com *ic, struct ieee80211_stageq *sq,
    int quanta)
{
	struct mbuf *m, *head;
	struct ieee80211_node *ni;

#if 0
	KASSERT(sq->head != NULL, ("stageq empty"));
#endif

	IEEE80211_LOCK(ic);
	head = sq->head;
	while ((m = sq->head) != NULL && M_AGE_GET(m) < quanta) {
		int tid = WME_AC_TO_TID(M_WME_GETAC(m));

		/* clear staging ref to frame */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni->ni_tx_superg[tid] == m, ("staging queue empty"));
		ni->ni_tx_superg[tid] = NULL;

		sq->head = m->m_nextpkt;
		sq->depth--;
	}
	if (m == NULL)
		sq->tail = NULL;
	else
		M_AGE_SUB(m, quanta);
	IEEE80211_UNLOCK(ic);

	IEEE80211_TX_LOCK(ic);
	ff_flush(head, m);
	IEEE80211_TX_UNLOCK(ic);
}

static void
stageq_add(struct ieee80211com *ic, struct ieee80211_stageq *sq, struct mbuf *m)
{
	int age = ieee80211_ffagemax;

	IEEE80211_LOCK_ASSERT(ic);

	if (sq->tail != NULL) {
		sq->tail->m_nextpkt = m;
		age -= M_AGE_GET(sq->head);
	} else
		sq->head = m;
	KASSERT(age >= 0, ("age %d", age));
	M_AGE_SET(m, age);
	m->m_nextpkt = NULL;
	sq->tail = m;
	sq->depth++;
}

static void
stageq_remove(struct ieee80211com *ic, struct ieee80211_stageq *sq, struct mbuf *mstaged)
{
	struct mbuf *m, *mprev;

	IEEE80211_LOCK_ASSERT(ic);

	mprev = NULL;
	for (m = sq->head; m != NULL; m = m->m_nextpkt) {
		if (m == mstaged) {
			if (mprev == NULL)
				sq->head = m->m_nextpkt;
			else
				mprev->m_nextpkt = m->m_nextpkt;
			if (sq->tail == m)
				sq->tail = mprev;
			sq->depth--;
			return;
		}
		mprev = m;
	}
	kprintf("%s: packet not found\n", __func__);
}

static uint32_t
ff_approx_txtime(struct ieee80211_node *ni,
	const struct mbuf *m1, const struct mbuf *m2)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211vap *vap = ni->ni_vap;
	uint32_t framelen;
	uint32_t frame_time;

	/*
	 * Approximate the frame length to be transmitted. A swag that adds
	 * the following maximal values to the mbuf payload:
	 *   - 32: 802.11 encap + CRC
	 *   - 24: encryption overhead (if wep bit)
	 *   - 4 + 6: fast-frame header and padding
	 *   - 16: 2 LLC FF tunnel headers
	 *   - 14: 1 802.3 FF tunnel header (mbuf already accounts for 2nd)
	 */
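	/*
	 * Illustrative worked example (not from the original source): for
	 * two 1500-byte payloads on an unencrypted link this evaluates to
	 * 1500 + 32 + ATH_FF_MAX_HDR_PAD (4) + ATH_FF_MAX_SEP_PAD (6) +
	 * ATH_FF_MAX_HDR (30) + 1500 = 3072 bytes of estimated frame length
	 * before the airtime is computed from the current tx rate.
	 */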
	framelen = m1->m_pkthdr.len + 32 +
	    ATH_FF_MAX_HDR_PAD + ATH_FF_MAX_SEP_PAD + ATH_FF_MAX_HDR;
	if (vap->iv_flags & IEEE80211_F_PRIVACY)
		framelen += 24;
	if (m2 != NULL)
		framelen += m2->m_pkthdr.len;

	/*
	 * For now, we assume non-shortgi, 20MHz, just because I want to
	 * at least test 802.11n.
	 */
	if (ni->ni_txrate & IEEE80211_RATE_MCS)
		frame_time = ieee80211_compute_duration_ht(framelen,
		    ni->ni_txrate,
		    IEEE80211_HT_RC_2_STREAMS(ni->ni_txrate),
		    0, /* isht40 */
		    0); /* isshortgi */
	else
		frame_time = ieee80211_compute_duration(ic->ic_rt, framelen,
			    ni->ni_txrate, 0);
	return (frame_time);
}

/*
 * Check if the supplied frame can be partnered with an existing
 * or pending frame.  Return a reference to any frame that should be
 * sent on return; otherwise return NULL.
 */
struct mbuf *
ieee80211_ff_check(struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211_superg *sg = ic->ic_superg;
	const int pri = M_WME_GETAC(m);
	struct ieee80211_stageq *sq;
	struct ieee80211_tx_ampdu *tap;
	struct mbuf *mstaged;
	uint32_t txtime, limit;

	IEEE80211_TX_UNLOCK_ASSERT(ic);

	/*
	 * Check if the supplied frame can be aggregated.
	 *
	 * NB: we allow EAPOL frames to be aggregated with other ucast traffic.
	 *     Do 802.1x EAPOL frames proceed in the clear? Then they couldn't
	 *     be aggregated with other types of frames when encryption is on?
	 */
	IEEE80211_LOCK(ic);
	tap = &ni->ni_tx_ampdu[WME_AC_TO_TID(pri)];
	mstaged = ni->ni_tx_superg[WME_AC_TO_TID(pri)];
	/* XXX NOTE: reusing packet counter state from A-MPDU */
	/*
	 * XXX NOTE: this means we're double-counting; it should just
	 * be done in ieee80211_output.c once for both superg and A-MPDU.
	 */
	ieee80211_txampdu_count_packet(tap);

	/*
	 * When not in station mode never aggregate a multicast
	 * frame; this ensures, for example, that a combined frame
	 * does not require multiple encryption keys.
	 */
	if (vap->iv_opmode != IEEE80211_M_STA &&
	    ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost)) {
		/* XXX flush staged frame? */
		IEEE80211_UNLOCK(ic);
		return m;
	}
	/*
	 * If there is no frame to combine with and the pps is
	 * too low, do not attempt to aggregate this frame.
	 */
	if (mstaged == NULL &&
	    ieee80211_txampdu_getpps(tap) < ieee80211_ffppsmin) {
		IEEE80211_UNLOCK(ic);
		return m;
	}
	sq = &sg->ff_stageq[pri];
	/*
	 * Check the txop limit to ensure the aggregate fits.
	 */
	limit = IEEE80211_TXOP_TO_US(
		ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit);
	if (limit != 0 &&
	    (txtime = ff_approx_txtime(ni, m, mstaged)) > limit) {
		/*
		 * Aggregate too long, return to the caller for direct
		 * transmission.  In addition, flush any pending frame
		 * before sending this one.
		 */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: txtime %u exceeds txop limit %u\n",
		    __func__, txtime, limit);

		ni->ni_tx_superg[WME_AC_TO_TID(pri)] = NULL;
		if (mstaged != NULL)
			stageq_remove(ic, sq, mstaged);
		IEEE80211_UNLOCK(ic);

		if (mstaged != NULL) {
			IEEE80211_TX_LOCK(ic);
			IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
			    "%s: flush staged frame", __func__);
			/* encap and xmit */
			ff_transmit(ni, mstaged);
			IEEE80211_TX_UNLOCK(ic);
		}
		return m;		/* NB: original frame */
	}
	/*
	 * An aggregation candidate.  If there's a frame to partner
	 * with then combine and return for processing.  Otherwise
	 * save this frame and wait for a partner to show up (or
	 * the frame to be flushed).  Note that staged frames also
	 * hold their node reference.
	 */
	if (mstaged != NULL) {
		ni->ni_tx_superg[WME_AC_TO_TID(pri)] = NULL;
		stageq_remove(ic, sq, mstaged);
		IEEE80211_UNLOCK(ic);

		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: aggregate fast-frame", __func__);
		/*
		 * Release the node reference; we only need
		 * the one already in mstaged.
		 */
		KASSERT(mstaged->m_pkthdr.rcvif == (void *)ni,
		    ("rcvif %p ni %p", mstaged->m_pkthdr.rcvif, ni));
		ieee80211_free_node(ni);

		m->m_nextpkt = NULL;
		mstaged->m_nextpkt = m;
		mstaged->m_flags |= M_FF; /* NB: mark for encap work */
	} else {
		KASSERT(ni->ni_tx_superg[WME_AC_TO_TID(pri)]== NULL,
		    ("ni_tx_superg[]: %p",
		    ni->ni_tx_superg[WME_AC_TO_TID(pri)]));
		ni->ni_tx_superg[WME_AC_TO_TID(pri)] = m;

		stageq_add(ic, sq, m);
		IEEE80211_UNLOCK(ic);

		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: stage frame, %u queued", __func__, sq->depth);
		/* NB: mstaged is NULL */
	}
	return mstaged;
}

struct mbuf *
ieee80211_amsdu_check(struct ieee80211_node *ni, struct mbuf *m)
{
	/*
	 * XXX TODO: actually enforce the node support
	 * and HTCAP requirements for the maximum A-MSDU
	 * size.
	 */

	/* First: software A-MSDU transmit? */
	if (! ieee80211_amsdu_tx_ok(ni))
		return (m);

	/* Next - EAPOL? Nope, don't aggregate; we don't QoS encap them */
	if (m->m_flags & (M_EAPOL | M_MCAST | M_BCAST))
		return (m);

	/* Next - needs to be a data frame, non-broadcast, etc */
	if (ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost))
		return (m);

	return (ieee80211_ff_check(ni, m));
}

void
ieee80211_ff_node_init(struct ieee80211_node *ni)
{
	/*
	 * Clean FF state on re-associate.  This handles the case
	 * where a station leaves w/o notifying us and then returns
	 * before node is reaped for inactivity.
	 */
	ieee80211_ff_node_cleanup(ni);
}

void
ieee80211_ff_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211_superg *sg = ic->ic_superg;
	struct mbuf *m, *next_m, *head;
	int tid;

	IEEE80211_LOCK(ic);
	head = NULL;
	for (tid = 0; tid < WME_NUM_TID; tid++) {
		int ac = TID_TO_WME_AC(tid);
		/*
		 * XXX Initialise the packet counter.
		 *
		 * This may be double-work for 11n stations;
		 * but without it we never setup things.
		 */
		ieee80211_txampdu_init_pps(&ni->ni_tx_ampdu[tid]);
		m = ni->ni_tx_superg[tid];
		if (m != NULL) {
			ni->ni_tx_superg[tid] = NULL;
			stageq_remove(ic, &sg->ff_stageq[ac], m);
			m->m_nextpkt = head;
			head = m;
		}
	}
	IEEE80211_UNLOCK(ic);

	/*
	 * Free mbufs, taking care to not dereference the mbuf after
	 * we free it (hence grabbing m_nextpkt before we free it.)
	 */
	m = head;
	while (m != NULL) {
		next_m = m->m_nextpkt;
		m_freem(m);
		ieee80211_free_node(ni);
		m = next_m;
	}
}

/*
 * Switch between turbo and non-turbo operating modes.
 * Use the specified channel flags to locate the new
 * channel, update 802.11 state, and then call back into
 * the driver to effect the change.
 */
void
ieee80211_dturbo_switch(struct ieee80211vap *vap, int newflags)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_channel *chan;

	chan = ieee80211_find_channel(ic, ic->ic_bsschan->ic_freq, newflags);
	if (chan == NULL) {		/* XXX should not happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no channel with freq %u flags 0x%x\n",
		    __func__, ic->ic_bsschan->ic_freq, newflags);
		return;
	}

	IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
	    "%s: %s -> %s (freq %u flags 0x%x)\n", __func__,
	    ieee80211_phymode_name[ieee80211_chan2mode(ic->ic_bsschan)],
	    ieee80211_phymode_name[ieee80211_chan2mode(chan)],
	    chan->ic_freq, chan->ic_flags);

	ic->ic_bsschan = chan;
	ic->ic_prevchan = ic->ic_curchan;
	ic->ic_curchan = chan;
	ic->ic_rt = ieee80211_get_ratetable(chan);
	ic->ic_set_channel(ic);
	ieee80211_radiotap_chan_change(ic);
	/* NB: do not need to reset ERP state 'cuz we're in sta mode */
}

/*
 * Return the current ``state'' of an Atheros capability.
 * If associated in station mode report the negotiated
 * setting. Otherwise report the current setting.
 */
static int
getathcap(struct ieee80211vap *vap, int cap)
{
	if (vap->iv_opmode == IEEE80211_M_STA &&
	    vap->iv_state == IEEE80211_S_RUN)
		return IEEE80211_ATH_CAP(vap, vap->iv_bss, cap) != 0;
	else
		return (vap->iv_flags & cap) != 0;
}

static int
superg_ioctl_get80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
	switch (ireq->i_type) {
	case IEEE80211_IOC_FF:
		ireq->i_val = getathcap(vap, IEEE80211_F_FF);
		break;
	case IEEE80211_IOC_TURBOP:
		ireq->i_val = getathcap(vap, IEEE80211_F_TURBOP);
		break;
	default:
		return ENOSYS;
	}
	return 0;
}
IEEE80211_IOCTL_GET(superg, superg_ioctl_get80211);

static int
superg_ioctl_set80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
	switch (ireq->i_type) {
	case IEEE80211_IOC_FF:
		if (ireq->i_val) {
			if ((vap->iv_caps & IEEE80211_C_FF) == 0)
				return EOPNOTSUPP;
			vap->iv_flags |= IEEE80211_F_FF;
		} else
			vap->iv_flags &= ~IEEE80211_F_FF;
		return ENETRESET;
	case IEEE80211_IOC_TURBOP:
		if (ireq->i_val) {
			if ((vap->iv_caps & IEEE80211_C_TURBOP) == 0)
				return EOPNOTSUPP;
			vap->iv_flags |= IEEE80211_F_TURBOP;
		} else
			vap->iv_flags &= ~IEEE80211_F_TURBOP;
		return ENETRESET;
	default:
		return ENOSYS;
	}
	return 0;
}
IEEE80211_IOCTL_SET(superg, superg_ioctl_set80211);

#endif	/* IEEE80211_SUPPORT_SUPERG */