/* $DragonFly: src/sys/netbt/hci_link.c,v 1.2 2008/03/18 13:41:42 hasso Exp $ */
/* $OpenBSD: src/sys/netbt/hci_link.c,v 1.7 2008/02/24 21:34:48 uwe Exp $ */
/* $NetBSD: hci_link.c,v 1.16 2007/11/10 23:12:22 plunky Exp $ */

/*-
 * Copyright (c) 2005 Iain Hibbert.
 * Copyright (c) 2006 Itronix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Itronix Inc. may not be used to endorse
 *    or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ITRONIX INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL ITRONIX INC. BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/callout.h>
#include <net/if.h>
#include <net/pf/pfvar.h>
#include <sys/bus.h>

#include <netbt/bluetooth.h>
#include <netbt/hci.h>
#include <netbt/l2cap.h>
#include <netbt/sco.h>

/*******************************************************************************
 *
 *	HCI ACL Connections
 */

/*
 * Automatically expire unused ACL connections after this number of
 * seconds (if zero, do not expire unused connections) [sysctl]
 */
int hci_acl_expiry = 10;	/* seconds */

/*
 * hci_acl_open(unit, bdaddr)
 *
 * open ACL connection to remote bdaddr. Only one ACL connection is permitted
 * between any two Bluetooth devices, so we look for an existing one before
 * trying to start a new one.
 */
struct hci_link *
hci_acl_open(struct hci_unit *unit, bdaddr_t *bdaddr)
{
	struct hci_link *link;
	struct hci_memo *memo;
	hci_create_con_cp cp;
	int err;

	KKASSERT(unit != NULL);
	KKASSERT(bdaddr != NULL);

	link = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
	if (link == NULL) {
		link = hci_link_alloc(unit);
		if (link == NULL)
			return NULL;

		link->hl_type = HCI_LINK_ACL;
		bdaddr_copy(&link->hl_bdaddr, bdaddr);
	}

	switch(link->hl_state) {
	case HCI_LINK_CLOSED:
		/*
		 * open connection to remote device
		 */
		memset(&cp, 0, sizeof(cp));
		bdaddr_copy(&cp.bdaddr, bdaddr);
		cp.pkt_type = htole16(unit->hci_packet_type);

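		/*
		 * If we hold a memo for this device (cached from a
		 * recent inquiry result), reuse its page scan settings
		 * and clock offset to speed up connection setup.
		 */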
		memo = hci_memo_find(unit, bdaddr);
		if (memo != NULL) {
			cp.page_scan_rep_mode = memo->page_scan_rep_mode;
			cp.page_scan_mode = memo->page_scan_mode;
			cp.clock_offset = memo->clock_offset;
		}

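		/*
		 * If our link policy allows it, let the remote device
		 * request a role switch during connection setup.
		 */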
		if (unit->hci_link_policy & HCI_LINK_POLICY_ENABLE_ROLE_SWITCH)
			cp.accept_role_switch = 1;

		err = hci_send_cmd(unit, HCI_CMD_CREATE_CON, &cp, sizeof(cp));
		if (err) {
			hci_link_free(link, err);
			return NULL;
		}

		link->hl_state = HCI_LINK_WAIT_CONNECT;
		break;

	case HCI_LINK_WAIT_CONNECT:
	case HCI_LINK_WAIT_AUTH:
	case HCI_LINK_WAIT_ENCRYPT:
	case HCI_LINK_WAIT_SECURE:
		/*
		 * somebody else is already trying to connect, we just
		 * sit on the bench with them..
		 */
		break;

	case HCI_LINK_OPEN:
		/*
		 * If already open, halt any expiry callouts. We don't need
		 * to worry about a callout that is already being invoked,
		 * since refcnt > 0 will keep the link alive.
		 */
		callout_stop(&link->hl_expire);
		break;

	default:
		UNKNOWN(link->hl_state);
		return NULL;
	}

	/* open */
	link->hl_refcnt++;

	return link;
}

/*
 * Close ACL connection. When there are no more references to this link,
 * we can either close it down or schedule a delayed closedown.
 */
void
hci_acl_close(struct hci_link *link, int err)
{
	KKASSERT(link != NULL);

	if (--link->hl_refcnt == 0) {
		if (link->hl_state == HCI_LINK_CLOSED)
			hci_link_free(link, err);
		else if (hci_acl_expiry > 0)
			callout_reset(&link->hl_expire, hci_acl_expiry * hz,
			    hci_acl_timeout, link);
	}
}

/*
 * Incoming ACL connection.
 *
 * For now, we accept all connections but it would be better to check
 * the L2CAP listen list and only accept when there is a listener
 * available.
 *
 * There should not already be a link to the same bdaddr; we check
 * anyway, though the case is left unhandled for now.
 */
struct hci_link *
hci_acl_newconn(struct hci_unit *unit, bdaddr_t *bdaddr)
{
	struct hci_link *link;

	link = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
	if (link != NULL)
		return NULL;

	link = hci_link_alloc(unit);
	if (link != NULL) {
		link->hl_state = HCI_LINK_WAIT_CONNECT;
		link->hl_type = HCI_LINK_ACL;
		bdaddr_copy(&link->hl_bdaddr, bdaddr);

		if (hci_acl_expiry > 0)
			callout_reset(&link->hl_expire, hci_acl_expiry * hz,
			    hci_acl_timeout, link);
	}

	return link;
}

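/*
 * ACL expiry timeout.
 *
 * If the link is still unreferenced when the callout fires, shut it
 * down: free it outright when no connection was ever established,
 * otherwise ask the controller to disconnect.
 */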
void
hci_acl_timeout(void *arg)
{
	struct hci_link *link = arg;
	hci_discon_cp cp;
	int err;

	crit_enter();

	if (link->hl_refcnt > 0)
		goto out;

	DPRINTF("link #%d expired\n", link->hl_handle);

	switch (link->hl_state) {
	case HCI_LINK_CLOSED:
	case HCI_LINK_WAIT_CONNECT:
		hci_link_free(link, ECONNRESET);
		break;

	case HCI_LINK_WAIT_AUTH:
	case HCI_LINK_WAIT_ENCRYPT:
	case HCI_LINK_WAIT_SECURE:
	case HCI_LINK_OPEN:
		cp.con_handle = htole16(link->hl_handle);
		cp.reason = 0x13; /* "Remote User Terminated Connection" */

		err = hci_send_cmd(link->hl_unit, HCI_CMD_DISCONNECT,
					&cp, sizeof(cp));

		if (err) {
			DPRINTF("error %d sending HCI_CMD_DISCONNECT\n",
			    err);
		}

		break;

	default:
		UNKNOWN(link->hl_state);
		break;
	}

out:
	crit_exit();
}

/*
 * Initiate any Link Mode change requests.
 */
int
hci_acl_setmode(struct hci_link *link)
{
	int err;

	KKASSERT(link != NULL);
	KKASSERT(link->hl_unit != NULL);

	if (link->hl_state != HCI_LINK_OPEN)
		return EINPROGRESS;

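	/*
	 * Changes are requested one at a time; when each request
	 * completes, the HCI event handler updates the link flags and
	 * calls back here until nothing more is outstanding.
	 */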
	if ((link->hl_flags & HCI_LINK_AUTH_REQ)
	    && !(link->hl_flags & HCI_LINK_AUTH)) {
		hci_auth_req_cp cp;

		DPRINTF("(%s) requesting auth for handle #%d\n",
		    device_get_nameunit(link->hl_unit->hci_dev),
		    link->hl_handle);

		link->hl_state = HCI_LINK_WAIT_AUTH;
		cp.con_handle = htole16(link->hl_handle);
		err = hci_send_cmd(link->hl_unit, HCI_CMD_AUTH_REQ,
				   &cp, sizeof(cp));

		return (err == 0 ? EINPROGRESS : err);
	}

	if ((link->hl_flags & HCI_LINK_ENCRYPT_REQ)
	    && !(link->hl_flags & HCI_LINK_ENCRYPT)) {
		hci_set_con_encryption_cp cp;

		/* XXX we should check features for encryption capability */

		DPRINTF("(%s) requesting encryption for handle #%d\n",
		    device_get_nameunit(link->hl_unit->hci_dev),
		    link->hl_handle);

		link->hl_state = HCI_LINK_WAIT_ENCRYPT;
		cp.con_handle = htole16(link->hl_handle);
		cp.encryption_enable = 0x01;

		err = hci_send_cmd(link->hl_unit, HCI_CMD_SET_CON_ENCRYPTION,
				   &cp, sizeof(cp));

		return (err == 0 ? EINPROGRESS : err);
	}

	if ((link->hl_flags & HCI_LINK_SECURE_REQ)) {
		hci_change_con_link_key_cp cp;

		/* always change link key for SECURE requests */
		link->hl_flags &= ~HCI_LINK_SECURE;

		DPRINTF("(%s) changing link key for handle #%d\n",
		    device_get_nameunit(link->hl_unit->hci_dev),
		    link->hl_handle);

		link->hl_state = HCI_LINK_WAIT_SECURE;
		cp.con_handle = htole16(link->hl_handle);

		err = hci_send_cmd(link->hl_unit, HCI_CMD_CHANGE_CON_LINK_KEY,
				   &cp, sizeof(cp));

		return (err == 0 ? EINPROGRESS : err);
	}

	return 0;
}

/*
 * Link Mode changed.
 *
 * This is called from event handlers when the mode change
 * is complete. We notify upstream and restart the link.
 */
void
hci_acl_linkmode(struct hci_link *link)
{
	struct l2cap_channel *chan, *next;
	int err, mode = 0;

	DPRINTF("(%s) handle #%d, auth %s, encrypt %s, secure %s\n",
	    device_get_nameunit(link->hl_unit->hci_dev), link->hl_handle,
	    (link->hl_flags & HCI_LINK_AUTH ? "on" : "off"),
	    (link->hl_flags & HCI_LINK_ENCRYPT ? "on" : "off"),
	    (link->hl_flags & HCI_LINK_SECURE ? "on" : "off"));

	if (link->hl_flags & HCI_LINK_AUTH)
		mode |= L2CAP_LM_AUTH;

	if (link->hl_flags & HCI_LINK_ENCRYPT)
		mode |= L2CAP_LM_ENCRYPT;

	if (link->hl_flags & HCI_LINK_SECURE)
		mode |= L2CAP_LM_SECURE;

	/*
	 * The link state will only be OPEN here if the mode change
	 * was successful. So, we can proceed with L2CAP connections,
	 * or notify already established channels, to allow any that
	 * are dissatisfied to disconnect before we restart.
	 */
	next = LIST_FIRST(&l2cap_active_list);
	while ((chan = next) != NULL) {
		next = LIST_NEXT(chan, lc_ncid);

		if (chan->lc_link != link)
			continue;

		switch(chan->lc_state) {
		case L2CAP_WAIT_SEND_CONNECT_REQ: /* we are connecting */
			if ((mode & chan->lc_mode) != chan->lc_mode) {
				l2cap_close(chan, ECONNABORTED);
				break;
			}

			chan->lc_state = L2CAP_WAIT_RECV_CONNECT_RSP;
			err = l2cap_send_connect_req(chan);
			if (err) {
				l2cap_close(chan, err);
				break;
			}
			break;

		case L2CAP_WAIT_SEND_CONNECT_RSP: /* they are connecting */
			if ((mode & chan->lc_mode) != chan->lc_mode) {
				l2cap_send_connect_rsp(link, chan->lc_ident,
							0, chan->lc_rcid,
							L2CAP_SECURITY_BLOCK);

				l2cap_close(chan, ECONNABORTED);
				break;
			}

			l2cap_send_connect_rsp(link, chan->lc_ident,
						chan->lc_lcid, chan->lc_rcid,
						L2CAP_SUCCESS);

			chan->lc_state = L2CAP_WAIT_CONFIG;
			chan->lc_flags |= (L2CAP_WAIT_CONFIG_RSP | L2CAP_WAIT_CONFIG_REQ);
			err = l2cap_send_config_req(chan);
			if (err) {
				l2cap_close(chan, err);
				break;
			}
			break;

		case L2CAP_WAIT_RECV_CONNECT_RSP:
		case L2CAP_WAIT_CONFIG:
		case L2CAP_OPEN: /* already established */
			(*chan->lc_proto->linkmode)(chan->lc_upper, mode);
			break;

		default:
			break;
		}
	}

	link->hl_state = HCI_LINK_OPEN;
	hci_acl_start(link);
}

/*
 * Receive ACL Data
 *
 * we accumulate packet fragments on the hci_link structure
 * until a full L2CAP frame is ready, then send it on.
 */
void
hci_acl_recv(struct mbuf *m, struct hci_unit *unit)
{
	struct hci_link *link;
	hci_acldata_hdr_t hdr;
	uint16_t handle, want;
	int pb, got;

	KKASSERT(m != NULL);
	KKASSERT(unit != NULL);

	KKASSERT(m->m_pkthdr.len >= sizeof(hdr));
	m_copydata(m, 0, sizeof(hdr), (caddr_t)&hdr);
	m_adj(m, sizeof(hdr));

#ifdef DIAGNOSTIC
	if (hdr.type != HCI_ACL_DATA_PKT) {
		kprintf("%s: bad ACL packet type\n",
		    device_get_nameunit(unit->hci_dev));
		goto bad;
	}

	if (m->m_pkthdr.len != letoh16(hdr.length)) {
		kprintf("%s: bad ACL packet length (%d != %d)\n",
		    device_get_nameunit(unit->hci_dev), m->m_pkthdr.len,
		    letoh16(hdr.length));
		goto bad;
	}
#endif

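	/*
	 * The connection handle field also carries the packet boundary
	 * and broadcast flags; extract the handle proper and the
	 * boundary flag (start of packet vs continuing fragment).
	 */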
	hdr.length = letoh16(hdr.length);
	hdr.con_handle = letoh16(hdr.con_handle);
	handle = HCI_CON_HANDLE(hdr.con_handle);
	pb = HCI_PB_FLAG(hdr.con_handle);

	link = hci_link_lookup_handle(unit, handle);
	if (link == NULL) {
		hci_discon_cp cp;

		DPRINTF("%s: dumping packet for unknown handle #%d\n",
		    device_get_nameunit(unit->hci_dev), handle);

		/*
		 * There is no way to find out what this connection handle
		 * is for, so just get rid of it. This may happen if a USB
		 * dongle is plugged into a self-powered hub and does not
		 * reset when the system is shut down.
		 */
		cp.con_handle = htole16(handle);
		cp.reason = 0x13; /* "Remote User Terminated Connection" */
		hci_send_cmd(unit, HCI_CMD_DISCONNECT, &cp, sizeof(cp));
		goto bad;
	}

	switch (pb) {
	case HCI_PACKET_START:
		if (link->hl_rxp != NULL)
			kprintf("%s: dropped incomplete ACL packet\n",
			    device_get_nameunit(unit->hci_dev));

		if (m->m_pkthdr.len < sizeof(l2cap_hdr_t)) {
			kprintf("%s: short ACL packet\n",
			    device_get_nameunit(unit->hci_dev));

			goto bad;
		}

		link->hl_rxp = m;
		got = m->m_pkthdr.len;
		break;

	case HCI_PACKET_FRAGMENT:
		if (link->hl_rxp == NULL) {
			kprintf("%s: unexpected packet fragment\n",
			    device_get_nameunit(unit->hci_dev));

			goto bad;
		}

		got = m->m_pkthdr.len + link->hl_rxp->m_pkthdr.len;
		m_cat(link->hl_rxp, m);
		m = link->hl_rxp;
		m->m_pkthdr.len = got;
		break;

	default:
		kprintf("%s: unknown packet type\n",
		    device_get_nameunit(unit->hci_dev));

		goto bad;
	}

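	/*
	 * The first two bytes of the L2CAP header give the payload
	 * length, so the frame is complete once we have accumulated
	 * that many bytes plus the basic header itself.
	 */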
	m_copydata(m, 0, sizeof(want), (caddr_t)&want);
	want = letoh16(want) + sizeof(l2cap_hdr_t) - got;

	if (want > 0)
		return;

	link->hl_rxp = NULL;

	if (want == 0) {
		l2cap_recv_frame(m, link);
		return;
	}

bad:
	m_freem(m);
}

/*
 * Send ACL data on link
 *
 * We must fragment packets into chunks no larger than unit->hci_max_acl_size and
 * prepend a relevant ACL header to each fragment. We keep a PDU structure
 * attached to the link, so that completed fragments can be marked off and
 * more data requested from above once the PDU is sent.
 */
int
hci_acl_send(struct mbuf *m, struct hci_link *link,
		struct l2cap_channel *chan)
{
	struct l2cap_pdu *pdu;
	struct mbuf *n = NULL;
	int plen, mlen, num = 0;

	KKASSERT(link != NULL);
	KKASSERT(m != NULL);
	KKASSERT(m->m_flags & M_PKTHDR);
	KKASSERT(m->m_pkthdr.len > 0);

	if (link->hl_state == HCI_LINK_CLOSED) {
		m_freem(m);
		return ENETDOWN;
	}

	pdu = pool_get(&l2cap_pdu_pool, PR_NOWAIT);
	if (pdu == NULL)
		goto nomem;

	bzero(pdu, sizeof *pdu);
	pdu->lp_chan = chan;
	pdu->lp_pending = 0;

	plen = m->m_pkthdr.len;
	mlen = link->hl_unit->hci_max_acl_size;

	DPRINTFN(5, "%s: handle #%d, plen = %d, max = %d\n",
	    device_get_nameunit(link->hl_unit->hci_dev),
	    link->hl_handle, plen, mlen);

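	/*
	 * Split the PDU into fragments no larger than the ACL MTU and
	 * queue them; the first fragment is tagged with M_PROTO1 so
	 * that hci_acl_start() can mark it as a packet start.
	 */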
	while (plen > 0) {
		if (plen > mlen) {
			n = m_split(m, mlen, MB_DONTWAIT);
			if (n == NULL)
				goto nomem;
		} else {
			mlen = plen;
		}

		if (num++ == 0)
			m->m_flags |= M_PROTO1;	/* tag first fragment */

		DPRINTFN(10, "(%s) chunk of %d (plen = %d) bytes\n",
		    device_get_nameunit(link->hl_unit->hci_dev), mlen, plen);
		IF_ENQUEUE(&pdu->lp_data, m);
		m = n;
		plen -= mlen;
	}

	TAILQ_INSERT_TAIL(&link->hl_txq, pdu, lp_next);
	link->hl_txqlen += num;

	hci_acl_start(link);

	return 0;

nomem:
	if (m) m_freem(m);
	if (pdu) {
		IF_DRAIN(&pdu->lp_data);
		pool_put(&l2cap_pdu_pool, pdu);
	}

	return ENOMEM;
}

/*
 * Start sending ACL data on link.
 *
 *	This is called when the queue may need restarting: as new data
 * is queued, after link mode changes have completed, or when device
 * buffers have cleared.
 *
 *	We may use all the available packet slots. The reason that we add
 * the ACL encapsulation here rather than in hci_acl_send() is that L2CAP
 * signal packets may be queued before the handle is given to us..
 */
void
hci_acl_start(struct hci_link *link)
{
	struct hci_unit *unit;
	hci_acldata_hdr_t *hdr;
	struct l2cap_pdu *pdu;
	struct mbuf *m;
	uint16_t handle;

	KKASSERT(link != NULL);

	unit = link->hl_unit;
	KKASSERT(unit != NULL);

	/* this is mainly to block ourselves (below) */
	if (link->hl_state != HCI_LINK_OPEN)
		return;

	if (link->hl_txqlen == 0 || unit->hci_num_acl_pkts == 0)
		return;

	/* find first PDU with data to send */
	pdu = TAILQ_FIRST(&link->hl_txq);
	for (;;) {
		if (pdu == NULL)
			return;

		if (!IF_QEMPTY(&pdu->lp_data))
			break;

		pdu = TAILQ_NEXT(pdu, lp_next);
	}

	while (unit->hci_num_acl_pkts > 0) {
		IF_DEQUEUE(&pdu->lp_data, m);
		KKASSERT(m != NULL);

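		/*
		 * Rebuild the connection handle field, flagging this
		 * fragment as either the start of a new ACL packet or
		 * a continuation, as tagged by hci_acl_send().
		 */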
		if (m->m_flags & M_PROTO1)
			handle = HCI_MK_CON_HANDLE(link->hl_handle,
						HCI_PACKET_START, 0);
		else
			handle = HCI_MK_CON_HANDLE(link->hl_handle,
						HCI_PACKET_FRAGMENT, 0);

		M_PREPEND(m, sizeof(*hdr), MB_DONTWAIT);
		if (m == NULL)
			break;

		hdr = mtod(m, hci_acldata_hdr_t *);
		hdr->type = HCI_ACL_DATA_PKT;
		hdr->con_handle = htole16(handle);
		hdr->length = htole16(m->m_pkthdr.len - sizeof(*hdr));

		link->hl_txqlen--;
		pdu->lp_pending++;

		hci_output_acl(unit, m);

		if (IF_QEMPTY(&pdu->lp_data)) {
			if (pdu->lp_chan) {
				/*
				 * This should enable streaming of PDUs - when
				 * we have placed all the fragments on the acl
				 * output queue, we trigger the L2CAP layer to
				 * send us down one more. Use a false state so
				 * we don't run into ourselves coming back from
				 * the future..
				 */
				link->hl_state = HCI_LINK_BLOCK;
				l2cap_start(pdu->lp_chan);
				link->hl_state = HCI_LINK_OPEN;
			}

			pdu = TAILQ_NEXT(pdu, lp_next);
			if (pdu == NULL)
				break;
		}
	}

	/*
	 * We had our turn now, move to the back of the queue to let
	 * other links have a go at the output buffers..
	 */
	if (TAILQ_NEXT(link, hl_next)) {
		TAILQ_REMOVE(&unit->hci_links, link, hl_next);
		TAILQ_INSERT_TAIL(&unit->hci_links, link, hl_next);
	}
}

/*
 * Confirm ACL packets cleared from Controller buffers. We scan our PDU
 * list to clear pending fragments and signal upstream for more data
 * when a PDU is complete.
 */
void
hci_acl_complete(struct hci_link *link, int num)
{
	struct l2cap_pdu *pdu;
	struct l2cap_channel *chan;

	DPRINTFN(5, "(%s) handle #%d (%d)\n",
	    device_get_nameunit(link->hl_unit->hci_dev), link->hl_handle, num);

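	/*
	 * num counts fragments the controller reports completed
	 * (Number of Completed Packets event); PDUs are charged in
	 * queue order, since fragments were sent in that order.
	 */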
	while (num > 0) {
		pdu = TAILQ_FIRST(&link->hl_txq);
		if (pdu == NULL) {
			kprintf("%s: %d packets completed on handle #%x "
				"but none pending!\n",
				device_get_nameunit(link->hl_unit->hci_dev),
				num, link->hl_handle);
			return;
		}

		if (num >= pdu->lp_pending) {
			num -= pdu->lp_pending;
			pdu->lp_pending = 0;

			if (IF_QEMPTY(&pdu->lp_data)) {
				TAILQ_REMOVE(&link->hl_txq, pdu, lp_next);
				chan = pdu->lp_chan;
				if (chan != NULL) {
					chan->lc_pending--;
					(*chan->lc_proto->complete)
							(chan->lc_upper, 1);

					if (chan->lc_pending == 0)
						l2cap_start(chan);
				}

				pool_put(&l2cap_pdu_pool, pdu);
			}
		} else {
			pdu->lp_pending -= num;
			num = 0;
		}
	}
}

/*******************************************************************************
 *
 *	HCI SCO Connections
 */

/*
 * Incoming SCO Connection. We check the list for anybody willing
 * to take it.
 */
struct hci_link *
hci_sco_newconn(struct hci_unit *unit, bdaddr_t *bdaddr)
{
	struct sockaddr_bt laddr, raddr;
	struct sco_pcb *pcb, *new;
	struct hci_link *sco, *acl;

	memset(&laddr, 0, sizeof(laddr));
	laddr.bt_len = sizeof(laddr);
	laddr.bt_family = AF_BLUETOOTH;
	bdaddr_copy(&laddr.bt_bdaddr, &unit->hci_bdaddr);

	memset(&raddr, 0, sizeof(raddr));
	raddr.bt_len = sizeof(raddr);
	raddr.bt_family = AF_BLUETOOTH;
	bdaddr_copy(&raddr.bt_bdaddr, bdaddr);

	/*
	 * There should already be an ACL link up and running before
	 * the controller sends us SCO connection requests, but you
	 * never know..
	 */
	acl = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
	if (acl == NULL || acl->hl_state != HCI_LINK_OPEN)
		return NULL;

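	/*
	 * Offer the connection to each listening SCO pcb in turn; the
	 * first protocol that returns a new pcb takes the link.
	 */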
	LIST_FOREACH(pcb, &sco_pcb, sp_next) {
		if ((pcb->sp_flags & SP_LISTENING) == 0)
			continue;

		new = (*pcb->sp_proto->newconn)(pcb->sp_upper, &laddr, &raddr);
		if (new == NULL)
			continue;

		/*
		 * Ok, got new pcb so we can start a new link and fill
		 * in all the details.
		 */
		bdaddr_copy(&new->sp_laddr, &unit->hci_bdaddr);
		bdaddr_copy(&new->sp_raddr, bdaddr);

		sco = hci_link_alloc(unit);
		if (sco == NULL) {
			sco_detach(&new);
			return NULL;
		}

		sco->hl_type = HCI_LINK_SCO;
		bdaddr_copy(&sco->hl_bdaddr, bdaddr);

		sco->hl_link = hci_acl_open(unit, bdaddr);
		KKASSERT(sco->hl_link == acl);

		sco->hl_sco = new;
		new->sp_link = sco;

		new->sp_mtu = unit->hci_max_sco_size;
		return sco;
	}

	return NULL;
}

/*
 * receive SCO packet, we only need to strip the header and send
 * it to the right handler
 */
void
hci_sco_recv(struct mbuf *m, struct hci_unit *unit)
{
	struct hci_link *link;
	hci_scodata_hdr_t hdr;
	uint16_t handle;

	KKASSERT(m != NULL);
	KKASSERT(unit != NULL);

	KKASSERT(m->m_pkthdr.len >= sizeof(hdr));
	m_copydata(m, 0, sizeof(hdr), (caddr_t)&hdr);
	m_adj(m, sizeof(hdr));

#ifdef DIAGNOSTIC
	if (hdr.type != HCI_SCO_DATA_PKT) {
		kprintf("%s: bad SCO packet type\n",
		    device_get_nameunit(unit->hci_dev));
		goto bad;
	}

	if (m->m_pkthdr.len != hdr.length) {
		kprintf("%s: bad SCO packet length (%d != %d)\n",
		    device_get_nameunit(unit->hci_dev), m->m_pkthdr.len,
		    hdr.length);
		goto bad;
	}
#endif

	hdr.con_handle = letoh16(hdr.con_handle);
	handle = HCI_CON_HANDLE(hdr.con_handle);

	link = hci_link_lookup_handle(unit, handle);
	if (link == NULL || link->hl_type == HCI_LINK_ACL) {
		DPRINTF("%s: dumping packet for unknown handle #%d\n",
		    device_get_nameunit(unit->hci_dev), handle);

		goto bad;
	}

	(*link->hl_sco->sp_proto->input)(link->hl_sco->sp_upper, m);
	return;

bad:
	m_freem(m);
}

void
hci_sco_start(struct hci_link *link)
{
}

/*
 * SCO packets have completed at the controller, so we can
 * signal up to free the buffer space.
 */
void
hci_sco_complete(struct hci_link *link, int num)
{

	DPRINTFN(5, "handle #%d (num=%d)\n", link->hl_handle, num);
	link->hl_sco->sp_pending--;
	(*link->hl_sco->sp_proto->complete)(link->hl_sco->sp_upper, num);
}

/*******************************************************************************
 *
 *	Generic HCI Connection alloc/free/lookup etc
 */

struct hci_link *
hci_link_alloc(struct hci_unit *unit)
{
	struct hci_link *link;

	KKASSERT(unit != NULL);

	link = kmalloc(sizeof *link, M_BLUETOOTH, M_NOWAIT | M_ZERO);
	if (link == NULL)
		return NULL;

	link->hl_unit = unit;
	link->hl_state = HCI_LINK_CLOSED;

	/* init ACL portion */
	callout_init(&link->hl_expire);

	crit_enter();
	TAILQ_INIT(&link->hl_txq);	/* outgoing packets */
	TAILQ_INIT(&link->hl_reqs);	/* request queue */

	link->hl_mtu = L2CAP_MTU_DEFAULT;		/* L2CAP signal mtu */
	link->hl_flush = L2CAP_FLUSH_TIMO_DEFAULT;	/* flush timeout */

	/* init SCO portion */
	/* &link->hl_data is already zero-initialized. */

	/* attach to unit */
	TAILQ_INSERT_HEAD(&unit->hci_links, link, hl_next);
	crit_exit();
	return link;
}

void
hci_link_free(struct hci_link *link, int err)
{
	struct l2cap_req *req;
	struct l2cap_pdu *pdu;
	struct l2cap_channel *chan, *next;

	KKASSERT(link != NULL);

	DPRINTF("(%s) #%d, type = %d, state = %d, refcnt = %d\n",
	    device_get_nameunit(link->hl_unit->hci_dev), link->hl_handle,
	    link->hl_type, link->hl_state, link->hl_refcnt);

	/* ACL reference count */
	if (link->hl_refcnt > 0) {
		next = LIST_FIRST(&l2cap_active_list);
		while ((chan = next) != NULL) {
			next = LIST_NEXT(chan, lc_ncid);
			if (chan->lc_link == link)
				l2cap_close(chan, err);
		}
	}
	KKASSERT(link->hl_refcnt == 0);

	/* ACL L2CAP requests.. */
	while ((req = TAILQ_FIRST(&link->hl_reqs)) != NULL)
		l2cap_request_free(req);

	KKASSERT(TAILQ_EMPTY(&link->hl_reqs));

	/* ACL outgoing data queue */
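	/*
	 * Return any buffer credits still held by fragments that were
	 * passed to the controller but never reported complete.
	 */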
	while ((pdu = TAILQ_FIRST(&link->hl_txq)) != NULL) {
		TAILQ_REMOVE(&link->hl_txq, pdu, lp_next);
		IF_DRAIN(&pdu->lp_data);
		if (pdu->lp_pending)
			link->hl_unit->hci_num_acl_pkts += pdu->lp_pending;

		pool_put(&l2cap_pdu_pool, pdu);
	}

	KKASSERT(TAILQ_EMPTY(&link->hl_txq));

	/* ACL incoming data packet */
	if (link->hl_rxp != NULL) {
		m_freem(link->hl_rxp);
		link->hl_rxp = NULL;
	}

	/* SCO master ACL link */
	if (link->hl_link != NULL) {
		hci_acl_close(link->hl_link, err);
		link->hl_link = NULL;
	}

	/* SCO pcb */
	if (link->hl_sco != NULL) {
		struct sco_pcb *pcb;

		pcb = link->hl_sco;
		pcb->sp_link = NULL;
		link->hl_sco = NULL;
		(*pcb->sp_proto->disconnected)(pcb->sp_upper, err);
	}

	/* flush any SCO data */
	crit_enter();
	IF_DRAIN(&link->hl_data);
	crit_exit();

	/*
	 * Halt the timeout - if it's already running we cannot free the
	 * link structure, but the timeout function will call us back in
	 * any case.
	 */
	link->hl_state = HCI_LINK_CLOSED;
	callout_stop(&link->hl_expire);
	if (callout_active(&link->hl_expire))
		return;

	/*
	 * If we made a note of clock offset, keep it in a memo
	 * to facilitate reconnections to this device
	 */
	if (link->hl_clock != 0) {
		struct hci_memo *memo;

		memo = hci_memo_new(link->hl_unit, &link->hl_bdaddr);
		if (memo != NULL)
			memo->clock_offset = link->hl_clock;
	}

	crit_enter();
	TAILQ_REMOVE(&link->hl_unit->hci_links, link, hl_next);
	crit_exit();
	kfree(link, M_BLUETOOTH);
}

/*
 * Lookup HCI link by type and state.
 */
struct hci_link *
hci_link_lookup_state(struct hci_unit *unit, uint16_t type, uint16_t state)
{
	struct hci_link *link;

	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
		if (link->hl_type == type && link->hl_state == state)
			break;
	}

	return link;
}

/*
 * Lookup HCI link by address and type. Note that for SCO links there may
 * be more than one link per address, so we only return links with no
 * handle (ie new links)
 */
struct hci_link *
hci_link_lookup_bdaddr(struct hci_unit *unit, bdaddr_t *bdaddr, uint16_t type)
{
	struct hci_link *link;

	KKASSERT(unit != NULL);
	KKASSERT(bdaddr != NULL);

	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
		if (link->hl_type != type)
			continue;

		if (type == HCI_LINK_SCO && link->hl_handle != 0)
			continue;

		if (bdaddr_same(&link->hl_bdaddr, bdaddr))
			break;
	}

	return link;
}

struct hci_link *
hci_link_lookup_handle(struct hci_unit *unit, uint16_t handle)
{
	struct hci_link *link;

	KKASSERT(unit != NULL);

	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
		if (handle == link->hl_handle)
			break;
	}

	return link;
}
1086