xref: /dragonfly/sys/netbt/hci_link.c (revision 5c694678)
1 /* $OpenBSD: src/sys/netbt/hci_link.c,v 1.7 2008/02/24 21:34:48 uwe Exp $ */
2 /* $NetBSD: hci_link.c,v 1.16 2007/11/10 23:12:22 plunky Exp $ */
3 
4 /*-
5  * Copyright (c) 2005 Iain Hibbert.
6  * Copyright (c) 2006 Itronix Inc.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. The name of Itronix Inc. may not be used to endorse
18  *    or promote products derived from this software without specific
19  *    prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY ITRONIX INC. ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL ITRONIX INC. BE LIABLE FOR ANY
25  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
28  * ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/param.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/proc.h>
39 #include <sys/queue.h>
40 #include <sys/systm.h>
41 #include <sys/endian.h>
42 #include <sys/callout.h>
43 #include <net/if.h>
44 #include <sys/bus.h>
45 
46 #include <netbt/bluetooth.h>
47 #include <netbt/hci.h>
48 #include <netbt/l2cap.h>
49 #include <netbt/sco.h>
50 
51 /*******************************************************************************
52  *
53  *	HCI ACL Connections
54  */
55 
56 /*
57  * Automatically expire unused ACL connections after this number of
58  * seconds (if zero, do not expire unused connections) [sysctl]
59  */
60 int hci_acl_expiry = 10;	/* seconds */
61 
62 /*
63  * hci_acl_open(unit, bdaddr)
64  *
65  * open ACL connection to remote bdaddr. Only one ACL connection is permitted
66  * between any two Bluetooth devices, so we look for an existing one before
67  * trying to start a new one.
68  */
69 struct hci_link *
70 hci_acl_open(struct hci_unit *unit, bdaddr_t *bdaddr)
71 {
72 	struct hci_link *link;
73 	struct hci_memo *memo;
74 	hci_create_con_cp cp;
75 	int err;
76 
77 	KKASSERT(unit != NULL);
78 	KKASSERT(bdaddr != NULL);
79 
80 	link = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
81 	if (link == NULL) {
82 		link = hci_link_alloc(unit);
83 		if (link == NULL)
84 			return NULL;
85 
86 		link->hl_type = HCI_LINK_ACL;
87 		bdaddr_copy(&link->hl_bdaddr, bdaddr);
88 	}
89 
90 	switch (link->hl_state) {
91 	case HCI_LINK_CLOSED:
92 		/*
93 		 * open connection to remote device
94 		 */
95 		memset(&cp, 0, sizeof(cp));
96 		bdaddr_copy(&cp.bdaddr, bdaddr);
97 		cp.pkt_type = htole16(unit->hci_packet_type);
98 
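		/*
		 * If an inquiry left us a memo for this bdaddr, reuse its
		 * cached page scan parameters and clock offset so the
		 * controller can page the remote device more quickly.
		 */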
99 		memo = hci_memo_find(unit, bdaddr);
100 		if (memo != NULL) {
101 			cp.page_scan_rep_mode = memo->page_scan_rep_mode;
102 			cp.page_scan_mode = memo->page_scan_mode;
103 			cp.clock_offset = memo->clock_offset;
104 		}
105 
106 		if (unit->hci_link_policy & HCI_LINK_POLICY_ENABLE_ROLE_SWITCH)
107 			cp.accept_role_switch = 1;
108 
109 		err = hci_send_cmd(unit, HCI_CMD_CREATE_CON, &cp, sizeof(cp));
110 		if (err) {
111 			hci_link_free(link, err);
112 			return NULL;
113 		}
114 
115 		link->hl_state = HCI_LINK_WAIT_CONNECT;
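		/* wait for the Connection Complete event to advance the state */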
116 		break;
117 
118 	case HCI_LINK_WAIT_CONNECT:
119 	case HCI_LINK_WAIT_AUTH:
120 	case HCI_LINK_WAIT_ENCRYPT:
121 	case HCI_LINK_WAIT_SECURE:
122 		/*
123 		 * somebody else is already trying to connect; we just
124 		 * sit on the bench with them..
125 		 */
126 		break;
127 
128 	case HCI_LINK_OPEN:
129 		/*
130 		 * If already open, halt any expiry callouts. We don't need
131 		 * to worry about a callout that has already fired, since
132 		 * refcnt > 0 will keep the link alive.
133 		 */
134 		callout_stop(&link->hl_expire);
135 		break;
136 
137 	default:
138 		UNKNOWN(link->hl_state);
139 		return NULL;
140 	}
141 
142 	/* open */
143 	link->hl_refcnt++;
144 
145 	return link;
146 }
147 
148 /*
149  * Close ACL connection. When there are no more references to this link,
150  * we can either close it down or schedule a delayed closedown.
151  */
152 void
153 hci_acl_close(struct hci_link *link, int err)
154 {
155 	KKASSERT(link != NULL);
156 
157 	if (--link->hl_refcnt == 0) {
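		/*
		 * last reference gone: free a dead link immediately,
		 * otherwise let hci_acl_timeout() reap it after the
		 * expiry period
		 */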
158 		if (link->hl_state == HCI_LINK_CLOSED)
159 			hci_link_free(link, err);
160 		else if (hci_acl_expiry > 0)
161 			callout_reset(&link->hl_expire, hci_acl_expiry * hz,
162 			    hci_acl_timeout, link);
163 	}
164 }
165 
166 /*
167  * Incoming ACL connection.
168  *
169  * For now, we accept all connections but it would be better to check
170  * the L2CAP listen list and only accept when there is a listener
171  * available.
172  *
173  * There should not already be a link to the same bdaddr; we check
174  * anyway, though that case is left unhandled for now.
175  */
176 struct hci_link *
177 hci_acl_newconn(struct hci_unit *unit, bdaddr_t *bdaddr)
178 {
179 	struct hci_link *link;
180 
181 	link = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
182 	if (link != NULL)
183 		return NULL;
184 
185 	link = hci_link_alloc(unit);
186 	if (link != NULL) {
187 		link->hl_state = HCI_LINK_WAIT_CONNECT;
188 		link->hl_type = HCI_LINK_ACL;
189 		bdaddr_copy(&link->hl_bdaddr, bdaddr);
190 
191 		if (hci_acl_expiry > 0)
192 			callout_reset(&link->hl_expire, hci_acl_expiry * hz,
193 			    hci_acl_timeout, link);
194 	}
195 
196 	return link;
197 }
198 
199 void
200 hci_acl_timeout(void *arg)
201 {
202 	struct hci_link *link = arg;
203 	hci_discon_cp cp;
204 	int err;
205 
206 	crit_enter();
207 
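	/* somebody may have grabbed a new reference while we were pending */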
208 	if (link->hl_refcnt > 0)
209 		goto out;
210 
211 	DPRINTF("link #%d expired\n", link->hl_handle);
212 
213 	switch (link->hl_state) {
214 	case HCI_LINK_CLOSED:
215 	case HCI_LINK_WAIT_CONNECT:
216 		hci_link_free(link, ECONNRESET);
217 		break;
218 
219 	case HCI_LINK_WAIT_AUTH:
220 	case HCI_LINK_WAIT_ENCRYPT:
221 	case HCI_LINK_WAIT_SECURE:
222 	case HCI_LINK_OPEN:
223 		cp.con_handle = htole16(link->hl_handle);
224 		cp.reason = 0x13; /* "Remote User Terminated Connection" */
225 
226 		err = hci_send_cmd(link->hl_unit, HCI_CMD_DISCONNECT,
227 					&cp, sizeof(cp));
228 
229 		if (err) {
230 			DPRINTF("error %d sending HCI_CMD_DISCONNECT\n",
231 			    err);
232 		}
233 
234 		break;
235 
236 	default:
237 		UNKNOWN(link->hl_state);
238 		break;
239 	}
240 
241 out:
242 	crit_exit();
243 }
244 
245 /*
246  * Initiate any Link Mode change requests.
247  */
248 int
249 hci_acl_setmode(struct hci_link *link)
250 {
251 	int err;
252 
253 	KKASSERT(link != NULL);
254 	KKASSERT(link->hl_unit != NULL);
255 
256 	if (link->hl_state != HCI_LINK_OPEN)
257 		return EINPROGRESS;
258 
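	/*
	 * Issue at most one mode change command at a time, in the order
	 * authentication, encryption, then link key change; each request
	 * parks the link in the matching WAIT_* state until the
	 * corresponding completion event arrives.
	 */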
259 	if ((link->hl_flags & HCI_LINK_AUTH_REQ)
260 	    && !(link->hl_flags & HCI_LINK_AUTH)) {
261 		hci_auth_req_cp cp;
262 
263 		DPRINTF("(%s) requesting auth for handle #%d\n",
264 		    device_get_nameunit(link->hl_unit->hci_dev),
265 		    link->hl_handle);
266 
267 		link->hl_state = HCI_LINK_WAIT_AUTH;
268 		cp.con_handle = htole16(link->hl_handle);
269 		err = hci_send_cmd(link->hl_unit, HCI_CMD_AUTH_REQ,
270 				   &cp, sizeof(cp));
271 
272 		return (err == 0 ? EINPROGRESS : err);
273 	}
274 
275 	if ((link->hl_flags & HCI_LINK_ENCRYPT_REQ)
276 	    && !(link->hl_flags & HCI_LINK_ENCRYPT)) {
277 		hci_set_con_encryption_cp cp;
278 
279 		/* XXX we should check features for encryption capability */
280 
281 		DPRINTF("(%s) requesting encryption for handle #%d\n",
282 		    device_get_nameunit(link->hl_unit->hci_dev),
283 		    link->hl_handle);
284 
285 		link->hl_state = HCI_LINK_WAIT_ENCRYPT;
286 		cp.con_handle = htole16(link->hl_handle);
287 		cp.encryption_enable = 0x01;
288 
289 		err = hci_send_cmd(link->hl_unit, HCI_CMD_SET_CON_ENCRYPTION,
290 				   &cp, sizeof(cp));
291 
292 		return (err == 0 ? EINPROGRESS : err);
293 	}
294 
295 	if ((link->hl_flags & HCI_LINK_SECURE_REQ)) {
296 		hci_change_con_link_key_cp cp;
297 
298 		/* always change link key for SECURE requests */
299 		link->hl_flags &= ~HCI_LINK_SECURE;
300 
301 		DPRINTF("(%s) changing link key for handle #%d\n",
302 		    device_get_nameunit(link->hl_unit->hci_dev),
303 		    link->hl_handle);
304 
305 		link->hl_state = HCI_LINK_WAIT_SECURE;
306 		cp.con_handle = htole16(link->hl_handle);
307 
308 		err = hci_send_cmd(link->hl_unit, HCI_CMD_CHANGE_CON_LINK_KEY,
309 				   &cp, sizeof(cp));
310 
311 		return (err == 0 ? EINPROGRESS : err);
312 	}
313 
314 	return 0;
315 }
316 
317 /*
318  * Link Mode changed.
319  *
320  * This is called from event handlers when the mode change
321  * is complete. We notify upstream and restart the link.
322  */
323 void
324 hci_acl_linkmode(struct hci_link *link)
325 {
326 	struct l2cap_channel *chan, *next;
327 	int err, mode = 0;
328 
329 	DPRINTF("(%s) handle #%d, auth %s, encrypt %s, secure %s\n",
330 	    device_get_nameunit(link->hl_unit->hci_dev), link->hl_handle,
331 	    (link->hl_flags & HCI_LINK_AUTH ? "on" : "off"),
332 	    (link->hl_flags & HCI_LINK_ENCRYPT ? "on" : "off"),
333 	    (link->hl_flags & HCI_LINK_SECURE ? "on" : "off"));
334 
335 	if (link->hl_flags & HCI_LINK_AUTH)
336 		mode |= L2CAP_LM_AUTH;
337 
338 	if (link->hl_flags & HCI_LINK_ENCRYPT)
339 		mode |= L2CAP_LM_ENCRYPT;
340 
341 	if (link->hl_flags & HCI_LINK_SECURE)
342 		mode |= L2CAP_LM_SECURE;
343 
344 	/*
345 	 * The link state will only be OPEN here if the mode change
346 	 * was successful. So, we can proceed with L2CAP connections,
347 	 * or notify already established channels, to allow any that
348 	 * are dissatisfied to disconnect before we restart.
349 	 */
350 	next = LIST_FIRST(&l2cap_active_list);
351 	while ((chan = next) != NULL) {
352 		next = LIST_NEXT(chan, lc_ncid);
353 
354 		if (chan->lc_link != link)
355 			continue;
356 
357 		switch (chan->lc_state) {
358 		case L2CAP_WAIT_SEND_CONNECT_REQ: /* we are connecting */
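			/*
			 * if the link mode we achieved does not cover
			 * everything this channel requires, give up on it
			 */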
359 			if ((mode & chan->lc_mode) != chan->lc_mode) {
360 				l2cap_close(chan, ECONNABORTED);
361 				break;
362 			}
363 
364 			chan->lc_state = L2CAP_WAIT_RECV_CONNECT_RSP;
365 			err = l2cap_send_connect_req(chan);
366 			if (err) {
367 				l2cap_close(chan, err);
368 				break;
369 			}
370 			break;
371 
372 		case L2CAP_WAIT_SEND_CONNECT_RSP: /* they are connecting */
373 			if ((mode & chan->lc_mode) != chan->lc_mode) {
374 				l2cap_send_connect_rsp(link, chan->lc_ident,
375 							0, chan->lc_rcid,
376 							L2CAP_SECURITY_BLOCK);
377 
378 				l2cap_close(chan, ECONNABORTED);
379 				break;
380 			}
381 
382 			l2cap_send_connect_rsp(link, chan->lc_ident,
383 						chan->lc_lcid, chan->lc_rcid,
384 						L2CAP_SUCCESS);
385 
386 			chan->lc_state = L2CAP_WAIT_CONFIG;
387 			chan->lc_flags |= (L2CAP_WAIT_CONFIG_RSP | L2CAP_WAIT_CONFIG_REQ);
388 			err = l2cap_send_config_req(chan);
389 			if (err) {
390 				l2cap_close(chan, err);
391 				break;
392 			}
393 			break;
394 
395 		case L2CAP_WAIT_RECV_CONNECT_RSP:
396 		case L2CAP_WAIT_CONFIG:
397 		case L2CAP_OPEN: /* already established */
398 			(*chan->lc_proto->linkmode)(chan->lc_upper, mode);
399 			break;
400 
401 		default:
402 			break;
403 		}
404 	}
405 
406 	link->hl_state = HCI_LINK_OPEN;
407 	hci_acl_start(link);
408 }
409 
410 /*
411  * Receive ACL Data
412  *
413  * we accumulate packet fragments on the hci_link structure
414  * until a full L2CAP frame is ready, then send it on.
415  */
416 void
417 hci_acl_recv(struct mbuf *m, struct hci_unit *unit)
418 {
419 	struct hci_link *link;
420 	hci_acldata_hdr_t hdr;
421 	uint16_t handle, want;
422 	int pb, got;
423 
424 	KKASSERT(m != NULL);
425 	KKASSERT(unit != NULL);
426 
427 	KKASSERT(m->m_pkthdr.len >= sizeof(hdr));
428 	m_copydata(m, 0, sizeof(hdr), &hdr);
429 	m_adj(m, sizeof(hdr));
430 
431 #ifdef DIAGNOSTIC
432 	if (hdr.type != HCI_ACL_DATA_PKT) {
433 		kprintf("%s: bad ACL packet type\n",
434 		    device_get_nameunit(unit->hci_dev));
435 		goto bad;
436 	}
437 
438 	if (m->m_pkthdr.len != letoh16(hdr.length)) {
439 		kprintf("%s: bad ACL packet length (%d != %d)\n",
440 		    device_get_nameunit(unit->hci_dev), m->m_pkthdr.len,
441 		    letoh16(hdr.length));
442 		goto bad;
443 	}
444 #endif
445 
446 	hdr.length = letoh16(hdr.length);
447 	hdr.con_handle = letoh16(hdr.con_handle);
448 	handle = HCI_CON_HANDLE(hdr.con_handle);
449 	pb = HCI_PB_FLAG(hdr.con_handle);
450 
451 	link = hci_link_lookup_handle(unit, handle);
452 	if (link == NULL) {
453 		hci_discon_cp cp;
454 
455 		DPRINTF("%s: dumping packet for unknown handle #%d\n",
456 		    device_get_nameunit(unit->hci_dev), handle);
457 
458 		/*
459 		 * There is no way to find out what this connection handle is
460 		 * for, so just get rid of it. This may happen if a USB dongle
461 		 * is plugged into a self-powered hub and does not reset when
462 		 * the system is shut down.
463 		 */
464 		cp.con_handle = htole16(handle);
465 		cp.reason = 0x13; /* "Remote User Terminated Connection" */
466 		hci_send_cmd(unit, HCI_CMD_DISCONNECT, &cp, sizeof(cp));
467 		goto bad;
468 	}
469 
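	/*
	 * The packet boundary flag tells us whether this starts a new
	 * L2CAP frame or continues the one being reassembled on hl_rxp.
	 */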
470 	switch (pb) {
471 	case HCI_PACKET_START:
472 		if (link->hl_rxp != NULL)
473 			kprintf("%s: dropped incomplete ACL packet\n",
474 			    device_get_nameunit(unit->hci_dev));
475 
476 		if (m->m_pkthdr.len < sizeof(l2cap_hdr_t)) {
477 			kprintf("%s: short ACL packet\n",
478 			    device_get_nameunit(unit->hci_dev));
479 
480 			goto bad;
481 		}
482 
483 		link->hl_rxp = m;
484 		got = m->m_pkthdr.len;
485 		break;
486 
487 	case HCI_PACKET_FRAGMENT:
488 		if (link->hl_rxp == NULL) {
489 			kprintf("%s: unexpected packet fragment\n",
490 			    device_get_nameunit(unit->hci_dev));
491 
492 			goto bad;
493 		}
494 
495 		got = m->m_pkthdr.len + link->hl_rxp->m_pkthdr.len;
496 		m_cat(link->hl_rxp, m);
497 		m = link->hl_rxp;
498 		m->m_pkthdr.len = got;
499 		break;
500 
501 	default:
502 		kprintf("%s: unknown packet type\n",
503 		    device_get_nameunit(unit->hci_dev));
504 
505 		goto bad;
506 	}
507 
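	/*
	 * The first two octets of an L2CAP frame hold the payload length,
	 * so the complete frame is that many bytes plus the basic header;
	 * "want" is how much of it we are still missing.
	 */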
508 	m_copydata(m, 0, sizeof(want), &want);
509 	want = letoh16(want) + sizeof(l2cap_hdr_t) - got;
510 
511 	if (want > 0)
512 		return;
513 
514 	link->hl_rxp = NULL;
515 
516 	if (want == 0) {
517 		l2cap_recv_frame(m, link);
518 		return;
519 	}
520 
521 bad:
522 	m_freem(m);
523 }
524 
525 /*
526  * Send ACL data on link
527  *
528  * We must fragment packets into chunks of at most unit->hci_max_acl_size and
529  * prepend a relevant ACL header to each fragment. We keep a PDU structure
530  * attached to the link, so that completed fragments can be marked off and
531  * more data requested from above once the PDU is sent.
532  */
533 int
534 hci_acl_send(struct mbuf *m, struct hci_link *link,
535 		struct l2cap_channel *chan)
536 {
537 	struct l2cap_pdu *pdu;
538 	struct mbuf *n = NULL;
539 	int plen, mlen, num = 0;
540 
541 	KKASSERT(link != NULL);
542 	KKASSERT(m != NULL);
543 	KKASSERT(m->m_flags & M_PKTHDR);
544 	KKASSERT(m->m_pkthdr.len > 0);
545 
546 	if (link->hl_state == HCI_LINK_CLOSED) {
547 		m_freem(m);
548 		return ENETDOWN;
549 	}
550 
551 	pdu = zalloc(l2cap_pdu_pool);
552 	if (pdu == NULL)
553 		goto nomem;
554 
555 	bzero(pdu, sizeof *pdu);
556 	pdu->lp_chan = chan;
557 	pdu->lp_pending = 0;
558 
559 	plen = m->m_pkthdr.len;
560 	mlen = link->hl_unit->hci_max_acl_size;
561 
562 	DPRINTFN(5, "%s: handle #%d, plen = %d, max = %d\n",
563 	    device_get_nameunit(link->hl_unit->hci_dev),
564 	    link->hl_handle, plen, mlen);
565 
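	/*
	 * Split the packet into chunks of at most mlen bytes and queue
	 * them on the PDU; the first chunk is tagged M_PROTO1 so that
	 * hci_acl_start() marks it as the start of an ACL packet.
	 */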
566 	while (plen > 0) {
567 		if (plen > mlen) {
568 			n = m_split(m, mlen, M_NOWAIT);
569 			if (n == NULL)
570 				goto nomem;
571 		} else {
572 			mlen = plen;
573 		}
574 
575 		if (num++ == 0)
576 			m->m_flags |= M_PROTO1;	/* tag first fragment */
577 
578 		DPRINTFN(10, "(%s) chunk of %d (plen = %d) bytes\n",
579 		    device_get_nameunit(link->hl_unit->hci_dev), mlen, plen);
580 		IF_ENQUEUE(&pdu->lp_data, m);
581 		m = n;
582 		plen -= mlen;
583 	}
584 
585 	TAILQ_INSERT_TAIL(&link->hl_txq, pdu, lp_next);
586 	link->hl_txqlen += num;
587 
588 	hci_acl_start(link);
589 
590 	return 0;
591 
592 nomem:
593 	if (m) m_freem(m);
594 	if (pdu) {
595 		IF_DRAIN(&pdu->lp_data);
596 		zfree(l2cap_pdu_pool, pdu);
597 	}
598 
599 	return ENOMEM;
600 }
601 
602 /*
603  * Start sending ACL data on link.
604  *
605  *	This is called when the queue may need restarting: as new data
606  * is queued, after link mode changes have completed, or when device
607  * buffers have cleared.
608  *
609  *	We may use all the available packet slots. The reason that we add
610  * the ACL encapsulation here rather than in hci_acl_send() is that L2CAP
611  * signal packets may be queued before the handle is given to us..
612  */
613 void
614 hci_acl_start(struct hci_link *link)
615 {
616 	struct hci_unit *unit;
617 	hci_acldata_hdr_t *hdr;
618 	struct l2cap_pdu *pdu;
619 	struct mbuf *m;
620 	uint16_t handle;
621 
622 	KKASSERT(link != NULL);
623 
624 	unit = link->hl_unit;
625 	KKASSERT(unit != NULL);
626 
627 	/* this is mainly to block ourselves (below) */
628 	if (link->hl_state != HCI_LINK_OPEN)
629 		return;
630 
631 	if (link->hl_txqlen == 0 || unit->hci_num_acl_pkts == 0)
632 		return;
633 
634 	/* find first PDU with data to send */
635 	pdu = TAILQ_FIRST(&link->hl_txq);
636 	for (;;) {
637 		if (pdu == NULL)
638 			return;
639 
640 		if (!IF_QEMPTY(&pdu->lp_data))
641 			break;
642 
643 		pdu = TAILQ_NEXT(pdu, lp_next);
644 	}
645 
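	/*
	 * Push fragments to the controller while it still reports free
	 * ACL buffer slots, prepending an ACL header that marks each one
	 * as a packet start or a continuation fragment.
	 */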
646 	while (unit->hci_num_acl_pkts > 0) {
647 		IF_DEQUEUE(&pdu->lp_data, m);
648 		KKASSERT(m != NULL);
649 
650 		if (m->m_flags & M_PROTO1)
651 			handle = HCI_MK_CON_HANDLE(link->hl_handle,
652 						HCI_PACKET_START, 0);
653 		else
654 			handle = HCI_MK_CON_HANDLE(link->hl_handle,
655 						HCI_PACKET_FRAGMENT, 0);
656 
657 		M_PREPEND(m, sizeof(*hdr), M_NOWAIT);
658 		if (m == NULL)
659 			break;
660 
661 		hdr = mtod(m, hci_acldata_hdr_t *);
662 		hdr->type = HCI_ACL_DATA_PKT;
663 		hdr->con_handle = htole16(handle);
664 		hdr->length = htole16(m->m_pkthdr.len - sizeof(*hdr));
665 
666 		link->hl_txqlen--;
667 		pdu->lp_pending++;
668 
669 		hci_output_acl(unit, m);
670 
671 		if (IF_QEMPTY(&pdu->lp_data)) {
672 			if (pdu->lp_chan) {
673 				/*
674 				 * This should enable streaming of PDUs - when
675 				 * we have placed all the fragments on the ACL
676 				 * output queue, we trigger the L2CAP layer to
677 				 * send us down one more. Use a false state so
678 				 * we don't run into ourselves coming back from
679 				 * the future..
680 				 */
681 				link->hl_state = HCI_LINK_BLOCK;
682 				l2cap_start(pdu->lp_chan);
683 				link->hl_state = HCI_LINK_OPEN;
684 			}
685 
686 			pdu = TAILQ_NEXT(pdu, lp_next);
687 			if (pdu == NULL)
688 				break;
689 		}
690 	}
691 
692 	/*
693 	 * We have had our turn now, so move to the back of the queue to let
694 	 * other links have a go at the output buffers..
695 	 */
696 	if (TAILQ_NEXT(link, hl_next)) {
697 		TAILQ_REMOVE(&unit->hci_links, link, hl_next);
698 		TAILQ_INSERT_TAIL(&unit->hci_links, link, hl_next);
699 	}
700 }
701 
702 /*
703  * Confirm ACL packets cleared from Controller buffers. We scan our PDU
704  * list to clear pending fragments and signal upstream for more data
705  * when a PDU is complete.
706  */
707 void
708 hci_acl_complete(struct hci_link *link, int num)
709 {
710 	struct l2cap_pdu *pdu;
711 	struct l2cap_channel *chan;
712 
713 	DPRINTFN(5, "(%s) handle #%d (%d)\n",
714 	    device_get_nameunit(link->hl_unit->hci_dev), link->hl_handle, num);
715 
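	/*
	 * Fragments are sent in queue order, so completed packets are
	 * accounted against the PDUs at the head of the transmit queue.
	 */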
716 	while (num > 0) {
717 		pdu = TAILQ_FIRST(&link->hl_txq);
718 		if (pdu == NULL) {
719 			kprintf("%s: %d packets completed on handle #%x "
720 				"but none pending!\n",
721 				device_get_nameunit(link->hl_unit->hci_dev),
722 				num, link->hl_handle);
723 			return;
724 		}
725 
726 		if (num >= pdu->lp_pending) {
727 			num -= pdu->lp_pending;
728 			pdu->lp_pending = 0;
729 
730 			if (IF_QEMPTY(&pdu->lp_data)) {
731 				TAILQ_REMOVE(&link->hl_txq, pdu, lp_next);
732 				chan = pdu->lp_chan;
733 				if (chan != NULL) {
734 					chan->lc_pending--;
735 					(*chan->lc_proto->complete)
736 							(chan->lc_upper, 1);
737 
738 					if (chan->lc_pending == 0)
739 						l2cap_start(chan);
740 				}
741 
742 				zfree(l2cap_pdu_pool, pdu);
743 			}
744 		} else {
745 			pdu->lp_pending -= num;
746 			num = 0;
747 		}
748 	}
749 }
750 
751 /*******************************************************************************
752  *
753  *	HCI SCO Connections
754  */
755 
756 /*
757  * Incoming SCO Connection. We check the list for anybody willing
758  * to take it.
759  */
760 struct hci_link *
761 hci_sco_newconn(struct hci_unit *unit, bdaddr_t *bdaddr)
762 {
763 	struct sockaddr_bt laddr, raddr;
764 	struct sco_pcb *pcb, *new;
765 	struct hci_link *sco, *acl;
766 
767 	memset(&laddr, 0, sizeof(laddr));
768 	laddr.bt_len = sizeof(laddr);
769 	laddr.bt_family = AF_BLUETOOTH;
770 	bdaddr_copy(&laddr.bt_bdaddr, &unit->hci_bdaddr);
771 
772 	memset(&raddr, 0, sizeof(raddr));
773 	raddr.bt_len = sizeof(raddr);
774 	raddr.bt_family = AF_BLUETOOTH;
775 	bdaddr_copy(&raddr.bt_bdaddr, bdaddr);
776 
777 	/*
778 	 * There should already be an ACL link up and running before
779 	 * the controller sends us SCO connection requests, but you
780 	 * never know..
781 	 */
782 	acl = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
783 	if (acl == NULL || acl->hl_state != HCI_LINK_OPEN)
784 		return NULL;
785 
786 	LIST_FOREACH(pcb, &sco_pcb, sp_next) {
787 		if ((pcb->sp_flags & SP_LISTENING) == 0)
788 			continue;
789 
790 		new = (*pcb->sp_proto->newconn)(pcb->sp_upper, &laddr, &raddr);
791 		if (new == NULL)
792 			continue;
793 
794 		/*
795 		 * OK, we got a new pcb, so we can start a new link and fill
796 		 * in all the details.
797 		 */
798 		bdaddr_copy(&new->sp_laddr, &unit->hci_bdaddr);
799 		bdaddr_copy(&new->sp_raddr, bdaddr);
800 
801 		sco = hci_link_alloc(unit);
802 		if (sco == NULL) {
803 			sco_detach(&new);
804 			return NULL;
805 		}
806 
807 		sco->hl_type = HCI_LINK_SCO;
808 		bdaddr_copy(&sco->hl_bdaddr, bdaddr);
809 
810 		sco->hl_link = hci_acl_open(unit, bdaddr);
811 		KKASSERT(sco->hl_link == acl);
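		/*
		 * hci_acl_open() takes an extra reference on the baseband
		 * ACL link so it stays up while the SCO link exists;
		 * hci_link_free() releases it via hci_acl_close().
		 */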
812 
813 		sco->hl_sco = new;
814 		new->sp_link = sco;
815 
816 		new->sp_mtu = unit->hci_max_sco_size;
817 		return sco;
818 	}
819 
820 	return NULL;
821 }
822 
823 /*
824  * receive SCO packet; we only need to strip the header and send
825  * it to the right handler
826  */
827 void
828 hci_sco_recv(struct mbuf *m, struct hci_unit *unit)
829 {
830 	struct hci_link *link;
831 	hci_scodata_hdr_t hdr;
832 	uint16_t handle;
833 
834 	KKASSERT(m != NULL);
835 	KKASSERT(unit != NULL);
836 
837 	KKASSERT(m->m_pkthdr.len >= sizeof(hdr));
838 	m_copydata(m, 0, sizeof(hdr), &hdr);
839 	m_adj(m, sizeof(hdr));
840 
841 #ifdef DIAGNOSTIC
842 	if (hdr.type != HCI_SCO_DATA_PKT) {
843 		kprintf("%s: bad SCO packet type\n",
844 		    device_get_nameunit(unit->hci_dev));
845 		goto bad;
846 	}
847 
848 	if (m->m_pkthdr.len != hdr.length) {
849 		kprintf("%s: bad SCO packet length (%d != %d)\n",
850 		    device_get_nameunit(unit->hci_dev), m->m_pkthdr.len,
851 		    hdr.length);
852 		goto bad;
853 	}
854 #endif
855 
856 	hdr.con_handle = letoh16(hdr.con_handle);
857 	handle = HCI_CON_HANDLE(hdr.con_handle);
858 
859 	link = hci_link_lookup_handle(unit, handle);
860 	if (link == NULL || link->hl_type == HCI_LINK_ACL) {
861 		DPRINTF("%s: dumping packet for unknown handle #%d\n",
862 		    device_get_nameunit(unit->hci_dev), handle);
863 
864 		goto bad;
865 	}
866 
867 	(*link->hl_sco->sp_proto->input)(link->hl_sco->sp_upper, m);
868 	return;
869 
870 bad:
871 	m_freem(m);
872 }
873 
874 void
875 hci_sco_start(struct hci_link *link)
876 {
877 }
878 
879 /*
880  * SCO packets have completed at the controller, so we can
881  * signal up to free the buffer space.
882  */
883 void
884 hci_sco_complete(struct hci_link *link, int num)
885 {
886 
887 	DPRINTFN(5, "handle #%d (num=%d)\n", link->hl_handle, num);
888 	link->hl_sco->sp_pending--;
889 	(*link->hl_sco->sp_proto->complete)(link->hl_sco->sp_upper, num);
890 }
891 
892 /*******************************************************************************
893  *
894  *	Generic HCI Connection alloc/free/lookup etc
895  */
896 
897 struct hci_link *
898 hci_link_alloc(struct hci_unit *unit)
899 {
900 	struct hci_link *link;
901 
902 	KKASSERT(unit != NULL);
903 
904 	link = kmalloc(sizeof *link, M_BLUETOOTH, M_NOWAIT | M_ZERO);
905 	if (link == NULL)
906 		return NULL;
907 
908 	link->hl_unit = unit;
909 	link->hl_state = HCI_LINK_CLOSED;
910 
911 	/* init ACL portion */
912 	callout_init(&link->hl_expire);
913 
914 	crit_enter();
915 	TAILQ_INIT(&link->hl_txq);	/* outgoing packets */
916 	TAILQ_INIT(&link->hl_reqs);	/* request queue */
917 
918 	link->hl_mtu = L2CAP_MTU_DEFAULT;		/* L2CAP signal mtu */
919 	link->hl_flush = L2CAP_FLUSH_TIMO_DEFAULT;	/* flush timeout */
920 
921 	/* init SCO portion */
922 	/* &link->hl_data is already zero-initialized. */
923 
924 	/* attach to unit */
925 	TAILQ_INSERT_HEAD(&unit->hci_links, link, hl_next);
926 	crit_exit();
927 	return link;
928 }
929 
930 void
931 hci_link_free(struct hci_link *link, int err)
932 {
933 	struct l2cap_req *req;
934 	struct l2cap_pdu *pdu;
935 	struct l2cap_channel *chan, *next;
936 
937 	KKASSERT(link != NULL);
938 
939 	DPRINTF("(%s) #%d, type = %d, state = %d, refcnt = %d\n",
940 	    device_get_nameunit(link->hl_unit->hci_dev), link->hl_handle,
941 	    link->hl_type, link->hl_state, link->hl_refcnt);
942 
943 	/* ACL reference count */
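	/*
	 * force-close any L2CAP channels still attached to this link;
	 * closing them drops the remaining references, so the count
	 * must reach zero below
	 */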
944 	if (link->hl_refcnt > 0) {
945 		next = LIST_FIRST(&l2cap_active_list);
946 		while ((chan = next) != NULL) {
947 			next = LIST_NEXT(chan, lc_ncid);
948 			if (chan->lc_link == link)
949 				l2cap_close(chan, err);
950 		}
951 	}
952 	KKASSERT(link->hl_refcnt == 0);
953 
954 	/* ACL L2CAP requests.. */
955 	while ((req = TAILQ_FIRST(&link->hl_reqs)) != NULL)
956 		l2cap_request_free(req);
957 
958 	KKASSERT(TAILQ_EMPTY(&link->hl_reqs));
959 
960 	/* ACL outgoing data queue */
961 	while ((pdu = TAILQ_FIRST(&link->hl_txq)) != NULL) {
962 		TAILQ_REMOVE(&link->hl_txq, pdu, lp_next);
963 		IF_DRAIN(&pdu->lp_data);
964 		if (pdu->lp_pending)
965 			link->hl_unit->hci_num_acl_pkts += pdu->lp_pending;
966 
967 		zfree(l2cap_pdu_pool, pdu);
968 	}
969 
970 	KKASSERT(TAILQ_EMPTY(&link->hl_txq));
971 
972 	/* ACL incoming data packet */
973 	if (link->hl_rxp != NULL) {
974 		m_freem(link->hl_rxp);
975 		link->hl_rxp = NULL;
976 	}
977 
978 	/* SCO master ACL link */
979 	if (link->hl_link != NULL) {
980 		hci_acl_close(link->hl_link, err);
981 		link->hl_link = NULL;
982 	}
983 
984 	/* SCO pcb */
985 	if (link->hl_sco != NULL) {
986 		struct sco_pcb *pcb;
987 
988 		pcb = link->hl_sco;
989 		pcb->sp_link = NULL;
990 		link->hl_sco = NULL;
991 		(*pcb->sp_proto->disconnected)(pcb->sp_upper, err);
992 	}
993 
994 	/* flush any SCO data */
995 	crit_enter();
996 	IF_DRAIN(&link->hl_data);
997 	crit_exit();
998 
999 	/*
1000 	 * Halt the timeout - if it's already running we cannot free the
1001 	 * link structure but the timeout function will call us back in
1002 	 * any case.
1003 	 */
1004 	link->hl_state = HCI_LINK_CLOSED;
1005 	callout_stop(&link->hl_expire);
1006 	if (callout_active(&link->hl_expire))
1007 		return;
1008 
1009 	/*
1010 	 * If we made a note of clock offset, keep it in a memo
1011 	 * to facilitate reconnections to this device
1012 	 */
1013 	if (link->hl_clock != 0) {
1014 		struct hci_memo *memo;
1015 
1016 		memo = hci_memo_new(link->hl_unit, &link->hl_bdaddr);
1017 		if (memo != NULL)
1018 			memo->clock_offset = link->hl_clock;
1019 	}
1020 
1021 	crit_enter();
1022 	TAILQ_REMOVE(&link->hl_unit->hci_links, link, hl_next);
1023 	crit_exit();
1024 	kfree(link, M_BLUETOOTH);
1025 }
1026 
1027 /*
1028  * Lookup HCI link by type and state.
1029  */
1030 struct hci_link *
1031 hci_link_lookup_state(struct hci_unit *unit, uint16_t type, uint16_t state)
1032 {
1033 	struct hci_link *link;
1034 
1035 	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
1036 		if (link->hl_type == type && link->hl_state == state)
1037 			break;
1038 	}
1039 
1040 	return link;
1041 }
1042 
1043 /*
1044  * Lookup HCI link by address and type. Note that for SCO links there may
1045  * be more than one link per address, so we only return links with no
1046  * handle (i.e. new links).
1047  */
1048 struct hci_link *
1049 hci_link_lookup_bdaddr(struct hci_unit *unit, bdaddr_t *bdaddr, uint16_t type)
1050 {
1051 	struct hci_link *link;
1052 
1053 	KKASSERT(unit != NULL);
1054 	KKASSERT(bdaddr != NULL);
1055 
1056 	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
1057 		if (link->hl_type != type)
1058 			continue;
1059 
1060 		if (type == HCI_LINK_SCO && link->hl_handle != 0)
1061 			continue;
1062 
1063 		if (bdaddr_same(&link->hl_bdaddr, bdaddr))
1064 			break;
1065 	}
1066 
1067 	return link;
1068 }
1069 
1070 struct hci_link *
1071 hci_link_lookup_handle(struct hci_unit *unit, uint16_t handle)
1072 {
1073 	struct hci_link *link;
1074 
1075 	KKASSERT(unit != NULL);
1076 
1077 	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
1078 		if (handle == link->hl_handle)
1079 			break;
1080 	}
1081 
1082 	return link;
1083 }
1084