1 /* $DragonFly: src/sys/netbt/hci_link.c,v 1.2 2008/03/18 13:41:42 hasso Exp $ */
2 /* $OpenBSD: src/sys/netbt/hci_link.c,v 1.7 2008/02/24 21:34:48 uwe Exp $ */
3 /* $NetBSD: hci_link.c,v 1.16 2007/11/10 23:12:22 plunky Exp $ */
4 
5 /*-
6  * Copyright (c) 2005 Iain Hibbert.
7  * Copyright (c) 2006 Itronix Inc.
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. The name of Itronix Inc. may not be used to endorse
19  *    or promote products derived from this software without specific
20  *    prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY ITRONIX INC. ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL ITRONIX INC. BE LIABLE FOR ANY
26  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
27  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
29  * ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/mbuf.h>
39 #include <sys/proc.h>
40 #include <sys/queue.h>
41 #include <sys/systm.h>
42 #include <sys/endian.h>
43 #include <sys/callout.h>
44 #include <net/if.h>
45 #include <sys/bus.h>
46 
47 #include <netbt/bluetooth.h>
48 #include <netbt/hci.h>
49 #include <netbt/l2cap.h>
50 #include <netbt/sco.h>
51 
52 /*******************************************************************************
53  *
54  *	HCI ACL Connections
55  */
56 
57 /*
58  * Automatically expire unused ACL connections after this number of
59  * seconds (if zero, do not expire unused connections) [sysctl]
60  */
61 int hci_acl_expiry = 10;	/* seconds */
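/*
 * A minimal sketch (an assumption, not code from this file) of how the
 * knob above might be wired up with DragonFly's sysctl macros, assuming
 * a net.bluetooth.hci node is declared elsewhere in netbt:
 *
 *	SYSCTL_INT(_net_bluetooth_hci, OID_AUTO, acl_expiry, CTLFLAG_RW,
 *	    &hci_acl_expiry, 0, "ACL connection expiry (seconds)");
 */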
62 
63 /*
64  * hci_acl_open(unit, bdaddr)
65  *
66  * open ACL connection to remote bdaddr. Only one ACL connection is permitted
67  * between any two Bluetooth devices, so we look for an existing one before
68  * trying to start a new one.
69  */
70 struct hci_link *
71 hci_acl_open(struct hci_unit *unit, bdaddr_t *bdaddr)
72 {
73 	struct hci_link *link;
74 	struct hci_memo *memo;
75 	hci_create_con_cp cp;
76 	int err;
77 
78 	KKASSERT(unit != NULL);
79 	KKASSERT(bdaddr != NULL);
80 
81 	link = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
82 	if (link == NULL) {
83 		link = hci_link_alloc(unit);
84 		if (link == NULL)
85 			return NULL;
86 
87 		link->hl_type = HCI_LINK_ACL;
88 		bdaddr_copy(&link->hl_bdaddr, bdaddr);
89 	}
90 
91 	switch(link->hl_state) {
92 	case HCI_LINK_CLOSED:
93 		/*
94 		 * open connection to remote device
95 		 */
96 		memset(&cp, 0, sizeof(cp));
97 		bdaddr_copy(&cp.bdaddr, bdaddr);
98 		cp.pkt_type = htole16(unit->hci_packet_type);
99 
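		/*
		 * Reuse the page scan parameters and clock offset remembered
		 * from a recent inquiry result, which lets the controller set
		 * up the baseband connection faster.
		 */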
100 		memo = hci_memo_find(unit, bdaddr);
101 		if (memo != NULL) {
102 			cp.page_scan_rep_mode = memo->page_scan_rep_mode;
103 			cp.page_scan_mode = memo->page_scan_mode;
104 			cp.clock_offset = memo->clock_offset;
105 		}
106 
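		/*
		 * Permit a master/slave role switch during connection setup
		 * if our link policy allows it.
		 */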
107 		if (unit->hci_link_policy & HCI_LINK_POLICY_ENABLE_ROLE_SWITCH)
108 			cp.accept_role_switch = 1;
109 
110 		err = hci_send_cmd(unit, HCI_CMD_CREATE_CON, &cp, sizeof(cp));
111 		if (err) {
112 			hci_link_free(link, err);
113 			return NULL;
114 		}
115 
116 		link->hl_state = HCI_LINK_WAIT_CONNECT;
117 		break;
118 
119 	case HCI_LINK_WAIT_CONNECT:
120 	case HCI_LINK_WAIT_AUTH:
121 	case HCI_LINK_WAIT_ENCRYPT:
122 	case HCI_LINK_WAIT_SECURE:
123 		/*
124 		 * somebody else is already trying to connect, so we just
125 		 * sit on the bench with them.
126 		 */
127 		break;
128 
129 	case HCI_LINK_OPEN:
130 		/*
131 		 * If already open, halt any expiry callouts. We don't need
132 		 * to worry about a callout that is already running, since
133 		 * refcnt > 0 will keep the link alive.
134 		 */
135 		callout_stop(&link->hl_expire);
136 		break;
137 
138 	default:
139 		UNKNOWN(link->hl_state);
140 		return NULL;
141 	}
142 
143 	/* open */
144 	link->hl_refcnt++;
145 
146 	return link;
147 }
148 
149 /*
150  * Close ACL connection. When there are no more references to this link,
151  * we can either close it down or schedule a delayed closedown.
152  */
153 void
154 hci_acl_close(struct hci_link *link, int err)
155 {
156 	KKASSERT(link != NULL);
157 
158 	if (--link->hl_refcnt == 0) {
159 		if (link->hl_state == HCI_LINK_CLOSED)
160 			hci_link_free(link, err);
161 		else if (hci_acl_expiry > 0)
162 			callout_reset(&link->hl_expire, hci_acl_expiry * hz,
163 			    hci_acl_timeout, link);
164 	}
165 }
166 
167 /*
168  * Incoming ACL connection.
169  *
170  * For now, we accept all connections but it would be better to check
171  * the L2CAP listen list and only accept when there is a listener
172  * available.
173  *
174  * There should not be a link to the same bdaddr already; we check
175  * anyway, though it's left unhandled for now.
176  */
177 struct hci_link *
178 hci_acl_newconn(struct hci_unit *unit, bdaddr_t *bdaddr)
179 {
180 	struct hci_link *link;
181 
182 	link = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
183 	if (link != NULL)
184 		return NULL;
185 
186 	link = hci_link_alloc(unit);
187 	if (link != NULL) {
188 		link->hl_state = HCI_LINK_WAIT_CONNECT;
189 		link->hl_type = HCI_LINK_ACL;
190 		bdaddr_copy(&link->hl_bdaddr, bdaddr);
191 
192 		if (hci_acl_expiry > 0)
193 			callout_reset(&link->hl_expire, hci_acl_expiry * hz,
194 			    hci_acl_timeout, link);
195 	}
196 
197 	return link;
198 }
199 
200 void
201 hci_acl_timeout(void *arg)
202 {
203 	struct hci_link *link = arg;
204 	hci_discon_cp cp;
205 	int err;
206 
207 	crit_enter();
208 
209 	if (link->hl_refcnt > 0)
210 		goto out;
211 
212 	DPRINTF("link #%d expired\n", link->hl_handle);
213 
214 	switch (link->hl_state) {
215 	case HCI_LINK_CLOSED:
216 	case HCI_LINK_WAIT_CONNECT:
217 		hci_link_free(link, ECONNRESET);
218 		break;
219 
220 	case HCI_LINK_WAIT_AUTH:
221 	case HCI_LINK_WAIT_ENCRYPT:
222 	case HCI_LINK_WAIT_SECURE:
223 	case HCI_LINK_OPEN:
224 		cp.con_handle = htole16(link->hl_handle);
225 		cp.reason = 0x13; /* "Remote User Terminated Connection" */
226 
227 		err = hci_send_cmd(link->hl_unit, HCI_CMD_DISCONNECT,
228 					&cp, sizeof(cp));
229 
230 		if (err) {
231 			DPRINTF("error %d sending HCI_CMD_DISCONNECT\n",
232 			    err);
233 		}
234 
235 		break;
236 
237 	default:
238 		UNKNOWN(link->hl_state);
239 		break;
240 	}
241 
242 out:
243 	crit_exit();
244 }
245 
246 /*
247  * Initiate any Link Mode change requests.
248  */
249 int
250 hci_acl_setmode(struct hci_link *link)
251 {
252 	int err;
253 
254 	KKASSERT(link != NULL);
255 	KKASSERT(link->hl_unit != NULL);
256 
257 	if (link->hl_state != HCI_LINK_OPEN)
258 		return EINPROGRESS;
259 
260 	if ((link->hl_flags & HCI_LINK_AUTH_REQ)
261 	    && !(link->hl_flags & HCI_LINK_AUTH)) {
262 		hci_auth_req_cp cp;
263 
264 		DPRINTF("(%s) requesting auth for handle #%d\n",
265 		    device_get_nameunit(link->hl_unit->hci_dev),
266 		    link->hl_handle);
267 
268 		link->hl_state = HCI_LINK_WAIT_AUTH;
269 		cp.con_handle = htole16(link->hl_handle);
270 		err = hci_send_cmd(link->hl_unit, HCI_CMD_AUTH_REQ,
271 				   &cp, sizeof(cp));
272 
273 		return (err == 0 ? EINPROGRESS : err);
274 	}
275 
276 	if ((link->hl_flags & HCI_LINK_ENCRYPT_REQ)
277 	    && !(link->hl_flags & HCI_LINK_ENCRYPT)) {
278 		hci_set_con_encryption_cp cp;
279 
280 		/* XXX we should check features for encryption capability */
281 
282 		DPRINTF("(%s) requesting encryption for handle #%d\n",
283 		    device_get_nameunit(link->hl_unit->hci_dev),
284 		    link->hl_handle);
285 
286 		link->hl_state = HCI_LINK_WAIT_ENCRYPT;
287 		cp.con_handle = htole16(link->hl_handle);
288 		cp.encryption_enable = 0x01;
289 
290 		err = hci_send_cmd(link->hl_unit, HCI_CMD_SET_CON_ENCRYPTION,
291 				   &cp, sizeof(cp));
292 
293 		return (err == 0 ? EINPROGRESS : err);
294 	}
295 
296 	if ((link->hl_flags & HCI_LINK_SECURE_REQ)) {
297 		hci_change_con_link_key_cp cp;
298 
299 		/* always change link key for SECURE requests */
300 		link->hl_flags &= ~HCI_LINK_SECURE;
301 
302 		DPRINTF("(%s) changing link key for handle #%d\n",
303 		    device_get_nameunit(link->hl_unit->hci_dev),
304 		    link->hl_handle);
305 
306 		link->hl_state = HCI_LINK_WAIT_SECURE;
307 		cp.con_handle = htole16(link->hl_handle);
308 
309 		err = hci_send_cmd(link->hl_unit, HCI_CMD_CHANGE_CON_LINK_KEY,
310 				   &cp, sizeof(cp));
311 
312 		return (err == 0 ? EINPROGRESS : err);
313 	}
314 
315 	return 0;
316 }
317 
318 /*
319  * Link Mode changed.
320  *
321  * This is called from event handlers when the mode change
322  * is complete. We notify upstream and restart the link.
323  */
324 void
325 hci_acl_linkmode(struct hci_link *link)
326 {
327 	struct l2cap_channel *chan, *next;
328 	int err, mode = 0;
329 
330 	DPRINTF("(%s) handle #%d, auth %s, encrypt %s, secure %s\n",
331 	    device_get_nameunit(link->hl_unit->hci_dev), link->hl_handle,
332 	    (link->hl_flags & HCI_LINK_AUTH ? "on" : "off"),
333 	    (link->hl_flags & HCI_LINK_ENCRYPT ? "on" : "off"),
334 	    (link->hl_flags & HCI_LINK_SECURE ? "on" : "off"));
335 
336 	if (link->hl_flags & HCI_LINK_AUTH)
337 		mode |= L2CAP_LM_AUTH;
338 
339 	if (link->hl_flags & HCI_LINK_ENCRYPT)
340 		mode |= L2CAP_LM_ENCRYPT;
341 
342 	if (link->hl_flags & HCI_LINK_SECURE)
343 		mode |= L2CAP_LM_SECURE;
344 
345 	/*
346 	 * The link state will only be OPEN here if the mode change
347 	 * was successful. So, we can proceed with L2CAP connections,
348 	 * or notify already established channels, to allow any that
349 	 * are dissatisfied to disconnect before we restart.
350 	 */
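	/*
	 * Traverse via a saved 'next' pointer because l2cap_close() may
	 * remove the current channel from the active list.
	 */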
351 	next = LIST_FIRST(&l2cap_active_list);
352 	while ((chan = next) != NULL) {
353 		next = LIST_NEXT(chan, lc_ncid);
354 
355 		if (chan->lc_link != link)
356 			continue;
357 
358 		switch(chan->lc_state) {
359 		case L2CAP_WAIT_SEND_CONNECT_REQ: /* we are connecting */
360 			if ((mode & chan->lc_mode) != chan->lc_mode) {
361 				l2cap_close(chan, ECONNABORTED);
362 				break;
363 			}
364 
365 			chan->lc_state = L2CAP_WAIT_RECV_CONNECT_RSP;
366 			err = l2cap_send_connect_req(chan);
367 			if (err) {
368 				l2cap_close(chan, err);
369 				break;
370 			}
371 			break;
372 
373 		case L2CAP_WAIT_SEND_CONNECT_RSP: /* they are connecting */
374 			if ((mode & chan->lc_mode) != chan->lc_mode) {
375 				l2cap_send_connect_rsp(link, chan->lc_ident,
376 							0, chan->lc_rcid,
377 							L2CAP_SECURITY_BLOCK);
378 
379 				l2cap_close(chan, ECONNABORTED);
380 				break;
381 			}
382 
383 			l2cap_send_connect_rsp(link, chan->lc_ident,
384 						chan->lc_lcid, chan->lc_rcid,
385 						L2CAP_SUCCESS);
386 
387 			chan->lc_state = L2CAP_WAIT_CONFIG;
388 			chan->lc_flags |= (L2CAP_WAIT_CONFIG_RSP | L2CAP_WAIT_CONFIG_REQ);
389 			err = l2cap_send_config_req(chan);
390 			if (err) {
391 				l2cap_close(chan, err);
392 				break;
393 			}
394 			break;
395 
396 		case L2CAP_WAIT_RECV_CONNECT_RSP:
397 		case L2CAP_WAIT_CONFIG:
398 		case L2CAP_OPEN: /* already established */
399 			(*chan->lc_proto->linkmode)(chan->lc_upper, mode);
400 			break;
401 
402 		default:
403 			break;
404 		}
405 	}
406 
407 	link->hl_state = HCI_LINK_OPEN;
408 	hci_acl_start(link);
409 }
410 
411 /*
412  * Receive ACL Data
413  *
414  * We accumulate packet fragments on the hci_link structure
415  * until a full L2CAP frame is ready, then send it on.
416  */
417 void
418 hci_acl_recv(struct mbuf *m, struct hci_unit *unit)
419 {
420 	struct hci_link *link;
421 	hci_acldata_hdr_t hdr;
422 	uint16_t handle, want;
423 	int pb, got;
424 
425 	KKASSERT(m != NULL);
426 	KKASSERT(unit != NULL);
427 
428 	KKASSERT(m->m_pkthdr.len >= sizeof(hdr));
429 	m_copydata(m, 0, sizeof(hdr), (caddr_t)&hdr);
430 	m_adj(m, sizeof(hdr));
431 
432 #ifdef DIAGNOSTIC
433 	if (hdr.type != HCI_ACL_DATA_PKT) {
434 		kprintf("%s: bad ACL packet type\n",
435 		    device_get_nameunit(unit->hci_dev));
436 		goto bad;
437 	}
438 
439 	if (m->m_pkthdr.len != letoh16(hdr.length)) {
440 		kprintf("%s: bad ACL packet length (%d != %d)\n",
441 		    device_get_nameunit(unit->hci_dev), m->m_pkthdr.len,
442 		    letoh16(hdr.length));
443 		goto bad;
444 	}
445 #endif
446 
447 	hdr.length = letoh16(hdr.length);
448 	hdr.con_handle = letoh16(hdr.con_handle);
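	/*
	 * The 16-bit con_handle field packs the 12-bit connection handle
	 * (bits 0..11) with the packet boundary flag (bits 12..13) and the
	 * broadcast flag (bits 14..15); pick out the pieces we need.
	 */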
449 	handle = HCI_CON_HANDLE(hdr.con_handle);
450 	pb = HCI_PB_FLAG(hdr.con_handle);
451 
452 	link = hci_link_lookup_handle(unit, handle);
453 	if (link == NULL) {
454 		hci_discon_cp cp;
455 
456 		DPRINTF("%s: dumping packet for unknown handle #%d\n",
457 		    device_get_nameunit(unit->hci_dev), handle);
458 
459 		/*
460 		 * There is no way to find out what this connection handle is
461 		 * for, so just get rid of it. This may happen if a USB dongle
462 		 * is plugged into a self-powered hub and does not reset when
463 		 * the system is shut down.
464 		 */
465 		cp.con_handle = htole16(handle);
466 		cp.reason = 0x13; /* "Remote User Terminated Connection" */
467 		hci_send_cmd(unit, HCI_CMD_DISCONNECT, &cp, sizeof(cp));
468 		goto bad;
469 	}
470 
471 	switch (pb) {
472 	case HCI_PACKET_START:
473 		if (link->hl_rxp != NULL)
474 			kprintf("%s: dropped incomplete ACL packet\n",
475 			    device_get_nameunit(unit->hci_dev));
476 
477 		if (m->m_pkthdr.len < sizeof(l2cap_hdr_t)) {
478 			kprintf("%s: short ACL packet\n",
479 			    device_get_nameunit(unit->hci_dev));
480 
481 			goto bad;
482 		}
483 
484 		link->hl_rxp = m;
485 		got = m->m_pkthdr.len;
486 		break;
487 
488 	case HCI_PACKET_FRAGMENT:
489 		if (link->hl_rxp == NULL) {
490 			kprintf("%s: unexpected packet fragment\n",
491 			    device_get_nameunit(unit->hci_dev));
492 
493 			goto bad;
494 		}
495 
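		/*
		 * Append this fragment to the packet under reassembly and
		 * update the total length accumulated so far.
		 */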
496 		got = m->m_pkthdr.len + link->hl_rxp->m_pkthdr.len;
497 		m_cat(link->hl_rxp, m);
498 		m = link->hl_rxp;
499 		m->m_pkthdr.len = got;
500 		break;
501 
502 	default:
503 		kprintf("%s: unknown packet type\n",
504 		    device_get_nameunit(unit->hci_dev));
505 
506 		goto bad;
507 	}
508 
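	/*
	 * The first two octets of the L2CAP basic header give the payload
	 * length; work out how many more octets we still need before the
	 * frame is complete.
	 */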
509 	m_copydata(m, 0, sizeof(want), (caddr_t)&want);
510 	want = letoh16(want) + sizeof(l2cap_hdr_t) - got;
511 
512 	if (want > 0)
513 		return;
514 
515 	link->hl_rxp = NULL;
516 
517 	if (want == 0) {
518 		l2cap_recv_frame(m, link);
519 		return;
520 	}
521 
522 bad:
523 	m_freem(m);
524 }
525 
526 /*
527  * Send ACL data on link
528  *
529  * We must fragment packets into chunks of at most unit->hci_max_acl_size
530  * and prepend an ACL header to each fragment. We keep a PDU structure
531  * attached to the link, so that completed fragments can be marked off and
532  * more data requested from above once the PDU is sent.
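 *
 * For example (the numbers are purely illustrative): with an
 * hci_max_acl_size of 384, a 1000 byte L2CAP packet is queued as
 * fragments of 384, 384 and 232 bytes, and only the first is tagged
 * M_PROTO1 so that hci_acl_start() sends it with the START packet
 * boundary flag.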
533  */
534 int
535 hci_acl_send(struct mbuf *m, struct hci_link *link,
536 		struct l2cap_channel *chan)
537 {
538 	struct l2cap_pdu *pdu;
539 	struct mbuf *n = NULL;
540 	int plen, mlen, num = 0;
541 
542 	KKASSERT(link != NULL);
543 	KKASSERT(m != NULL);
544 	KKASSERT(m->m_flags & M_PKTHDR);
545 	KKASSERT(m->m_pkthdr.len > 0);
546 
547 	if (link->hl_state == HCI_LINK_CLOSED) {
548 		m_freem(m);
549 		return ENETDOWN;
550 	}
551 
552 	pdu = zalloc(l2cap_pdu_pool);
553 	if (pdu == NULL)
554 		goto nomem;
555 
556 	bzero(pdu, sizeof *pdu);
557 	pdu->lp_chan = chan;
558 	pdu->lp_pending = 0;
559 
560 	plen = m->m_pkthdr.len;
561 	mlen = link->hl_unit->hci_max_acl_size;
562 
563 	DPRINTFN(5, "%s: handle #%d, plen = %d, max = %d\n",
564 	    device_get_nameunit(link->hl_unit->hci_dev),
565 	    link->hl_handle, plen, mlen);
566 
567 	while (plen > 0) {
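		/*
		 * Carve at most mlen bytes off the front of the packet;
		 * m_split() leaves the remainder in n for the next pass.
		 */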
568 		if (plen > mlen) {
569 			n = m_split(m, mlen, MB_DONTWAIT);
570 			if (n == NULL)
571 				goto nomem;
572 		} else {
573 			mlen = plen;
574 		}
575 
576 		if (num++ == 0)
577 			m->m_flags |= M_PROTO1;	/* tag first fragment */
578 
579 		DPRINTFN(10, "(%s) chunk of %d (plen = %d) bytes\n",
580 		    device_get_nameunit(link->hl_unit->hci_dev), mlen, plen);
581 		IF_ENQUEUE(&pdu->lp_data, m);
582 		m = n;
583 		plen -= mlen;
584 	}
585 
586 	TAILQ_INSERT_TAIL(&link->hl_txq, pdu, lp_next);
587 	link->hl_txqlen += num;
588 
589 	hci_acl_start(link);
590 
591 	return 0;
592 
593 nomem:
594 	if (m) m_freem(m);
595 	if (pdu) {
596 		IF_DRAIN(&pdu->lp_data);
597 		zfree(l2cap_pdu_pool, pdu);
598 	}
599 
600 	return ENOMEM;
601 }
602 
603 /*
604  * Start sending ACL data on link.
605  *
606  *	This is called when the queue may need restarting: as new data
607  * is queued, after link mode changes have completed, or when device
608  * buffers have cleared.
609  *
610  *	We may use all the available packet slots. The reason that we add
611  * the ACL encapsulation here rather than in hci_acl_send() is that L2CAP
612  * signal packets may be queued before the handle is given to us..
613  */
614 void
615 hci_acl_start(struct hci_link *link)
616 {
617 	struct hci_unit *unit;
618 	hci_acldata_hdr_t *hdr;
619 	struct l2cap_pdu *pdu;
620 	struct mbuf *m;
621 	uint16_t handle;
622 
623 	KKASSERT(link != NULL);
624 
625 	unit = link->hl_unit;
626 	KKASSERT(unit != NULL);
627 
628 	/* this is mainly to block ourselves (below) */
629 	if (link->hl_state != HCI_LINK_OPEN)
630 		return;
631 
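	/* nothing queued, or the controller has no free ACL buffer slots */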
632 	if (link->hl_txqlen == 0 || unit->hci_num_acl_pkts == 0)
633 		return;
634 
635 	/* find first PDU with data to send */
636 	pdu = TAILQ_FIRST(&link->hl_txq);
637 	for (;;) {
638 		if (pdu == NULL)
639 			return;
640 
641 		if (!IF_QEMPTY(&pdu->lp_data))
642 			break;
643 
644 		pdu = TAILQ_NEXT(pdu, lp_next);
645 	}
646 
647 	while (unit->hci_num_acl_pkts > 0) {
648 		IF_DEQUEUE(&pdu->lp_data, m);
649 		KKASSERT(m != NULL);
650 
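		/*
		 * M_PROTO1 was set on the first fragment of each PDU in
		 * hci_acl_send(); it becomes a START packet, everything
		 * else is sent as a continuing FRAGMENT.
		 */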
651 		if (m->m_flags & M_PROTO1)
652 			handle = HCI_MK_CON_HANDLE(link->hl_handle,
653 						HCI_PACKET_START, 0);
654 		else
655 			handle = HCI_MK_CON_HANDLE(link->hl_handle,
656 						HCI_PACKET_FRAGMENT, 0);
657 
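		/*
		 * Prepend space for the ACL data header; M_PREPEND frees
		 * the mbuf and leaves m NULL if no space can be found.
		 */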
658 		M_PREPEND(m, sizeof(*hdr), MB_DONTWAIT);
659 		if (m == NULL)
660 			break;
661 
662 		hdr = mtod(m, hci_acldata_hdr_t *);
663 		hdr->type = HCI_ACL_DATA_PKT;
664 		hdr->con_handle = htole16(handle);
665 		hdr->length = htole16(m->m_pkthdr.len - sizeof(*hdr));
666 
667 		link->hl_txqlen--;
668 		pdu->lp_pending++;
669 
670 		hci_output_acl(unit, m);
671 
672 		if (IF_QEMPTY(&pdu->lp_data)) {
673 			if (pdu->lp_chan) {
674 				/*
675 				 * This should enable streaming of PDUs: when
676 				 * we have placed all the fragments on the ACL
677 				 * output queue, we trigger the L2CAP layer to
678 				 * send us down one more. Use a false state so
679 				 * we don't run into ourselves if l2cap_start()
680 				 * calls straight back into hci_acl_start().
681 				 */
682 				link->hl_state = HCI_LINK_BLOCK;
683 				l2cap_start(pdu->lp_chan);
684 				link->hl_state = HCI_LINK_OPEN;
685 			}
686 
687 			pdu = TAILQ_NEXT(pdu, lp_next);
688 			if (pdu == NULL)
689 				break;
690 		}
691 	}
692 
693 	/*
694 	 * We have had our turn, so move to the back of the queue to let
695 	 * other links have a go at the output buffers.
696 	 */
697 	if (TAILQ_NEXT(link, hl_next)) {
698 		TAILQ_REMOVE(&unit->hci_links, link, hl_next);
699 		TAILQ_INSERT_TAIL(&unit->hci_links, link, hl_next);
700 	}
701 }
702 
703 /*
704  * Confirm ACL packets cleared from Controller buffers. We scan our PDU
705  * list to clear pending fragments and signal upstream for more data
706  * when a PDU is complete.
707  */
708 void
709 hci_acl_complete(struct hci_link *link, int num)
710 {
711 	struct l2cap_pdu *pdu;
712 	struct l2cap_channel *chan;
713 
714 	DPRINTFN(5, "(%s) handle #%d (%d)\n",
715 	    device_get_nameunit(link->hl_unit->hci_dev), link->hl_handle, num);
716 
717 	while (num > 0) {
718 		pdu = TAILQ_FIRST(&link->hl_txq);
719 		if (pdu == NULL) {
720 			kprintf("%s: %d packets completed on handle #%x "
721 				"but none pending!\n",
722 				device_get_nameunit(link->hl_unit->hci_dev),
723 				num, link->hl_handle);
724 			return;
725 		}
726 
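		/*
		 * All outstanding fragments of this PDU are covered by the
		 * completion count; if its data queue is also empty the PDU
		 * is done and its channel can be told to send more.
		 */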
727 		if (num >= pdu->lp_pending) {
728 			num -= pdu->lp_pending;
729 			pdu->lp_pending = 0;
730 
731 			if (IF_QEMPTY(&pdu->lp_data)) {
732 				TAILQ_REMOVE(&link->hl_txq, pdu, lp_next);
733 				chan = pdu->lp_chan;
734 				if (chan != NULL) {
735 					chan->lc_pending--;
736 					(*chan->lc_proto->complete)
737 							(chan->lc_upper, 1);
738 
739 					if (chan->lc_pending == 0)
740 						l2cap_start(chan);
741 				}
742 
743 				zfree(l2cap_pdu_pool, pdu);
744 			}
745 		} else {
746 			pdu->lp_pending -= num;
747 			num = 0;
748 		}
749 	}
750 }
751 
752 /*******************************************************************************
753  *
754  *	HCI SCO Connections
755  */
756 
757 /*
758  * Incoming SCO connection. We check the SCO PCB list for a listener
759  * willing to take it.
760  */
761 struct hci_link *
762 hci_sco_newconn(struct hci_unit *unit, bdaddr_t *bdaddr)
763 {
764 	struct sockaddr_bt laddr, raddr;
765 	struct sco_pcb *pcb, *new;
766 	struct hci_link *sco, *acl;
767 
768 	memset(&laddr, 0, sizeof(laddr));
769 	laddr.bt_len = sizeof(laddr);
770 	laddr.bt_family = AF_BLUETOOTH;
771 	bdaddr_copy(&laddr.bt_bdaddr, &unit->hci_bdaddr);
772 
773 	memset(&raddr, 0, sizeof(raddr));
774 	raddr.bt_len = sizeof(raddr);
775 	raddr.bt_family = AF_BLUETOOTH;
776 	bdaddr_copy(&raddr.bt_bdaddr, bdaddr);
777 
778 	/*
779 	 * There should already be an ACL link up and running before
780 	 * the controller sends us SCO connection requests, but you
781 	 * never know..
782 	 */
783 	acl = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
784 	if (acl == NULL || acl->hl_state != HCI_LINK_OPEN)
785 		return NULL;
786 
787 	LIST_FOREACH(pcb, &sco_pcb, sp_next) {
788 		if ((pcb->sp_flags & SP_LISTENING) == 0)
789 			continue;
790 
791 		new = (*pcb->sp_proto->newconn)(pcb->sp_upper, &laddr, &raddr);
792 		if (new == NULL)
793 			continue;
794 
795 		/*
796 		 * OK, we got a new pcb, so we can start a new link and
797 		 * fill in all the details.
798 		 */
799 		bdaddr_copy(&new->sp_laddr, &unit->hci_bdaddr);
800 		bdaddr_copy(&new->sp_raddr, bdaddr);
801 
802 		sco = hci_link_alloc(unit);
803 		if (sco == NULL) {
804 			sco_detach(&new);
805 			return NULL;
806 		}
807 
808 		sco->hl_type = HCI_LINK_SCO;
809 		bdaddr_copy(&sco->hl_bdaddr, bdaddr);
810 
811 		sco->hl_link = hci_acl_open(unit, bdaddr);
812 		KKASSERT(sco->hl_link == acl);
813 
814 		sco->hl_sco = new;
815 		new->sp_link = sco;
816 
817 		new->sp_mtu = unit->hci_max_sco_size;
818 		return sco;
819 	}
820 
821 	return NULL;
822 }
823 
824 /*
825  * Receive SCO packet. We only need to strip the header and send
826  * it to the right handler.
827  */
828 void
829 hci_sco_recv(struct mbuf *m, struct hci_unit *unit)
830 {
831 	struct hci_link *link;
832 	hci_scodata_hdr_t hdr;
833 	uint16_t handle;
834 
835 	KKASSERT(m != NULL);
836 	KKASSERT(unit != NULL);
837 
838 	KKASSERT(m->m_pkthdr.len >= sizeof(hdr));
839 	m_copydata(m, 0, sizeof(hdr), (caddr_t)&hdr);
840 	m_adj(m, sizeof(hdr));
841 
842 #ifdef DIAGNOSTIC
843 	if (hdr.type != HCI_SCO_DATA_PKT) {
844 		kprintf("%s: bad SCO packet type\n",
845 		    device_get_nameunit(unit->hci_dev));
846 		goto bad;
847 	}
848 
849 	if (m->m_pkthdr.len != hdr.length) {
850 		kprintf("%s: bad SCO packet length (%d != %d)\n",
851 		    device_get_nameunit(unit->hci_dev), m->m_pkthdr.len,
852 		    hdr.length);
853 		goto bad;
854 	}
855 #endif
856 
857 	hdr.con_handle = letoh16(hdr.con_handle);
858 	handle = HCI_CON_HANDLE(hdr.con_handle);
859 
860 	link = hci_link_lookup_handle(unit, handle);
861 	if (link == NULL || link->hl_type == HCI_LINK_ACL) {
862 		DPRINTF("%s: dumping packet for unknown handle #%d\n",
863 		    device_get_nameunit(unit->hci_dev), handle);
864 
865 		goto bad;
866 	}
867 
868 	(*link->hl_sco->sp_proto->input)(link->hl_sco->sp_upper, m);
869 	return;
870 
871 bad:
872 	m_freem(m);
873 }
874 
875 void
876 hci_sco_start(struct hci_link *link)
877 {
878 }
879 
880 /*
881  * SCO packets have completed at the controller, so we can
882  * signal up to free the buffer space.
883  */
884 void
885 hci_sco_complete(struct hci_link *link, int num)
886 {
887 
888 	DPRINTFN(5, "handle #%d (num=%d)\n", link->hl_handle, num);
889 	link->hl_sco->sp_pending--;
890 	(*link->hl_sco->sp_proto->complete)(link->hl_sco->sp_upper, num);
891 }
892 
893 /*******************************************************************************
894  *
895  *	Generic HCI Connection alloc/free/lookup etc
896  */
897 
898 struct hci_link *
899 hci_link_alloc(struct hci_unit *unit)
900 {
901 	struct hci_link *link;
902 
903 	KKASSERT(unit != NULL);
904 
905 	link = kmalloc(sizeof *link, M_BLUETOOTH, M_NOWAIT | M_ZERO);
906 	if (link == NULL)
907 		return NULL;
908 
909 	link->hl_unit = unit;
910 	link->hl_state = HCI_LINK_CLOSED;
911 
912 	/* init ACL portion */
913 	callout_init(&link->hl_expire);
914 
915 	crit_enter();
916 	TAILQ_INIT(&link->hl_txq);	/* outgoing packets */
917 	TAILQ_INIT(&link->hl_reqs);	/* request queue */
918 
919 	link->hl_mtu = L2CAP_MTU_DEFAULT;		/* L2CAP signal mtu */
920 	link->hl_flush = L2CAP_FLUSH_TIMO_DEFAULT;	/* flush timeout */
921 
922 	/* init SCO portion */
923 	/* &link->hl_data is already zero-initialized. */
924 
925 	/* attach to unit */
926 	TAILQ_INSERT_HEAD(&unit->hci_links, link, hl_next);
927 	crit_exit();
928 	return link;
929 }
930 
931 void
932 hci_link_free(struct hci_link *link, int err)
933 {
934 	struct l2cap_req *req;
935 	struct l2cap_pdu *pdu;
936 	struct l2cap_channel *chan, *next;
937 
938 	KKASSERT(link != NULL);
939 
940 	DPRINTF("(%s) #%d, type = %d, state = %d, refcnt = %d\n",
941 	    device_get_nameunit(link->hl_unit->hci_dev), link->hl_handle,
942 	    link->hl_type, link->hl_state, link->hl_refcnt);
943 
944 	/* ACL reference count */
945 	if (link->hl_refcnt > 0) {
946 		next = LIST_FIRST(&l2cap_active_list);
947 		while ((chan = next) != NULL) {
948 			next = LIST_NEXT(chan, lc_ncid);
949 			if (chan->lc_link == link)
950 				l2cap_close(chan, err);
951 		}
952 	}
953 	KKASSERT(link->hl_refcnt == 0);
954 
955 	/* ACL L2CAP requests.. */
956 	while ((req = TAILQ_FIRST(&link->hl_reqs)) != NULL)
957 		l2cap_request_free(req);
958 
959 	KKASSERT(TAILQ_EMPTY(&link->hl_reqs));
960 
961 	/* ACL outgoing data queue */
962 	while ((pdu = TAILQ_FIRST(&link->hl_txq)) != NULL) {
963 		TAILQ_REMOVE(&link->hl_txq, pdu, lp_next);
964 		IF_DRAIN(&pdu->lp_data);
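		/*
		 * Reclaim the controller buffer credits for fragments that
		 * were sent but never reported complete.
		 */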
965 		if (pdu->lp_pending)
966 			link->hl_unit->hci_num_acl_pkts += pdu->lp_pending;
967 
968 		zfree(l2cap_pdu_pool, pdu);
969 	}
970 
971 	KKASSERT(TAILQ_EMPTY(&link->hl_txq));
972 
973 	/* ACL incoming data packet */
974 	if (link->hl_rxp != NULL) {
975 		m_freem(link->hl_rxp);
976 		link->hl_rxp = NULL;
977 	}
978 
979 	/* SCO master ACL link */
980 	if (link->hl_link != NULL) {
981 		hci_acl_close(link->hl_link, err);
982 		link->hl_link = NULL;
983 	}
984 
985 	/* SCO pcb */
986 	if (link->hl_sco != NULL) {
987 		struct sco_pcb *pcb;
988 
989 		pcb = link->hl_sco;
990 		pcb->sp_link = NULL;
991 		link->hl_sco = NULL;
992 		(*pcb->sp_proto->disconnected)(pcb->sp_upper, err);
993 	}
994 
995 	/* flush any SCO data */
996 	crit_enter();
997 	IF_DRAIN(&link->hl_data);
998 	crit_exit();
999 
1000 	/*
1001 	 * Halt the timeout. If it's already running we cannot free the
1002 	 * link structure, but the timeout function will call us back in
1003 	 * any case.
1004 	 */
1005 	link->hl_state = HCI_LINK_CLOSED;
1006 	callout_stop(&link->hl_expire);
1007 	if (callout_active(&link->hl_expire))
1008 		return;
1009 
1010 	/*
1011 	 * If we made a note of the clock offset, keep it in a memo
1012 	 * to facilitate reconnections to this device.
1013 	 */
1014 	if (link->hl_clock != 0) {
1015 		struct hci_memo *memo;
1016 
1017 		memo = hci_memo_new(link->hl_unit, &link->hl_bdaddr);
1018 		if (memo != NULL)
1019 			memo->clock_offset = link->hl_clock;
1020 	}
1021 
1022 	crit_enter();
1023 	TAILQ_REMOVE(&link->hl_unit->hci_links, link, hl_next);
1024 	crit_exit();
1025 	kfree(link, M_BLUETOOTH);
1026 }
1027 
1028 /*
1029  * Lookup HCI link by type and state.
1030  */
1031 struct hci_link *
1032 hci_link_lookup_state(struct hci_unit *unit, uint16_t type, uint16_t state)
1033 {
1034 	struct hci_link *link;
1035 
1036 	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
1037 		if (link->hl_type == type && link->hl_state == state)
1038 			break;
1039 	}
1040 
1041 	return link;
1042 }
1043 
1044 /*
1045  * Lookup HCI link by address and type. Note that for SCO links there may
1046  * be more than one link per address, so we only return links with no
1047  * handle (i.e., new links).
1048  */
1049 struct hci_link *
1050 hci_link_lookup_bdaddr(struct hci_unit *unit, bdaddr_t *bdaddr, uint16_t type)
1051 {
1052 	struct hci_link *link;
1053 
1054 	KKASSERT(unit != NULL);
1055 	KKASSERT(bdaddr != NULL);
1056 
1057 	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
1058 		if (link->hl_type != type)
1059 			continue;
1060 
1061 		if (type == HCI_LINK_SCO && link->hl_handle != 0)
1062 			continue;
1063 
1064 		if (bdaddr_same(&link->hl_bdaddr, bdaddr))
1065 			break;
1066 	}
1067 
1068 	return link;
1069 }
1070 
1071 struct hci_link *
1072 hci_link_lookup_handle(struct hci_unit *unit, uint16_t handle)
1073 {
1074 	struct hci_link *link;
1075 
1076 	KKASSERT(unit != NULL);
1077 
1078 	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
1079 		if (handle == link->hl_handle)
1080 			break;
1081 	}
1082 
1083 	return link;
1084 }
1085