xref: /netbsd/sys/netbt/hci_link.c (revision 2bb4f2c3)
1 /*	$NetBSD: hci_link.c,v 1.26 2021/12/04 13:23:04 andvar Exp $	*/
2 
3 /*-
4  * Copyright (c) 2005 Iain Hibbert.
5  * Copyright (c) 2006 Itronix Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The name of Itronix Inc. may not be used to endorse
17  *    or promote products derived from this software without specific
18  *    prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY ITRONIX INC. ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL ITRONIX INC. BE LIABLE FOR ANY
24  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
25  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27  * ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: hci_link.c,v 1.26 2021/12/04 13:23:04 andvar Exp $");
35 
36 #include <sys/param.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/mbuf.h>
40 #include <sys/proc.h>
41 #include <sys/queue.h>
42 #include <sys/systm.h>
43 
44 #include <netbt/bluetooth.h>
45 #include <netbt/hci.h>
46 #include <netbt/l2cap.h>
47 #include <netbt/sco.h>
48 
49 /*******************************************************************************
50  *
51  *	HCI ACL Connections
52  */
53 
54 /*
55  * Automatically expire unused ACL connections after this number of
56  * seconds (if zero, do not expire unused connections) [sysctl]
57  */
58 int hci_acl_expiry = 10;	/* seconds */
59 
60 /*
61  * hci_acl_open(unit, bdaddr)
62  *
63  * open ACL connection to remote bdaddr. Only one ACL connection is permitted
64  * between any two Bluetooth devices, so we look for an existing one before
65  * trying to start a new one.
66  */
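/*
 * Illustrative sketch of how a caller might use this (an assumption about
 * typical callers, not code from this file): upper layers pair
 * hci_acl_open() with hci_acl_close() so the reference count follows
 * their interest in the link:
 *
 *	link = hci_acl_open(unit, bdaddr);
 *	if (link == NULL)
 *		return ENOMEM;		(error value illustrative)
 *	...use the link...
 *	hci_acl_close(link, 0);
 */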
67 struct hci_link *
68 hci_acl_open(struct hci_unit *unit, bdaddr_t *bdaddr)
69 {
70 	struct hci_link *link;
71 	struct hci_memo *memo;
72 	hci_create_con_cp cp;
73 	int err;
74 
75 	KASSERT(unit != NULL);
76 	KASSERT(bdaddr != NULL);
77 
78 	link = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
79 	if (link == NULL) {
80 		link = hci_link_alloc(unit, bdaddr, HCI_LINK_ACL);
81 		if (link == NULL)
82 			return NULL;
83 	}
84 
85 	switch(link->hl_state) {
86 	case HCI_LINK_CLOSED:
87 		/*
88 		 * open connection to remote device
89 		 */
90 		memset(&cp, 0, sizeof(cp));
91 		bdaddr_copy(&cp.bdaddr, bdaddr);
92 		cp.pkt_type = htole16(unit->hci_packet_type);
93 
94 		memo = hci_memo_find(unit, bdaddr);
95 		if (memo != NULL) {
96 			cp.page_scan_rep_mode = memo->page_scan_rep_mode;
97 			cp.page_scan_mode = memo->page_scan_mode;
98 			cp.clock_offset = memo->clock_offset;
99 		}
100 
101 		if (unit->hci_link_policy & HCI_LINK_POLICY_ENABLE_ROLE_SWITCH)
102 			cp.accept_role_switch = 1;
103 
104 		err = hci_send_cmd(unit, HCI_CMD_CREATE_CON, &cp, sizeof(cp));
105 		if (err) {
106 			hci_link_free(link, err);
107 			return NULL;
108 		}
109 
110 		link->hl_flags |= HCI_LINK_CREATE_CON;
111 		link->hl_state = HCI_LINK_WAIT_CONNECT;
112 		break;
113 
114 	case HCI_LINK_WAIT_CONNECT:
115 	case HCI_LINK_WAIT_AUTH:
116 	case HCI_LINK_WAIT_ENCRYPT:
117 	case HCI_LINK_WAIT_SECURE:
118 		/*
119 		 * somebody else is already trying to connect; we just
120 		 * sit on the bench with them.
121 		 */
122 		break;
123 
124 	case HCI_LINK_OPEN:
125 		/*
126 		 * If already open, halt any expiry timeout. We don't need to
127 		 * worry about a timeout that is already being invoked, since
128 		 * refcnt > 0 will keep the link alive.
129 		 */
130 		callout_stop(&link->hl_expire);
131 		break;
132 
133 	default:
134 		UNKNOWN(link->hl_state);
135 		return NULL;
136 	}
137 
138 	/* open */
139 	link->hl_refcnt++;
140 
141 	return link;
142 }
143 
144 /*
145  * Close ACL connection. When there are no more references to this link,
146  * we can either close it down or schedule a delayed closedown.
147  */
148 void
149 hci_acl_close(struct hci_link *link, int err)
150 {
151 
152 	KASSERT(link != NULL);
153 
154 	if (--link->hl_refcnt == 0) {
155 		if (link->hl_state == HCI_LINK_CLOSED)
156 			hci_link_free(link, err);
157 		else if (hci_acl_expiry > 0)
158 			callout_schedule(&link->hl_expire, hci_acl_expiry * hz);
159 	}
160 }
161 
162 /*
163  * Incoming ACL connection.
164  *
165  * Check the L2CAP listeners list and only accept when there is a
166  * potential listener available.
167  *
168  * There should not be a link to the same bdaddr already; we check
169  * anyway, though it's left unhandled for now.
170  */
171 struct hci_link *
172 hci_acl_newconn(struct hci_unit *unit, bdaddr_t *bdaddr)
173 {
174 	struct hci_link *link;
175 	struct l2cap_channel *chan;
176 
177 	LIST_FOREACH(chan, &l2cap_listen_list, lc_ncid) {
178 		if (bdaddr_same(&unit->hci_bdaddr, &chan->lc_laddr.bt_bdaddr)
179 		    || bdaddr_any(&chan->lc_laddr.bt_bdaddr))
180 			break;
181 	}
182 
183 	if (chan == NULL) {
184 		DPRINTF("%s: rejecting connection (no listeners)\n",
185 		    device_xname(unit->hci_dev));
186 
187 		return NULL;
188 	}
189 
190 	link = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
191 	if (link != NULL) {
192 		DPRINTF("%s: rejecting connection (link exists)\n",
193 		    device_xname(unit->hci_dev));
194 
195 		return NULL;
196 	}
197 
198 	link = hci_link_alloc(unit, bdaddr, HCI_LINK_ACL);
199 	if (link != NULL) {
200 		link->hl_state = HCI_LINK_WAIT_CONNECT;
201 
202 		if (hci_acl_expiry > 0)
203 			callout_schedule(&link->hl_expire, hci_acl_expiry * hz);
204 	}
205 
206 	return link;
207 }
208 
209 void
210 hci_acl_timeout(void *arg)
211 {
212 	struct hci_link *link = arg;
213 	hci_discon_cp cp;
214 	int err;
215 
216 	mutex_enter(bt_lock);
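	/*
	 * Acknowledge the callout first: this clears its "invoking" state,
	 * so if hci_link_free() ran while we were waiting for bt_lock and
	 * deferred the free, the HCI_LINK_CLOSED case below completes it.
	 */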
217 	callout_ack(&link->hl_expire);
218 
219 	if (link->hl_refcnt > 0)
220 		goto out;
221 
222 	DPRINTF("link #%d expired\n", link->hl_handle);
223 
224 	switch (link->hl_state) {
225 	case HCI_LINK_CLOSED:
226 	case HCI_LINK_WAIT_CONNECT:
227 		hci_link_free(link, ECONNRESET);
228 		break;
229 
230 	case HCI_LINK_WAIT_AUTH:
231 	case HCI_LINK_WAIT_ENCRYPT:
232 	case HCI_LINK_WAIT_SECURE:
233 	case HCI_LINK_OPEN:
234 		cp.con_handle = htole16(link->hl_handle);
235 		cp.reason = 0x13; /* "Remote User Terminated Connection" */
236 
237 		err = hci_send_cmd(link->hl_unit, HCI_CMD_DISCONNECT,
238 					&cp, sizeof(cp));
239 
240 		if (err) {
241 			DPRINTF("error %d sending HCI_CMD_DISCONNECT\n",
242 			    err);
243 		}
244 
245 		break;
246 
247 	default:
248 		UNKNOWN(link->hl_state);
249 		break;
250 	}
251 
252 out:
253 	mutex_exit(bt_lock);
254 }
255 
256 /*
257  * Initiate any Link Mode change requests.
258  */
259 int
260 hci_acl_setmode(struct hci_link *link)
261 {
262 	int err;
263 
264 	KASSERT(link != NULL);
265 	KASSERT(link->hl_unit != NULL);
266 
267 	if (link->hl_state != HCI_LINK_OPEN)
268 		return EINPROGRESS;
269 
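	/*
	 * Each request below parks the link in a WAIT_* state and returns
	 * EINPROGRESS; when the controller reports completion the event
	 * handlers call hci_acl_linkmode(), which reopens the link and
	 * restarts any queued traffic.
	 */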
270 	if ((link->hl_flags & HCI_LINK_AUTH_REQ)
271 	    && !(link->hl_flags & HCI_LINK_AUTH)) {
272 		hci_auth_req_cp cp;
273 
274 		DPRINTF("requesting auth for handle #%d\n",
275 			link->hl_handle);
276 
277 		link->hl_state = HCI_LINK_WAIT_AUTH;
278 		cp.con_handle = htole16(link->hl_handle);
279 		err = hci_send_cmd(link->hl_unit, HCI_CMD_AUTH_REQ,
280 				   &cp, sizeof(cp));
281 
282 		return (err == 0 ? EINPROGRESS : err);
283 	}
284 
285 	if ((link->hl_flags & HCI_LINK_ENCRYPT_REQ)
286 	    && !(link->hl_flags & HCI_LINK_ENCRYPT)) {
287 		hci_set_con_encryption_cp cp;
288 
289 		/* XXX we should check features for encryption capability */
290 
291 		DPRINTF("requesting encryption for handle #%d\n",
292 			link->hl_handle);
293 
294 		link->hl_state = HCI_LINK_WAIT_ENCRYPT;
295 		cp.con_handle = htole16(link->hl_handle);
296 		cp.encryption_enable = 0x01;
297 
298 		err = hci_send_cmd(link->hl_unit, HCI_CMD_SET_CON_ENCRYPTION,
299 				   &cp, sizeof(cp));
300 
301 		return (err == 0 ? EINPROGRESS : err);
302 	}
303 
304 	if ((link->hl_flags & HCI_LINK_SECURE_REQ)) {
305 		hci_change_con_link_key_cp cp;
306 
307 		/* always change link key for SECURE requests */
308 		link->hl_flags &= ~HCI_LINK_SECURE;
309 
310 		DPRINTF("changing link key for handle #%d\n",
311 			link->hl_handle);
312 
313 		link->hl_state = HCI_LINK_WAIT_SECURE;
314 		cp.con_handle = htole16(link->hl_handle);
315 
316 		err = hci_send_cmd(link->hl_unit, HCI_CMD_CHANGE_CON_LINK_KEY,
317 				   &cp, sizeof(cp));
318 
319 		return (err == 0 ? EINPROGRESS : err);
320 	}
321 
322 	return 0;
323 }
324 
325 /*
326  * Link Mode changed.
327  *
328  * This is called from event handlers when the mode change
329  * is complete. We notify upstream and restart the link.
330  */
331 void
332 hci_acl_linkmode(struct hci_link *link)
333 {
334 	struct l2cap_channel *chan, *next;
335 	int err, mode = 0;
336 
337 	DPRINTF("handle #%d, auth %s, encrypt %s, secure %s\n",
338 		link->hl_handle,
339 		(link->hl_flags & HCI_LINK_AUTH ? "on" : "off"),
340 		(link->hl_flags & HCI_LINK_ENCRYPT ? "on" : "off"),
341 		(link->hl_flags & HCI_LINK_SECURE ? "on" : "off"));
342 
343 	if (link->hl_flags & HCI_LINK_AUTH)
344 		mode |= L2CAP_LM_AUTH;
345 
346 	if (link->hl_flags & HCI_LINK_ENCRYPT)
347 		mode |= L2CAP_LM_ENCRYPT;
348 
349 	if (link->hl_flags & HCI_LINK_SECURE)
350 		mode |= L2CAP_LM_SECURE;
351 
352 	/*
353 	 * The link state will only be OPEN here if the mode change
354 	 * was successful. So, we can proceed with L2CAP connections,
355 	 * or notify already established channels, to allow any that
356 	 * are dissatisfied to disconnect before we restart.
357 	 */
358 	next = LIST_FIRST(&l2cap_active_list);
359 	while ((chan = next) != NULL) {
360 		next = LIST_NEXT(chan, lc_ncid);
361 
362 		if (chan->lc_link != link)
363 			continue;
364 
365 		switch(chan->lc_state) {
366 		case L2CAP_WAIT_SEND_CONNECT_REQ: /* we are connecting */
367 			if ((mode & chan->lc_mode) != chan->lc_mode) {
368 				l2cap_close(chan, ECONNABORTED);
369 				break;
370 			}
371 
372 			chan->lc_state = L2CAP_WAIT_RECV_CONNECT_RSP;
373 			err = l2cap_send_connect_req(chan);
374 			if (err) {
375 				l2cap_close(chan, err);
376 				break;
377 			}
378 			break;
379 
380 		case L2CAP_WAIT_SEND_CONNECT_RSP: /* they are connecting */
381 			if ((mode & chan->lc_mode) != chan->lc_mode) {
382 				l2cap_send_connect_rsp(link, chan->lc_ident,
383 							0, chan->lc_rcid,
384 							L2CAP_SECURITY_BLOCK);
385 
386 				l2cap_close(chan, ECONNABORTED);
387 				break;
388 			}
389 
390 			l2cap_send_connect_rsp(link, chan->lc_ident,
391 						chan->lc_lcid, chan->lc_rcid,
392 						L2CAP_SUCCESS);
393 
394 			chan->lc_state = L2CAP_WAIT_CONFIG;
395 			chan->lc_flags |= (L2CAP_WAIT_CONFIG_RSP | L2CAP_WAIT_CONFIG_REQ);
396 			err = l2cap_send_config_req(chan);
397 			if (err) {
398 				l2cap_close(chan, err);
399 				break;
400 			}
401 			break;
402 
403 		case L2CAP_WAIT_RECV_CONNECT_RSP:
404 		case L2CAP_WAIT_CONFIG:
405 		case L2CAP_OPEN: /* already established */
406 			(*chan->lc_proto->linkmode)(chan->lc_upper, mode);
407 			break;
408 
409 		default:
410 			break;
411 		}
412 	}
413 
414 	link->hl_state = HCI_LINK_OPEN;
415 	hci_acl_start(link);
416 }
417 
418 /*
419  * Receive ACL Data
420  *
421  * we accumulate packet fragments on the hci_link structure
422  * until a full L2CAP frame is ready, then send it on.
423  */
424 void
425 hci_acl_recv(struct mbuf *m, struct hci_unit *unit)
426 {
427 	struct hci_link *link;
428 	hci_acldata_hdr_t hdr;
429 	uint16_t handle, want;
430 	int pb, got;
431 
432 	KASSERT(m != NULL);
433 	KASSERT(unit != NULL);
434 
435 	if (m->m_pkthdr.len < sizeof(hdr))
436 		goto bad;
437 
438 	m_copydata(m, 0, sizeof(hdr), &hdr);
439 	m_adj(m, sizeof(hdr));
440 
441 	KASSERT(hdr.type == HCI_ACL_DATA_PKT);
442 
443 	hdr.length = le16toh(hdr.length);
444 	hdr.con_handle = le16toh(hdr.con_handle);
445 	handle = HCI_CON_HANDLE(hdr.con_handle);
446 	pb = HCI_PB_FLAG(hdr.con_handle);
447 
448 	if (m->m_pkthdr.len != hdr.length)
449 		goto bad;
450 
451 	link = hci_link_lookup_handle(unit, handle);
452 	if (link == NULL) {
453 		hci_discon_cp cp;
454 
455 		DPRINTF("%s: dumping packet for unknown handle #%d\n",
456 			device_xname(unit->hci_dev), handle);
457 
458 		/*
459 		 * There is no way to find out what this connection handle is
460 		 * for, so just get rid of it. This may happen if a USB dongle
461 		 * is plugged into a self-powered hub and does not reset when
462 		 * the system is shut down.
463 		 *
464 		 * This can cause a problem with some Broadcom controllers
465 		 * which emit empty ACL packets during connection setup, so
466 		 * only disconnect where data is present.
467 		 */
468 		if (hdr.length > 0) {
469 			cp.con_handle = htole16(handle);
470 			cp.reason = 0x13;/*"Remote User Terminated Connection"*/
471 			hci_send_cmd(unit, HCI_CMD_DISCONNECT, &cp, sizeof(cp));
472 		}
473 		goto bad;
474 	}
475 
476 	switch (pb) {
477 	case HCI_PACKET_START:
478 		if (m->m_pkthdr.len < sizeof(l2cap_hdr_t))
479 			goto bad;
480 
481 		if (link->hl_rxp != NULL) {
482 			aprint_error_dev(unit->hci_dev,
483 			    "dropped incomplete ACL packet\n");
484 
485 			m_freem(link->hl_rxp);
486 		}
487 
488 		link->hl_rxp = m;
489 		got = m->m_pkthdr.len;
490 		break;
491 
492 	case HCI_PACKET_FRAGMENT:
493 		if (link->hl_rxp == NULL) {
494 			aprint_error_dev(unit->hci_dev,
495 			    "unexpected packet fragment\n");
496 
497 			goto bad;
498 		}
499 
500 		got = m->m_pkthdr.len + link->hl_rxp->m_pkthdr.len;
501 		m_cat(link->hl_rxp, m);
502 		m = link->hl_rxp;
503 		m->m_pkthdr.len = got;
504 		break;
505 
506 	default:
507 		DPRINTF("%s: unknown packet type\n",
508 		    device_xname(unit->hci_dev));
509 
510 		goto bad;
511 	}
512 
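	/*
	 * The first two bytes of the L2CAP basic header hold the payload
	 * length; compare that against the payload bytes accumulated so far
	 * (the header itself is not counted).
	 */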
513 	m_copydata(m, 0, sizeof(want), &want);
514 	want = le16toh(want);
515 	got -= sizeof(l2cap_hdr_t);
516 
517 	if (got < want)		/* wait for more */
518 		return;
519 
520 	link->hl_rxp = NULL;
521 
522 	if (got > want) {
523 		DPRINTF("%s: packet overflow\n",
524 			device_xname(unit->hci_dev));
525 
526 		goto bad;
527 	}
528 
529 	l2cap_recv_frame(m, link);
530 	return;
531 
532 bad:
533 	m_freem(m);
534 }
535 
536 /*
537  * Send ACL data on link
538  *
539  * We must fragment packets into chunks no larger than unit->hci_max_acl_size and
540  * prepend a relevant ACL header to each fragment. We keep a PDU structure
541  * attached to the link, so that completed fragments can be marked off and
542  * more data requested from above once the PDU is sent.
543  */
544 int
545 hci_acl_send(struct mbuf *m, struct hci_link *link,
546 		struct l2cap_channel *chan)
547 {
548 	struct l2cap_pdu *pdu;
549 	struct mbuf *n = NULL;
550 	int plen, mlen, num = 0;
551 
552 	KASSERT(link != NULL);
553 	KASSERT(m != NULL);
554 	KASSERT(m->m_flags & M_PKTHDR);
555 	KASSERT(m->m_pkthdr.len > 0);
556 
557 	if (link->hl_state == HCI_LINK_CLOSED) {
558 		m_freem(m);
559 		return ENETDOWN;
560 	}
561 
562 	pdu = pool_get(&l2cap_pdu_pool, PR_NOWAIT);
563 	if (pdu == NULL)
564 		goto nomem;
565 
566 	pdu->lp_chan = chan;
567 	pdu->lp_pending = 0;
568 	MBUFQ_INIT(&pdu->lp_data);
569 
570 	plen = m->m_pkthdr.len;
571 	mlen = link->hl_unit->hci_max_acl_size;
572 
573 	DPRINTFN(5, "%s: handle #%d, plen = %d, max = %d\n",
574 		device_xname(link->hl_unit->hci_dev), link->hl_handle, plen, mlen);
575 
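	/*
	 * Carve the PDU into fragments of at most mlen bytes. m_split()
	 * leaves the first mlen bytes in m and returns the remainder as a
	 * new packet for the next pass around the loop.
	 */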
576 	while (plen > 0) {
577 		if (plen > mlen) {
578 			n = m_split(m, mlen, M_DONTWAIT);
579 			if (n == NULL)
580 				goto nomem;
581 		} else {
582 			mlen = plen;
583 		}
584 
585 		if (num++ == 0)
586 			m->m_flags |= M_PROTO1;	/* tag first fragment */
587 
588 		DPRINTFN(10, "chunk of %d (plen = %d) bytes\n", mlen, plen);
589 		MBUFQ_ENQUEUE(&pdu->lp_data, m);
590 		m = n;
591 		plen -= mlen;
592 	}
593 
594 	TAILQ_INSERT_TAIL(&link->hl_txq, pdu, lp_next);
595 	link->hl_txqlen += num;
596 
597 	hci_acl_start(link);
598 
599 	return 0;
600 
601 nomem:
602 	if (m) m_freem(m);
603 	if (pdu) {
604 		MBUFQ_DRAIN(&pdu->lp_data);
605 		pool_put(&l2cap_pdu_pool, pdu);
606 	}
607 
608 	return ENOMEM;
609 }
610 
611 /*
612  * Start sending ACL data on link.
613  *
614  *	This is called when the queue may need restarting: as new data
615  * is queued, after link mode changes have completed, or when device
616  * buffers have cleared.
617  *
618  *	We may use all the available packet slots. The reason that we add
619  * the ACL encapsulation here rather than in hci_acl_send() is that L2CAP
620  * signal packets may be queued before the handle is given to us.
621  */
622 void
623 hci_acl_start(struct hci_link *link)
624 {
625 	struct hci_unit *unit;
626 	hci_acldata_hdr_t *hdr;
627 	struct l2cap_pdu *pdu;
628 	struct mbuf *m;
629 	uint16_t handle;
630 
631 	KASSERT(link != NULL);
632 
633 	unit = link->hl_unit;
634 	KASSERT(unit != NULL);
635 
636 	/* this is mainly to block ourselves (below) */
637 	if (link->hl_state != HCI_LINK_OPEN)
638 		return;
639 
640 	if (link->hl_txqlen == 0 || unit->hci_num_acl_pkts == 0)
641 		return;
642 
643 	/* find first PDU with data to send */
644 	pdu = TAILQ_FIRST(&link->hl_txq);
645 	for (;;) {
646 		if (pdu == NULL)
647 			return;
648 
649 		if (MBUFQ_FIRST(&pdu->lp_data) != NULL)
650 			break;
651 
652 		pdu = TAILQ_NEXT(pdu, lp_next);
653 	}
654 
655 	while (unit->hci_num_acl_pkts > 0) {
656 		MBUFQ_DEQUEUE(&pdu->lp_data, m);
657 		KASSERT(m != NULL);
658 
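		/*
		 * hci_acl_send() tagged the first fragment of each PDU with
		 * M_PROTO1; give it the "start" packet boundary flag and
		 * mark continuations as fragments.
		 */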
659 		if (m->m_flags & M_PROTO1)
660 			handle = HCI_MK_CON_HANDLE(link->hl_handle,
661 						HCI_PACKET_START, 0);
662 		else
663 			handle = HCI_MK_CON_HANDLE(link->hl_handle,
664 						HCI_PACKET_FRAGMENT, 0);
665 
666 		M_PREPEND(m, sizeof(*hdr), M_DONTWAIT);
667 		if (m == NULL)
668 			break;
669 
670 		hdr = mtod(m, hci_acldata_hdr_t *);
671 		hdr->type = HCI_ACL_DATA_PKT;
672 		hdr->con_handle = htole16(handle);
673 		hdr->length = htole16(m->m_pkthdr.len - sizeof(*hdr));
674 
675 		link->hl_txqlen--;
676 		pdu->lp_pending++;
677 
678 		hci_output_acl(unit, m);
679 
680 		if (MBUFQ_FIRST(&pdu->lp_data) == NULL) {
681 			if (pdu->lp_chan) {
682 				/*
683 				 * This should enable streaming of PDUs: when
684 				 * we have placed all the fragments on the ACL
685 				 * output queue, we trigger the L2CAP layer to
686 				 * send us down one more. Use a false state so
687 				 * we don't run into ourselves coming back from
688 				 * the future.
689 				 */
690 				link->hl_state = HCI_LINK_BLOCK;
691 				l2cap_start(pdu->lp_chan);
692 				link->hl_state = HCI_LINK_OPEN;
693 			}
694 
695 			pdu = TAILQ_NEXT(pdu, lp_next);
696 			if (pdu == NULL)
697 				break;
698 		}
699 	}
700 
701 	/*
702 	 * We have had our turn; now move to the back of the queue to let
703 	 * other links have a go at the output buffers.
704 	 */
705 	if (TAILQ_NEXT(link, hl_next)) {
706 		TAILQ_REMOVE(&unit->hci_links, link, hl_next);
707 		TAILQ_INSERT_TAIL(&unit->hci_links, link, hl_next);
708 	}
709 }
710 
711 /*
712  * Confirm ACL packets cleared from Controller buffers. We scan our PDU
713  * list to clear pending fragments and signal upstream for more data
714  * when a PDU is complete.
715  */
716 void
717 hci_acl_complete(struct hci_link *link, int num)
718 {
719 	struct l2cap_pdu *pdu;
720 	struct l2cap_channel *chan;
721 
722 	DPRINTFN(5, "handle #%d (%d)\n", link->hl_handle, num);
723 
724 	while (num > 0) {
725 		pdu = TAILQ_FIRST(&link->hl_txq);
726 		if (pdu == NULL) {
727 			aprint_error_dev(link->hl_unit->hci_dev,
728 			    "%d packets completed on handle #%x but none pending!\n",
729 			    num, link->hl_handle);
730 
731 			return;
732 		}
733 
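		/*
		 * A single completion count may cover fragments from several
		 * PDUs; consume whole PDUs first and carry any remainder over
		 * to the next one.
		 */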
734 		if (num >= pdu->lp_pending) {
735 			num -= pdu->lp_pending;
736 			pdu->lp_pending = 0;
737 
738 			if (MBUFQ_FIRST(&pdu->lp_data) == NULL) {
739 				TAILQ_REMOVE(&link->hl_txq, pdu, lp_next);
740 				chan = pdu->lp_chan;
741 				if (chan != NULL) {
742 					chan->lc_pending--;
743 					(*chan->lc_proto->complete)
744 							(chan->lc_upper, 1);
745 
746 					if (chan->lc_pending == 0)
747 						l2cap_start(chan);
748 				}
749 
750 				pool_put(&l2cap_pdu_pool, pdu);
751 			}
752 		} else {
753 			pdu->lp_pending -= num;
754 			num = 0;
755 		}
756 	}
757 }
758 
759 /*******************************************************************************
760  *
761  *	HCI SCO Connections
762  */
763 
764 /*
765  * Incoming SCO Connection. We check the list for anybody willing
766  * to take it.
767  */
768 struct hci_link *
769 hci_sco_newconn(struct hci_unit *unit, bdaddr_t *bdaddr)
770 {
771 	struct sockaddr_bt laddr, raddr;
772 	struct sco_pcb *pcb, *new;
773 	struct hci_link *sco, *acl;
774 
775 	memset(&laddr, 0, sizeof(laddr));
776 	laddr.bt_len = sizeof(laddr);
777 	laddr.bt_family = AF_BLUETOOTH;
778 	bdaddr_copy(&laddr.bt_bdaddr, &unit->hci_bdaddr);
779 
780 	memset(&raddr, 0, sizeof(raddr));
781 	raddr.bt_len = sizeof(raddr);
782 	raddr.bt_family = AF_BLUETOOTH;
783 	bdaddr_copy(&raddr.bt_bdaddr, bdaddr);
784 
785 	/*
786 	 * There should already be an ACL link up and running before
787 	 * the controller sends us SCO connection requests, but you
788 	 * never know.
789 	 */
790 	acl = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
791 	if (acl == NULL || acl->hl_state != HCI_LINK_OPEN)
792 		return NULL;
793 
794 	LIST_FOREACH(pcb, &sco_pcb, sp_next) {
795 		if ((pcb->sp_flags & SP_LISTENING) == 0)
796 			continue;
797 
798 		new = (*pcb->sp_proto->newconn)(pcb->sp_upper, &laddr, &raddr);
799 		if (new == NULL)
800 			continue;
801 
802 		/*
803 		 * OK, we got a new pcb, so we can start a new link and fill
804 		 * in all the details.
805 		 */
806 		bdaddr_copy(&new->sp_laddr, &unit->hci_bdaddr);
807 		bdaddr_copy(&new->sp_raddr, bdaddr);
808 
809 		sco = hci_link_alloc(unit, bdaddr, HCI_LINK_SCO);
810 		if (sco == NULL) {
811 			sco_detach_pcb(&new);
812 			return NULL;
813 		}
814 
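		/*
		 * hci_acl_open() returns the existing ACL link with an extra
		 * reference, keeping it alive for the lifetime of this SCO
		 * connection (dropped again in hci_link_free()).
		 */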
815 		sco->hl_link = hci_acl_open(unit, bdaddr);
816 		KASSERT(sco->hl_link == acl);
817 
818 		sco->hl_sco = new;
819 		new->sp_link = sco;
820 
821 		new->sp_mtu = unit->hci_max_sco_size;
822 		return sco;
823 	}
824 
825 	return NULL;
826 }
827 
828 /*
829  * Receive SCO packet; we only need to strip the header and send
830  * it to the right handler.
831  */
832 void
833 hci_sco_recv(struct mbuf *m, struct hci_unit *unit)
834 {
835 	struct hci_link *link;
836 	hci_scodata_hdr_t hdr;
837 	uint16_t handle;
838 
839 	KASSERT(m != NULL);
840 	KASSERT(unit != NULL);
841 
842 	if (m->m_pkthdr.len < sizeof(hdr))
843 		goto bad;
844 
845 	m_copydata(m, 0, sizeof(hdr), &hdr);
846 	m_adj(m, sizeof(hdr));
847 
848 	KASSERT(hdr.type == HCI_SCO_DATA_PKT);
849 
850 	hdr.con_handle = le16toh(hdr.con_handle);
851 	handle = HCI_CON_HANDLE(hdr.con_handle);
852 
853 	if (m->m_pkthdr.len != hdr.length)
854 		goto bad;
855 
856 	link = hci_link_lookup_handle(unit, handle);
857 	if (link == NULL || link->hl_type == HCI_LINK_ACL) {
858 		DPRINTF("%s: dumping packet for unknown handle #%d\n",
859 			device_xname(unit->hci_dev), handle);
860 
861 		goto bad;
862 	}
863 
864 	(*link->hl_sco->sp_proto->input)(link->hl_sco->sp_upper, m);
865 	return;
866 
867 bad:
868 	m_freem(m);
869 }
870 
871 void
872 hci_sco_start(struct hci_link *link)
873 {
874 }
875 
876 /*
877  * SCO packets have completed at the controller, so we can
878  * signal up to free the buffer space.
879  */
880 void
881 hci_sco_complete(struct hci_link *link, int num)
882 {
883 
884 	DPRINTFN(5, "handle #%d (num=%d)\n", link->hl_handle, num);
885 	link->hl_sco->sp_pending--;
886 	(*link->hl_sco->sp_proto->complete)(link->hl_sco->sp_upper, num);
887 }
888 
889 /*******************************************************************************
890  *
891  *	Generic HCI Connection alloc/free/lookup etc
892  */
893 
894 struct hci_link *
895 hci_link_alloc(struct hci_unit *unit, bdaddr_t *bdaddr, uint8_t type)
896 {
897 	struct hci_link *link;
898 
899 	KASSERT(unit != NULL);
900 
901 	link = malloc(sizeof(struct hci_link), M_BLUETOOTH, M_NOWAIT | M_ZERO);
902 	if (link == NULL)
903 		return NULL;
904 
905 	link->hl_unit = unit;
906 	link->hl_type = type;
907 	link->hl_state = HCI_LINK_CLOSED;
908 	bdaddr_copy(&link->hl_bdaddr, bdaddr);
909 
910 	/* init ACL portion */
911 	callout_init(&link->hl_expire, 0);
912 	callout_setfunc(&link->hl_expire, hci_acl_timeout, link);
913 
914 	TAILQ_INIT(&link->hl_txq);	/* outgoing packets */
915 	TAILQ_INIT(&link->hl_reqs);	/* request queue */
916 
917 	link->hl_mtu = L2CAP_MTU_DEFAULT;		/* L2CAP signal mtu */
918 	link->hl_flush = L2CAP_FLUSH_TIMO_DEFAULT;	/* flush timeout */
919 
920 	/* init SCO portion */
921 	MBUFQ_INIT(&link->hl_data);
922 
923 	/* attach to unit */
924 	TAILQ_INSERT_TAIL(&unit->hci_links, link, hl_next);
925 	return link;
926 }
927 
928 void
929 hci_link_free(struct hci_link *link, int err)
930 {
931 	struct l2cap_req *req;
932 	struct l2cap_pdu *pdu;
933 	struct l2cap_channel *chan, *next;
934 
935 	KASSERT(link != NULL);
936 
937 	DPRINTF("#%d, type = %d, state = %d, refcnt = %d\n",
938 		link->hl_handle, link->hl_type,
939 		link->hl_state, link->hl_refcnt);
940 
941 	/* ACL reference count */
942 	if (link->hl_refcnt > 0) {
943 		next = LIST_FIRST(&l2cap_active_list);
944 		while ((chan = next) != NULL) {
945 			next = LIST_NEXT(chan, lc_ncid);
946 			if (chan->lc_link == link)
947 				l2cap_close(chan, err);
948 		}
949 	}
950 	KASSERT(link->hl_refcnt == 0);
951 
952 	/* ACL L2CAP requests.. */
953 	while ((req = TAILQ_FIRST(&link->hl_reqs)) != NULL)
954 		l2cap_request_free(req);
955 
956 	KASSERT(TAILQ_EMPTY(&link->hl_reqs));
957 
958 	/* ACL outgoing data queue */
959 	while ((pdu = TAILQ_FIRST(&link->hl_txq)) != NULL) {
960 		TAILQ_REMOVE(&link->hl_txq, pdu, lp_next);
961 		MBUFQ_DRAIN(&pdu->lp_data);
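		/*
		 * Return buffer credits for fragments handed to the
		 * controller but never reported complete, so that
		 * hci_num_acl_pkts stays balanced.
		 */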
962 		if (pdu->lp_pending)
963 			link->hl_unit->hci_num_acl_pkts += pdu->lp_pending;
964 
965 		pool_put(&l2cap_pdu_pool, pdu);
966 	}
967 
968 	KASSERT(TAILQ_EMPTY(&link->hl_txq));
969 
970 	/* ACL incoming data packet */
971 	if (link->hl_rxp != NULL) {
972 		m_freem(link->hl_rxp);
973 		link->hl_rxp = NULL;
974 	}
975 
976 	/* SCO master ACL link */
977 	if (link->hl_link != NULL) {
978 		hci_acl_close(link->hl_link, err);
979 		link->hl_link = NULL;
980 	}
981 
982 	/* SCO pcb */
983 	if (link->hl_sco != NULL) {
984 		struct sco_pcb *pcb;
985 
986 		pcb = link->hl_sco;
987 		pcb->sp_link = NULL;
988 		link->hl_sco = NULL;
989 		(*pcb->sp_proto->disconnected)(pcb->sp_upper, err);
990 	}
991 
992 	/* flush any SCO data */
993 	MBUFQ_DRAIN(&link->hl_data);
994 
995 	/*
996 	 * Halt the callout. If it is already running we cannot free the
997 	 * link structure, but the timeout function will call us back in
998 	 * any case.
999 	 */
1000 	link->hl_state = HCI_LINK_CLOSED;
1001 	callout_stop(&link->hl_expire);
1002 	if (callout_invoking(&link->hl_expire))
1003 		return;
1004 
1005 	callout_destroy(&link->hl_expire);
1006 
1007 	/*
1008 	 * If we made a note of clock offset, keep it in a memo
1009 	 * to facilitate reconnections to this device
1010 	 */
1011 	if (link->hl_clock != 0) {
1012 		struct hci_memo *memo;
1013 
1014 		memo = hci_memo_new(link->hl_unit, &link->hl_bdaddr);
1015 		if (memo != NULL)
1016 			memo->clock_offset = link->hl_clock;
1017 	}
1018 
1019 	TAILQ_REMOVE(&link->hl_unit->hci_links, link, hl_next);
1020 	free(link, M_BLUETOOTH);
1021 }
1022 
1023 /*
1024  * Lookup HCI link by address and type. Note that for SCO links there may
1025  * be more than one link per address, so we only return links with no
1026  * handle (i.e. new links).
1027  */
1028 struct hci_link *
1029 hci_link_lookup_bdaddr(struct hci_unit *unit, bdaddr_t *bdaddr, uint8_t type)
1030 {
1031 	struct hci_link *link;
1032 
1033 	KASSERT(unit != NULL);
1034 	KASSERT(bdaddr != NULL);
1035 
1036 	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
1037 		if (link->hl_type != type)
1038 			continue;
1039 
1040 		if (type == HCI_LINK_SCO && link->hl_handle != 0)
1041 			continue;
1042 
1043 		if (bdaddr_same(&link->hl_bdaddr, bdaddr))
1044 			break;
1045 	}
1046 
1047 	return link;
1048 }
1049 
1050 struct hci_link *
1051 hci_link_lookup_handle(struct hci_unit *unit, uint16_t handle)
1052 {
1053 	struct hci_link *link;
1054 
1055 	KASSERT(unit != NULL);
1056 
1057 	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
1058 		if (handle == link->hl_handle)
1059 			break;
1060 	}
1061 
1062 	return link;
1063 }
1064