xref: /linux/net/bluetooth/l2cap_core.c (revision d6fd48ef)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 static void l2cap_retrans_timeout(struct work_struct *work);
65 static void l2cap_monitor_timeout(struct work_struct *work);
66 static void l2cap_ack_timeout(struct work_struct *work);
67 
68 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
69 {
70 	if (link_type == LE_LINK) {
71 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
72 			return BDADDR_LE_PUBLIC;
73 		else
74 			return BDADDR_LE_RANDOM;
75 	}
76 
77 	return BDADDR_BREDR;
78 }
79 
/* Socket-level address type of the local (source) side of @hcon. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
84 
/* Socket-level address type of the remote (destination) side of @hcon. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
89 
90 /* ---- L2CAP channels ---- */
91 
92 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
93 						   u16 cid)
94 {
95 	struct l2cap_chan *c;
96 
97 	list_for_each_entry(c, &conn->chan_l, list) {
98 		if (c->dcid == cid)
99 			return c;
100 	}
101 	return NULL;
102 }
103 
104 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 						   u16 cid)
106 {
107 	struct l2cap_chan *c;
108 
109 	list_for_each_entry(c, &conn->chan_l, list) {
110 		if (c->scid == cid)
111 			return c;
112 	}
113 	return NULL;
114 }
115 
116 /* Find channel with given SCID.
117  * Returns a reference locked channel.
118  */
/* Locked lookup by SCID: takes conn->chan_lock for the list walk, then
 * returns the channel with a reference held and its lock taken, or NULL
 * if the channel does not exist or its refcount already dropped to zero.
 * Caller must l2cap_chan_unlock() and l2cap_chan_put() when done.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}
136 
137 /* Find channel with given DCID.
138  * Returns a reference locked channel.
139  */
/* Locked lookup by DCID: same contract as l2cap_get_chan_by_scid() —
 * returns a referenced, locked channel or NULL.  Caller must
 * l2cap_chan_unlock() and l2cap_chan_put() when done.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}
157 
158 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
159 						    u8 ident)
160 {
161 	struct l2cap_chan *c;
162 
163 	list_for_each_entry(c, &conn->chan_l, list) {
164 		if (c->ident == ident)
165 			return c;
166 	}
167 	return NULL;
168 }
169 
/* Locked lookup by signalling ident: returns a referenced, locked channel
 * or NULL.  Caller must l2cap_chan_unlock() and l2cap_chan_put() when done.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}
187 
/* Find a channel in the global chan_list bound to @psm and source address
 * @src.  BR/EDR and LE channels use separate PSM spaces, so entries on the
 * other transport are skipped.  Caller must hold chan_list_lock.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						      u8 src_type)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
			continue;

		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
			continue;

		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}
205 
/* Bind @chan to @psm for source address @src, or auto-allocate a free
 * dynamic PSM when @psm is 0.  BR/EDR dynamic PSMs are stepped by 2 (valid
 * PSMs have an even upper octet per the PSM encoding rules), LE by 1.
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM is taken, or
 * -EINVAL if no free PSM is available for auto-allocation.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
250 
/* Bind @chan to a fixed source CID, turning it into a fixed channel with
 * the default MTU.  Always returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
265 
266 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
267 {
268 	u16 cid, dyn_end;
269 
270 	if (conn->hcon->type == LE_LINK)
271 		dyn_end = L2CAP_CID_LE_DYN_END;
272 	else
273 		dyn_end = L2CAP_CID_DYN_END;
274 
275 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
276 		if (!__l2cap_get_chan_by_scid(conn, cid))
277 			return cid;
278 	}
279 
280 	return 0;
281 }
282 
/* Move @chan to @state and notify the channel ops with no error. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
291 
/* Move @chan to @state and notify the channel ops with error @err. */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
298 
/* Report error @err on @chan without changing its state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
303 
/* Arm the ERTM retransmission timer, unless the monitor timer is already
 * pending (the two are mutually exclusive) or no timeout is configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
312 
/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first; does nothing if no monitor timeout is configured.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
321 
322 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
323 					       u16 seq)
324 {
325 	struct sk_buff *skb;
326 
327 	skb_queue_walk(head, skb) {
328 		if (bt_cb(skb)->l2cap.txseq == seq)
329 			return skb;
330 	}
331 
332 	return NULL;
333 }
334 
335 /* ---- L2CAP sequence number lists ---- */
336 
337 /* For ERTM, ordered lists of sequence numbers must be tracked for
338  * SREJ requests that are received and for frames that are to be
339  * retransmitted. These seq_list functions implement a singly-linked
340  * list in an array, where membership in the list can also be checked
341  * in constant time. Items can also be added to the tail of the list
342  * and removed from the head in constant time, without further memory
343  * allocs or frees.
344  */
345 
/* Allocate and clear the backing array of a sequence list sized for a
 * transmit window of @size.  Returns 0 or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size lets "seq & mask" replace a modulo */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
368 
/* Release the array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
373 
374 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
375 					   u16 seq)
376 {
377 	/* Constant-time check for list membership */
378 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
379 }
380 
/* Remove and return the sequence number at the head of the list.
 * NOTE(review): assumes the list is non-empty (head is a valid seq, not
 * L2CAP_SEQ_LIST_CLEAR) — callers must check before popping.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and clear the old slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* Popped the last entry: mark the list empty */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
396 
/* Empty the list; O(capacity), but skipped entirely when already empty. */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
410 
/* Append @seq to the tail of the list in O(1); duplicates are ignored. */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
428 
/* Delayed-work handler for chan->chan_timer.  Closes the channel with an
 * errno chosen from the state it timed out in, then drops the reference
 * that was taken when the timer was scheduled.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock before the channel lock */
	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken by __set_chan_timer() */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
461 
/* Allocate and initialise a new channel in state BT_OPEN with refcount 1,
 * and link it into the global chan_list.  Returns NULL on allocation
 * failure.  Uses GFP_ATOMIC, so it is safe from non-sleeping contexts.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
498 
/* kref release callback: unlink the channel from the global list and
 * free it.  Only ever invoked via l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
511 
/* Take an additional reference on @c; caller must already hold one. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
518 
/* Take a reference on @c only if its refcount has not already hit zero;
 * returns @c on success or NULL when the channel is being destroyed.
 */
struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	if (!kref_get_unless_zero(&c->kref))
		return NULL;

	return c;
}
528 
/* Drop a reference on @c; frees the channel when the count reaches zero. */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
536 
/* Reset @chan to the spec-default ERTM/flow-control parameters, clear any
 * configuration state, and mark the channel force-active.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	chan->conf_state = 0;
	/* Cleared in l2cap_chan_ready() once configuration completes */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
557 
/* Initialise LE credit-based flow control state for @chan: reset SDU
 * reassembly, set the peer's initial @tx_credits, and derive MPS and
 * local rx credits from the channel/connection MTUs.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Give enough credits for a full packet */
	chan->rx_credits = (chan->imtu / chan->mps) + 1;

	skb_queue_head_init(&chan->tx_q);
}
571 
/* Initialise enhanced-credit (ECRED) flow control: same as LE flow
 * control, but enforces the spec-mandated minimum MPS and recomputes the
 * rx credits if the MPS had to be raised.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		chan->rx_credits = (chan->imtu / chan->mps) + 1;
	}
}
582 
/* Attach @chan to @conn: assign CIDs/MTU according to the channel type,
 * set the default extended-flow-spec parameters, take a channel reference
 * and (for most channel types) an hci_conn reference, then link the
 * channel into the connection's list.  Caller must hold conn->chan_lock
 * (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
634 
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
641 
/* Detach @chan from its connection and tear down per-mode state.
 *
 * Stops the channel timer, notifies the owner via ops->teardown(@err),
 * unlinks the channel from the connection list (dropping the references
 * taken in __l2cap_chan_add()), disconnects any AMP logical link, and —
 * unless configuration never completed — purges the mode-specific queues
 * and timers.  Caller holds the channel lock and conn->chan_lock.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	/* Nothing mode-specific to free if config never completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
710 
711 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
712 			      void *data)
713 {
714 	struct l2cap_chan *chan;
715 
716 	list_for_each_entry(chan, &conn->chan_l, list) {
717 		func(chan, data);
718 	}
719 }
720 
721 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
722 		     void *data)
723 {
724 	if (!conn)
725 		return;
726 
727 	mutex_lock(&conn->chan_lock);
728 	__l2cap_chan_list(conn, func, data);
729 	mutex_unlock(&conn->chan_lock);
730 }
731 
732 EXPORT_SYMBOL_GPL(l2cap_chan_list);
733 
/* Work handler for conn->id_addr_update_work: copy the (possibly
 * resolved) remote identity address from the hci_conn into every channel
 * on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
752 
/* Reject a pending LE credit-based connection request: send an LE
 * Connection Response carrying an authorization or bad-PSM result and
 * move the channel to BT_DISCONN.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	/* Deferred setup means the request reached userspace, so the
	 * failure is an authorization denial rather than an unknown PSM.
	 */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
775 
776 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
777 {
778 	struct l2cap_conn *conn = chan->conn;
779 	struct l2cap_ecred_conn_rsp rsp;
780 	u16 result;
781 
782 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
783 		result = L2CAP_CR_LE_AUTHORIZATION;
784 	else
785 		result = L2CAP_CR_LE_BAD_PSM;
786 
787 	l2cap_state_change(chan, BT_DISCONN);
788 
789 	memset(&rsp, 0, sizeof(rsp));
790 
791 	rsp.result  = cpu_to_le16(result);
792 
793 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
794 		       &rsp);
795 }
796 
/* Reject a pending BR/EDR connection request: send a Connection Response
 * with a security-block or bad-PSM result and move to BT_DISCONN.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	/* Response CIDs are from the remote's point of view: its scid is
	 * our dcid and vice versa.
	 */
	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
817 
/* Close @chan with error @reason, taking the state-appropriate path:
 * connected channels send a disconnect request first, half-open inbound
 * channels (BT_CONNECT2) send the proper reject PDU for their transport
 * and mode, and everything else is torn down directly.  Caller holds the
 * channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Wait for the disconnect response; the channel
			 * timer bounds the wait.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					break;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
868 
/* Map the channel type and security level to an HCI authentication
 * requirement.  May upgrade sec_level from LOW to SDP for the SDP PSM
 * (and the 3DSP PSM on connectionless channels) as a side effect.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
920 
921 /* Service level security */
/* Service level security */
/* Request the security level required by @chan on its underlying link:
 * SMP pairing for LE links, HCI authentication/encryption for BR/EDR.
 * Return value comes from smp_conn_security()/hci_conn_security().
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
935 
/* Return the next signalling command identifier for @conn, cycling
 * through 1..128 under conn->ident_lock.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
957 
/* Build and transmit a signalling command PDU on @conn's signalling
 * channel.  Silently drops the command if the skb cannot be built
 * (allocation failure); the skb is consumed by hci_send_acl().
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling is high priority and must not be parked by power save */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
982 
983 static bool __chan_is_moving(struct l2cap_chan *chan)
984 {
985 	return chan->move_state != L2CAP_MOVE_STABLE &&
986 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
987 }
988 
/* Transmit a data skb for @chan, routing it over the AMP high-speed
 * channel when one is active and not mid-move, otherwise over the ACL
 * link with the appropriate flush flags.  Consumes @skb.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			/* AMP link not usable: drop rather than misroute */
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1020 
/* Decode a 16-bit ERTM enhanced control field into @control.  Fields not
 * applicable to the frame type (S-frame vs I-frame) are zeroed.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
1044 
/* Decode a 32-bit ERTM extended control field into @control.  Mirrors
 * __unpack_enhanced_control() but with the wider extended-window layout.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
1068 
/* Parse the control field at the front of @skb into the skb's control
 * block and pull it off the data, using the extended or enhanced layout
 * depending on the channel's FLAG_EXT_CTRL setting.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
1082 
/* Encode @control into a 32-bit extended control field (inverse of
 * __unpack_extended_control()).
 */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
1101 
/* Encode @control into a 16-bit enhanced control field (inverse of
 * __unpack_enhanced_control()).
 */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
1120 
1121 static inline void __pack_control(struct l2cap_chan *chan,
1122 				  struct l2cap_ctrl *control,
1123 				  struct sk_buff *skb)
1124 {
1125 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1126 		put_unaligned_le32(__pack_extended_control(control),
1127 				   skb->data + L2CAP_HDR_SIZE);
1128 	} else {
1129 		put_unaligned_le16(__pack_enhanced_control(control),
1130 				   skb->data + L2CAP_HDR_SIZE);
1131 	}
1132 }
1133 
1134 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1135 {
1136 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1137 		return L2CAP_EXT_HDR_SIZE;
1138 	else
1139 		return L2CAP_ENH_HDR_SIZE;
1140 }
1141 
/* Allocate and build a complete S-frame PDU for @chan carrying the
 * already-packed @control field.  Fills in the basic L2CAP header, the
 * 16- or 32-bit control field (per FLAG_EXT_CTRL) and, when enabled,
 * the FCS.  Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything built so far (header + control) */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	/* S-frames carry flow-control state, send them ahead of data */
	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1174 
/* Build and transmit the S-frame described by @control, updating the
 * channel's ERTM bookkeeping (pending F-bit, RNR-sent state, last
 * acked sequence) as a side effect.  No-op while a channel move is in
 * progress or if @control does not describe an S-frame.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is piggybacked on any S-frame that is not a
	 * poll itself.
	 */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any non-SREJ S-frame acknowledges frames up to reqseq, so the
	 * ack timer can be stopped.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1215 
1216 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1217 {
1218 	struct l2cap_ctrl control;
1219 
1220 	BT_DBG("chan %p, poll %d", chan, poll);
1221 
1222 	memset(&control, 0, sizeof(control));
1223 	control.sframe = 1;
1224 	control.poll = poll;
1225 
1226 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1227 		control.super = L2CAP_SUPER_RNR;
1228 	else
1229 		control.super = L2CAP_SUPER_RR;
1230 
1231 	control.reqseq = chan->buffer_seq;
1232 	l2cap_send_sframe(chan, &control);
1233 }
1234 
1235 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1236 {
1237 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1238 		return true;
1239 
1240 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1241 }
1242 
/* Decide whether @chan may be created on an AMP controller: both sides
 * must advertise A2MP support, at least one non-BR/EDR controller must
 * be up, and the channel policy must prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return false;

	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
		return false;

	/* Scan all registered controllers for a usable AMP device */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1270 
/* Validate extended flow specification parameters for @chan.
 * Currently a stub that accepts everything.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1276 
1277 void l2cap_send_conn_req(struct l2cap_chan *chan)
1278 {
1279 	struct l2cap_conn *conn = chan->conn;
1280 	struct l2cap_conn_req req;
1281 
1282 	req.scid = cpu_to_le16(chan->scid);
1283 	req.psm  = chan->psm;
1284 
1285 	chan->ident = l2cap_get_ident(conn);
1286 
1287 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1288 
1289 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1290 }
1291 
1292 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1293 {
1294 	struct l2cap_create_chan_req req;
1295 	req.scid = cpu_to_le16(chan->scid);
1296 	req.psm  = chan->psm;
1297 	req.amp_id = amp_id;
1298 
1299 	chan->ident = l2cap_get_ident(chan->conn);
1300 
1301 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1302 		       sizeof(req), &req);
1303 }
1304 
/* Prepare an ERTM channel for a channel move: stop all timers, rewind
 * retransmission bookkeeping so already-sent frames are resent on the
 * new link, drop SREJ state and park the TX/RX state machines in their
 * move states.  No-op for non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset the retry count on every frame that was sent at least
	 * once; the first never-sent frame ends the walk.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmission until the move has completed */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1339 
/* Conclude a channel move and return to stable state.  For ERTM
 * channels, the former move initiator polls the peer and waits for the
 * final response, while the responder waits for that poll.
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1361 
/* Transition @chan to BT_CONNECTED and notify its owner via ->ready().
 * Idempotent: returns early if the channel is already connected.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Credit-based modes cannot transmit until the peer
		 * grants credits, so start out suspended in that case.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1387 
/* Send an LE credit-based connection request for @chan.  Guarded by
 * FLAG_LE_CONN_REQ_SENT so the request is sent at most once.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Default to the link MTU if the owner did not set one */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1413 
/* Scratch state for building an ECRED connection request that bundles
 * the initiating channel with up to four deferred sibling channels.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req req;
		__le16 scid[5];		/* source CIDs appended to req */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel initiating the request */
	struct pid *pid;		/* peer PID siblings must share */
	int count;			/* number of scid[] slots filled */
};
1423 
/* __l2cap_chan_list() callback: fold a deferred sibling channel into
 * the ECRED connection request being assembled in @data.  Only
 * channels matching the initiator's peer PID and PSM, in EXT_FLOWCTL
 * mode and still in BT_CONNECT, are added.
 */
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_conn_data *conn = data;
	struct pid *pid;

	/* Skip the initiating channel itself */
	if (chan == conn->chan)
		return;

	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only add deferred channels with the same PID/PSM */
	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}
1455 
/* Send an enhanced credit based (ECRED) connection request for @chan,
 * bundling sibling channels whose setup was deferred into the same
 * request.  Guarded by FLAG_ECRED_CONN_REQ_SENT so the request is
 * sent at most once; deferred channels wait for their trigger.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Pull in deferred channels with the same PID/PSM */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	/* Request length depends on how many scids were collected */
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1488 
1489 static void l2cap_le_start(struct l2cap_chan *chan)
1490 {
1491 	struct l2cap_conn *conn = chan->conn;
1492 
1493 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1494 		return;
1495 
1496 	if (!chan->psm) {
1497 		l2cap_chan_ready(chan);
1498 		return;
1499 	}
1500 
1501 	if (chan->state == BT_CONNECT) {
1502 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1503 			l2cap_ecred_connect(chan);
1504 		else
1505 			l2cap_le_connect(chan);
1506 	}
1507 }
1508 
1509 static void l2cap_start_connection(struct l2cap_chan *chan)
1510 {
1511 	if (__amp_capable(chan)) {
1512 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1513 		a2mp_discover_amp(chan);
1514 	} else if (chan->conn->hcon->type == LE_LINK) {
1515 		l2cap_le_start(chan);
1516 	} else {
1517 		l2cap_send_conn_req(chan);
1518 	}
1519 }
1520 
/* Request the peer's feature mask, once per connection.  The info
 * timer ensures channel setup proceeds even if no response arrives.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1538 
1539 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1540 {
1541 	/* The minimum encryption key size needs to be enforced by the
1542 	 * host stack before establishing any L2CAP connections. The
1543 	 * specification in theory allows a minimum of 1, but to align
1544 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1545 	 *
1546 	 * This check might also be called for unencrypted connections
1547 	 * that have no key size requirements. Ensure that the link is
1548 	 * actually encrypted before enforcing a key size.
1549 	 */
1550 	int min_key_size = hcon->hdev->min_enc_key_size;
1551 
1552 	/* On FIPS security level, key size must be 16 bytes */
1553 	if (hcon->sec_level == BT_SECURITY_FIPS)
1554 		min_key_size = 16;
1555 
1556 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1557 		hcon->enc_key_size >= min_key_size);
1558 }
1559 
1560 static void l2cap_do_start(struct l2cap_chan *chan)
1561 {
1562 	struct l2cap_conn *conn = chan->conn;
1563 
1564 	if (conn->hcon->type == LE_LINK) {
1565 		l2cap_le_start(chan);
1566 		return;
1567 	}
1568 
1569 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1570 		l2cap_request_info(conn);
1571 		return;
1572 	}
1573 
1574 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1575 		return;
1576 
1577 	if (!l2cap_chan_check_security(chan, true) ||
1578 	    !__l2cap_no_conn_pending(chan))
1579 		return;
1580 
1581 	if (l2cap_check_enc_key_size(conn->hcon))
1582 		l2cap_start_connection(chan);
1583 	else
1584 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1585 }
1586 
1587 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1588 {
1589 	u32 local_feat_mask = l2cap_feat_mask;
1590 	if (!disable_ertm)
1591 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1592 
1593 	switch (mode) {
1594 	case L2CAP_MODE_ERTM:
1595 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1596 	case L2CAP_MODE_STREAMING:
1597 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1598 	default:
1599 		return 0x00;
1600 	}
1601 }
1602 
/* Start disconnecting @chan, reporting @err to its owner.  ERTM timers
 * are stopped first; A2MP channels have no signalling of their own and
 * only change state.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1629 
1630 /* ---- L2CAP connections ---- */
/* Kick-start channel setup on @conn, typically after the information
 * request procedure completes: start outgoing connections that were
 * waiting (BT_CONNECT) and answer incoming ones pending a response
 * (BT_CONNECT2).
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		/* Connectionless channels are ready immediately */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels that insist on a mode this pair
			 * of devices cannot support.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Configuration starts only after a successful
			 * response, and the request is sent only once.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1710 
/* LE-link specific work once the connection is ready: drive pending
 * outgoing pairing and, as peripheral, request a connection parameter
 * update if the current interval is outside the configured bounds.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1743 
/* The underlying link is up: start setup of every channel already
 * attached to @conn and release the pending RX work queued while the
 * connection was being established.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* Kick off the feature-mask exchange on BR/EDR links */
	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels have their own setup procedure */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames that arrived before the connection was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1784 
1785 /* Notify sockets that we cannot guaranty reliability anymore */
1786 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1787 {
1788 	struct l2cap_chan *chan;
1789 
1790 	BT_DBG("conn %p", conn);
1791 
1792 	mutex_lock(&conn->chan_lock);
1793 
1794 	list_for_each_entry(chan, &conn->chan_l, list) {
1795 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1796 			l2cap_chan_set_err(chan, err);
1797 	}
1798 
1799 	mutex_unlock(&conn->chan_lock);
1800 }
1801 
/* Info request timer expired: stop waiting for the peer's info
 * response, mark the procedure as done and proceed with channel setup.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1812 
1813 /*
1814  * l2cap_user
1815  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1816  * callback is called during registration. The ->remove callback is called
1817  * during unregistration.
1818  * An l2cap_user object can either be explicitly unregistered or when the
1819  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1820  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1821  * External modules must own a reference to the l2cap_conn object if they intend
1822  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1823  * any time if they don't.
1824  */
1825 
/* Register @user on @conn and invoke its ->probe() callback under the
 * hci_dev lock.  Returns -EINVAL if the user is already linked,
 * -ENODEV if the connection has been torn down, or the ->probe()
 * error.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1863 
1864 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1865 {
1866 	struct hci_dev *hdev = conn->hcon->hdev;
1867 
1868 	hci_dev_lock(hdev);
1869 
1870 	if (list_empty(&user->list))
1871 		goto out_unlock;
1872 
1873 	list_del_init(&user->list);
1874 	user->remove(conn, user);
1875 
1876 out_unlock:
1877 	hci_dev_unlock(hdev);
1878 }
1879 EXPORT_SYMBOL(l2cap_unregister_user);
1880 
1881 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1882 {
1883 	struct l2cap_user *user;
1884 
1885 	while (!list_empty(&conn->users)) {
1886 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1887 		list_del_init(&user->list);
1888 		user->remove(conn, user);
1889 	}
1890 }
1891 
/* Tear down the L2CAP state attached to @hcon: cancel pending work,
 * detach registered users, close every channel with @err, release the
 * HCI channel and drop the connection reference.  Safe to call when
 * no l2cap_data is attached.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives l2cap_chan_del() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1947 
/* kref release callback: drop the hci_conn reference and free @conn */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1955 
/* Take a reference on @conn; paired with l2cap_conn_put() */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1962 
/* Drop a reference on @conn; frees it when the last one is gone */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1968 
1969 /* ---- Socket interface ---- */
1970 
/* Find channel with psm and source / destination bdaddr.
 * Returns the closest match; a reference is taken on the returned
 * channel and must be released with l2cap_chan_put().
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		/* state == 0 matches channels in any state */
		if (state && c->state != state)
			continue;

		/* The channel's source address type must suit the link */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels already being freed */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
2024 
2025 static void l2cap_monitor_timeout(struct work_struct *work)
2026 {
2027 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2028 					       monitor_timer.work);
2029 
2030 	BT_DBG("chan %p", chan);
2031 
2032 	l2cap_chan_lock(chan);
2033 
2034 	if (!chan->conn) {
2035 		l2cap_chan_unlock(chan);
2036 		l2cap_chan_put(chan);
2037 		return;
2038 	}
2039 
2040 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2041 
2042 	l2cap_chan_unlock(chan);
2043 	l2cap_chan_put(chan);
2044 }
2045 
2046 static void l2cap_retrans_timeout(struct work_struct *work)
2047 {
2048 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2049 					       retrans_timer.work);
2050 
2051 	BT_DBG("chan %p", chan);
2052 
2053 	l2cap_chan_lock(chan);
2054 
2055 	if (!chan->conn) {
2056 		l2cap_chan_unlock(chan);
2057 		l2cap_chan_put(chan);
2058 		return;
2059 	}
2060 
2061 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2062 	l2cap_chan_unlock(chan);
2063 	l2cap_chan_put(chan);
2064 }
2065 
/* Transmit every frame queued for a streaming-mode channel.  Each
 * frame is stamped with the next TX sequence number, gets its control
 * field packed and an FCS appended when enabled, and is sent at once;
 * streaming mode keeps nothing around for retransmission.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming frames acknowledge nothing */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
2104 
/* Transmit as many queued I-frames as the remote TX window allows on
 * an ERTM channel.  Each frame is stamped with fresh reqseq/txseq,
 * packed, FCS'd when enabled and sent as a clone so the original stays
 * queued for possible retransmission.  Returns the number of frames
 * sent, or -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggyback a pending F-bit on the outgoing I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; NULL means the queue is drained */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2174 
/* Retransmit every frame whose sequence number is on the channel's
 * retransmission list.  Each frame's retry count is bumped and checked
 * against max_tx (the channel is disconnected when exceeded), its
 * control field and FCS are refreshed in a private copy, and the copy
 * is sent.  No-op while the peer is busy or a move is in progress.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		/* max_tx == 0 means unlimited retransmissions */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the acknowledgement and F-bit for this resend */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2255 
/* Queue the single I-frame identified by control->reqseq for
 * retransmission and kick the resend machinery.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2264 
/* Rebuild the retransmit list to cover every already-sent unacked frame
 * starting at control->reqseq (or from the head of tx_q) and resend
 * them all.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll from the peer obliges us to set the F-bit in reply */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Locate the first frame to retransmit: the one
		 * carrying reqseq, or tx_send_head if reqseq is not in
		 * the queue.
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue every frame already sent (everything before
		 * tx_send_head) for retransmission.
		 */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2298 
/* Acknowledge received I-frames: send RNR when locally busy,
 * piggy-back the ack on outgoing data if possible, send an explicit RR
 * once 3/4 of the ack window is outstanding, or defer via the ack
 * timer otherwise.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	/* Frames received but not yet acknowledged to the peer */
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the peer to stop sending */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Still frames outstanding: let the ack timer flush
		 * them later.
		 */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2348 
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the head skb, the remainder into a chain of
 * continuation fragments sized by the connection MTU.  Returns the
 * number of bytes consumed or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link the fragment before copying; on -EFAULT the
		 * caller's kfree_skb() releases the whole chain.
		 */
		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Account fragment bytes in the head skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2392 
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, followed by the user payload from @msg.  Returns the skb
 * or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Head skb is capped at the connection MTU; the rest spills
	 * into continuation fragments.
	 */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2424 
/* Build a basic-mode B-frame: L2CAP header followed directly by the
 * user payload from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Head skb is capped at the connection MTU; the remainder
	 * spills into continuation fragments.
	 */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2454 
/* Build an ERTM/streaming I-frame: L2CAP header, zeroed control field
 * (filled in at transmit time), optional SDU length when this is the
 * first segment of a segmented SDU, then the payload.  Space for the
 * FCS is reserved via hlen but not written here.  Returns the skb or
 * an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on enhanced vs extended control field */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Stash per-frame control-block state used at (re)transmit */
	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2508 
/* Segment the SDU in @msg into ERTM/streaming I-frame PDUs, appending
 * them to @seg_queue with SAR markers (UNSEGMENTED, or
 * START/CONTINUE/END).  Returns 0 on success or a negative errno, in
 * which case @seg_queue is purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU-length field */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First of several segments carries the total SDU len */
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START segment carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2575 
/* Build an LE flow-control (K-frame) PDU: L2CAP header, optional SDU
 * length when this is the first segment, then the payload from @msg.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	/* Head skb is capped at the connection MTU; the remainder
	 * spills into continuation fragments.
	 */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2618 
/* Segment the SDU in @msg into LE flow-control PDUs sized by the
 * remote MPS, appending them to @seg_queue.  Only the first PDU
 * carries the 2-byte SDU length.  Returns 0 or a negative errno, in
 * which case @seg_queue is purged.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First PDU loses L2CAP_SDULEN_SIZE bytes to the SDU length */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Subsequent PDUs have no SDU-length field, so
			 * they regain that space for payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2654 
2655 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2656 {
2657 	int sent = 0;
2658 
2659 	BT_DBG("chan %p", chan);
2660 
2661 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2662 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2663 		chan->tx_credits--;
2664 		sent++;
2665 	}
2666 
2667 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2668 	       skb_queue_len(&chan->tx_q));
2669 }
2670 
/* Entry point for sending user data on a channel.  Dispatches on the
 * channel type and mode, segmenting the SDU where the mode requires
 * it.  Returns the number of bytes accepted or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* The channel may have gone down while segmenting (the
		 * allocator can block); discard the queued PDUs then.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: ask the upper layer to stop writers */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2774 
/* Send an SREJ S-frame for every sequence number between
 * expected_tx_seq and @txseq that has not already been received into
 * the srej queue, recording each request on srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Skip frames already buffered out of order */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2797 
/* Re-send an SREJ for the most recently requested (tail) sequence
 * number on srej_list; no-op when the list is empty.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
2813 
/* Re-send SREJs for every outstanding sequence number on srej_list up
 * to (but not including) @txseq.  Each entry is popped, re-requested
 * and re-appended, with a single pass bounded by the initial head.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop at the received frame or once the list is empty */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2839 
/* Process an acknowledgment (reqseq) from the peer: free every frame
 * in the transmit queue acknowledged by it, advance expected_ack_seq,
 * and stop the retransmission timer once nothing is outstanding.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or nothing newly acknowledged */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2871 
/* Abandon SREJ-based recovery: drop the out-of-order receive buffer
 * and pending SREJ requests, and fall back to the plain RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2881 
/* Handle transmit-path events while in the XMIT state (normal
 * transmission, not waiting for an F-bit response).
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Queue new data and try to transmit immediately */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* l2cap_send_ack() sends RNR while LOCAL_BUSY is set */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer to stop; poll it (RR with
			 * P-bit) and wait for the F-bit response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Poll the peer and move to WAIT_F for its response */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer for its state */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2953 
2954 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2955 				  struct l2cap_ctrl *control,
2956 				  struct sk_buff_head *skbs, u8 event)
2957 {
2958 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2959 	       event);
2960 
2961 	switch (event) {
2962 	case L2CAP_EV_DATA_REQUEST:
2963 		if (chan->tx_send_head == NULL)
2964 			chan->tx_send_head = skb_peek(skbs);
2965 		/* Queue data, but don't send. */
2966 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2967 		break;
2968 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2969 		BT_DBG("Enter LOCAL_BUSY");
2970 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2971 
2972 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2973 			/* The SREJ_SENT state must be aborted if we are to
2974 			 * enter the LOCAL_BUSY state.
2975 			 */
2976 			l2cap_abort_rx_srej_sent(chan);
2977 		}
2978 
2979 		l2cap_send_ack(chan);
2980 
2981 		break;
2982 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2983 		BT_DBG("Exit LOCAL_BUSY");
2984 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2985 
2986 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2987 			struct l2cap_ctrl local_control;
2988 			memset(&local_control, 0, sizeof(local_control));
2989 			local_control.sframe = 1;
2990 			local_control.super = L2CAP_SUPER_RR;
2991 			local_control.poll = 1;
2992 			local_control.reqseq = chan->buffer_seq;
2993 			l2cap_send_sframe(chan, &local_control);
2994 
2995 			chan->retry_count = 1;
2996 			__set_monitor_timer(chan);
2997 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2998 		}
2999 		break;
3000 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
3001 		l2cap_process_reqseq(chan, control->reqseq);
3002 		fallthrough;
3003 
3004 	case L2CAP_EV_RECV_FBIT:
3005 		if (control && control->final) {
3006 			__clear_monitor_timer(chan);
3007 			if (chan->unacked_frames > 0)
3008 				__set_retrans_timer(chan);
3009 			chan->retry_count = 0;
3010 			chan->tx_state = L2CAP_TX_STATE_XMIT;
3011 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3012 		}
3013 		break;
3014 	case L2CAP_EV_EXPLICIT_POLL:
3015 		/* Ignore */
3016 		break;
3017 	case L2CAP_EV_MONITOR_TO:
3018 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3019 			l2cap_send_rr_or_rnr(chan, 1);
3020 			__set_monitor_timer(chan);
3021 			chan->retry_count++;
3022 		} else {
3023 			l2cap_send_disconn_req(chan, ECONNABORTED);
3024 		}
3025 		break;
3026 	default:
3027 		break;
3028 	}
3029 }
3030 
3031 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3032 		     struct sk_buff_head *skbs, u8 event)
3033 {
3034 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3035 	       chan, control, skbs, event, chan->tx_state);
3036 
3037 	switch (chan->tx_state) {
3038 	case L2CAP_TX_STATE_XMIT:
3039 		l2cap_tx_state_xmit(chan, control, skbs, event);
3040 		break;
3041 	case L2CAP_TX_STATE_WAIT_F:
3042 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3043 		break;
3044 	default:
3045 		/* Ignore event */
3046 		break;
3047 	}
3048 }
3049 
/* Feed a received frame's reqseq/F-bit into the transmit state
 * machine so acknowledged frames are released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
3056 
/* Feed only a received frame's F-bit into the transmit state machine
 * (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
3063 
/* Copy frame to all raw sockets on that connection, except the channel
 * the frame originated from.  Clone failures for individual channels
 * are silently skipped.
 */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* recv() takes ownership on success; free on failure */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
3091 
3092 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (signalling
 * CID), command header, then @dlen bytes of @data, with payload beyond
 * the connection MTU chained as continuation fragments.  Returns the
 * skb or NULL on allocation failure or an unusably small MTU.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* MTU must at least fit the two headers */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE and BR/EDR use different fixed signalling channels */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* As much payload as fits after the headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb together with any fragments linked so far */
	kfree_skb(skb);
	return NULL;
}
3158 
/* Parse one configuration option at *ptr, advancing *ptr past it.
 * Values of 1, 2 or 4 bytes are decoded little-endian into *val; any
 * other length leaves *val pointing at the raw option payload.
 * Returns the total encoded size of the option.
 *
 * NOTE(review): opt->len is peer-supplied and not bounds-checked here;
 * presumably callers validate the remaining buffer length — confirm.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3192 
/* Append one configuration option at *ptr, advancing *ptr past it.
 * 1/2/4-byte values are encoded little-endian; other lengths treat
 * @val as a pointer to @len bytes.  Options that would overflow the
 * remaining @size are silently dropped.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Not enough room left in the output buffer: skip the option */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: val is a pointer to the data */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3225 
/* Append an Extended Flow Specification option for the channel's
 * mode; no-op for modes other than ERTM and streaming.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		/* Streaming uses best-effort with no latency/flush
		 * requirements.
		 */
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3256 
/* Deferred-work handler for the ack timer: send an RR/RNR if any
 * received frames are still unacknowledged.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Drops the reference presumably taken when this delayed work
	 * was scheduled — confirm against the timer-arming macros.
	 */
	l2cap_chan_put(chan);
}
3276 
/* Reset per-channel sequencing/SDU state and, for ERTM mode, set up
 * the receive/transmit states and the SREJ/retransmit sequence lists.
 * Returns 0 on success or a negative errno from list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state starts out stable on BR/EDR */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* The remaining setup applies only to ERTM mode */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	/* On failure, release the already-allocated srej_list */
	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3317 
3318 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3319 {
3320 	switch (mode) {
3321 	case L2CAP_MODE_STREAMING:
3322 	case L2CAP_MODE_ERTM:
3323 		if (l2cap_mode_supported(mode, remote_feat_mask))
3324 			return mode;
3325 		fallthrough;
3326 	default:
3327 		return L2CAP_MODE_BASIC;
3328 	}
3329 }
3330 
3331 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3332 {
3333 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3334 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3335 }
3336 
3337 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3338 {
3339 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3340 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3341 }
3342 
/* Fill in the RFC option's retransmission and monitor timeouts: derive
 * them from the AMP controller's best-effort flush timeout when the
 * channel runs on an AMP link, otherwise use the L2CAP defaults.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC option fields */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3380 
3381 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3382 {
3383 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3384 	    __l2cap_ews_supported(chan->conn)) {
3385 		/* use extended control field */
3386 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3387 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3388 	} else {
3389 		chan->tx_win = min_t(u16, chan->tx_win,
3390 				     L2CAP_DEFAULT_TX_WINDOW);
3391 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3392 	}
3393 	chan->ack_win = chan->tx_win;
3394 }
3395 
3396 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3397 {
3398 	struct hci_conn *conn = chan->conn->hcon;
3399 
3400 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3401 
3402 	/* The 2-DH1 packet has between 2 and 56 information bytes
3403 	 * (including the 2-byte payload header)
3404 	 */
3405 	if (!(conn->pkt_type & HCI_2DH1))
3406 		chan->imtu = 54;
3407 
3408 	/* The 3-DH1 packet has between 2 and 85 information bytes
3409 	 * (including the 2-byte payload header)
3410 	 */
3411 	if (!(conn->pkt_type & HCI_3DH1))
3412 		chan->imtu = 83;
3413 
3414 	/* The 2-DH3 packet has between 2 and 369 information bytes
3415 	 * (including the 2-byte payload header)
3416 	 */
3417 	if (!(conn->pkt_type & HCI_2DH3))
3418 		chan->imtu = 367;
3419 
3420 	/* The 3-DH3 packet has between 2 and 554 information bytes
3421 	 * (including the 2-byte payload header)
3422 	 */
3423 	if (!(conn->pkt_type & HCI_3DH3))
3424 		chan->imtu = 552;
3425 
3426 	/* The 2-DH5 packet has between 2 and 681 information bytes
3427 	 * (including the 2-byte payload header)
3428 	 */
3429 	if (!(conn->pkt_type & HCI_2DH5))
3430 		chan->imtu = 679;
3431 
3432 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3433 	 * (including the 2-byte payload header)
3434 	 */
3435 	if (!(conn->pkt_type & HCI_3DH5))
3436 		chan->imtu = 1021;
3437 }
3438 
/* Build an L2CAP Configure Request for @chan into @data (at most
 * @data_size bytes) and return the number of bytes written.  Also
 * selects the channel mode on the first request and appends the MTU,
 * RFC, EFS, EWS and FCS options as appropriate.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	/* rfc.mode starts as the currently requested channel mode */
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only before any config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A "state 2" device insists on its configured mode and
		 * skips the downgrade below.
		 */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Downgrade to a mode the remote supports (rfc.mode
		 * still holds the original chan->mode here).
		 */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send an MTU option when it differs from the default; an
	 * unset (zero) MTU is auto-sized from the link's packet types.
	 */
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit Basic-mode RFC option when the
		 * remote supports at least one non-basic mode.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU size is bounded by the connection MTU less the
		 * extended header, SDU length and FCS overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		/* The RFC option's txwin field is limited to the
		 * standard window; larger windows go in the EWS option.
		 */
		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		/* Offer to drop the FCS when we don't want it or the
		 * remote already asked to drop it.
		 */
		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3564 
/* Parse the Configure Request accumulated in chan->conf_req and build
 * the Configure Response into @data (at most @data_size bytes).
 *
 * Returns the number of response bytes written, or -ECONNREFUSED when
 * the remote's options cannot be reconciled with this channel.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect all options from the stored request.
	 * Options with a wrong length are silently skipped.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		/* The hint bit marks options that may be ignored when
		 * not understood.
		 */
		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window requires A2MP support locally */
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;
			/* Echo unknown non-hint options back as
			 * "unknown option" in the response.
			 */
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode is only (re)negotiated during the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State 2 devices refuse any mode other than their own */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give up after the second failed attempt */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* Reject a flow spec whose service type conflicts
			 * with ours (unless either side is no-traffic).
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* An EWS option, if received, overrides the RFC
			 * option's (smaller) window field.
			 */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote's PDU size to what fits our
			 * connection MTU after protocol overhead.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3790 
/* Process a Configure Response (@rsp, @len bytes) from the remote and
 * build a follow-up Configure Request into @data (at most @size bytes).
 * *result holds the response's result code and may be examined/kept by
 * the caller.
 *
 * Returns the number of request bytes written, or -ECONNREFUSED when
 * the response conflicts with our requirements.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	/* Walk the response options, adopting acceptable values and
	 * echoing them into the next request.  Malformed options are
	 * skipped.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* State 2 devices never accept a different mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Reject a conflicting service type (unless
			 * either side is no-traffic).
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be upgraded by the remote's response */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control, the RFC window field
			 * bounds the acknowledgement window.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3908 
3909 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3910 				u16 result, u16 flags)
3911 {
3912 	struct l2cap_conf_rsp *rsp = data;
3913 	void *ptr = rsp->data;
3914 
3915 	BT_DBG("chan %p", chan);
3916 
3917 	rsp->scid   = cpu_to_le16(chan->dcid);
3918 	rsp->result = cpu_to_le16(result);
3919 	rsp->flags  = cpu_to_le16(flags);
3920 
3921 	return ptr - data;
3922 }
3923 
3924 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3925 {
3926 	struct l2cap_le_conn_rsp rsp;
3927 	struct l2cap_conn *conn = chan->conn;
3928 
3929 	BT_DBG("chan %p", chan);
3930 
3931 	rsp.dcid    = cpu_to_le16(chan->scid);
3932 	rsp.mtu     = cpu_to_le16(chan->imtu);
3933 	rsp.mps     = cpu_to_le16(chan->mps);
3934 	rsp.credits = cpu_to_le16(chan->rx_credits);
3935 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3936 
3937 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3938 		       &rsp);
3939 }
3940 
3941 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3942 {
3943 	struct {
3944 		struct l2cap_ecred_conn_rsp rsp;
3945 		__le16 dcid[5];
3946 	} __packed pdu;
3947 	struct l2cap_conn *conn = chan->conn;
3948 	u16 ident = chan->ident;
3949 	int i = 0;
3950 
3951 	if (!ident)
3952 		return;
3953 
3954 	BT_DBG("chan %p ident %d", chan, ident);
3955 
3956 	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3957 	pdu.rsp.mps     = cpu_to_le16(chan->mps);
3958 	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3959 	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3960 
3961 	mutex_lock(&conn->chan_lock);
3962 
3963 	list_for_each_entry(chan, &conn->chan_l, list) {
3964 		if (chan->ident != ident)
3965 			continue;
3966 
3967 		/* Reset ident so only one response is sent */
3968 		chan->ident = 0;
3969 
3970 		/* Include all channels pending with the same ident */
3971 		pdu.dcid[i++] = cpu_to_le16(chan->scid);
3972 	}
3973 
3974 	mutex_unlock(&conn->chan_lock);
3975 
3976 	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
3977 			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
3978 }
3979 
/* Send the deferred BR/EDR connection response for @chan (a Create
 * Channel response on AMP links, a plain Connect response otherwise),
 * then kick off configuration with the first Configure Request if one
 * has not been sent yet.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	/* AMP channels answer with a Create Channel response instead */
	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only the first caller sends the Configure Request */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
	chan->num_conf_req++;
}
4008 
/* Extract the final RFC (and extended window) parameters from a
 * Configure Response (@rsp, @len bytes) and apply them to @chan.  Only
 * meaningful for ERTM and Streaming mode channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Pick up the RFC/EWS options, skipping malformed ones */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* The EWS option supersedes the RFC window field when
		 * the extended control field is in use.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
4064 
4065 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4066 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4067 				    u8 *data)
4068 {
4069 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4070 
4071 	if (cmd_len < sizeof(*rej))
4072 		return -EPROTO;
4073 
4074 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4075 		return 0;
4076 
4077 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4078 	    cmd->ident == conn->info_ident) {
4079 		cancel_delayed_work(&conn->info_timer);
4080 
4081 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4082 		conn->info_ident = 0;
4083 
4084 		l2cap_conn_start(conn);
4085 	}
4086 
4087 	return 0;
4088 }
4089 
/* Handle an incoming Connection Request: look up the listening channel
 * for the requested PSM, run the security and CID validity checks,
 * create the new channel and send the response (@rsp_code, so the same
 * path serves both Connect and Create Channel requests).  May also
 * trigger the feature-mask information exchange and the first Configure
 * Request.
 *
 * Returns the newly created channel, or NULL on failure.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Lock order: connection channel list, then the parent channel */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	/* The remote's source CID becomes our destination CID */
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace must authorize first */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not done yet */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Start the feature mask exchange if it hasn't happened yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, kick off configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4232 
4233 static int l2cap_connect_req(struct l2cap_conn *conn,
4234 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4235 {
4236 	struct hci_dev *hdev = conn->hcon->hdev;
4237 	struct hci_conn *hcon = conn->hcon;
4238 
4239 	if (cmd_len < sizeof(struct l2cap_conn_req))
4240 		return -EPROTO;
4241 
4242 	hci_dev_lock(hdev);
4243 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4244 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4245 		mgmt_device_connected(hdev, hcon, NULL, 0);
4246 	hci_dev_unlock(hdev);
4247 
4248 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4249 	return 0;
4250 }
4251 
/* Handle a Connection/Create Channel Response: locate the channel by
 * its source CID (or, before one is assigned, by the command ident) and
 * advance it according to the result -- on success move to BT_CONFIG
 * and send the first Configure Request, on pending just flag it, on any
 * error tear the channel down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A zero scid means no CID was assigned; fall back to the
	 * command ident to find the requesting channel.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	/* Take a reference; the channel may be concurrently released */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send the first Configure Request exactly once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code is a refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4331 
4332 static inline void set_default_fcs(struct l2cap_chan *chan)
4333 {
4334 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4335 	 * sides request it.
4336 	 */
4337 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4338 		chan->fcs = L2CAP_FCS_NONE;
4339 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4340 		chan->fcs = L2CAP_FCS_CRC16;
4341 }
4342 
4343 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4344 				    u8 ident, u16 flags)
4345 {
4346 	struct l2cap_conn *conn = chan->conn;
4347 
4348 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4349 	       flags);
4350 
4351 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4352 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4353 
4354 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4355 		       l2cap_build_conf_rsp(chan, data,
4356 					    L2CAP_CONF_SUCCESS, flags), data);
4357 }
4358 
4359 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4360 				   u16 scid, u16 dcid)
4361 {
4362 	struct l2cap_cmd_rej_cid rej;
4363 
4364 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4365 	rej.scid = __cpu_to_le16(scid);
4366 	rej.dcid = __cpu_to_le16(dcid);
4367 
4368 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4369 }
4370 
/* Handle an incoming L2CAP Configure Request on a connection-oriented
 * channel.  Option data may arrive split across several requests (the
 * continuation flag); fragments accumulate in chan->conf_req and are only
 * parsed once a request without the continuation flag arrives.
 * Returns 0, or a negative error for a malformed packet.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* On success the lookup returns the channel held and locked. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Only channels in (or just past) the configuration phase may be
	 * configured; anything else gets a command reject.
	 */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		/* Options could not be parsed: give up and disconnect. */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	/* Cap the counter so it cannot grow without bound. */
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions are configured: finish bringing the channel up. */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not gone out yet: send it now. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4482 
/* Handle an incoming L2CAP Configure Response to one of our Configure
 * Requests.  SUCCESS and PENDING advance the negotiation; UNKNOWN and
 * UNACCEPT trigger a renegotiated request (bounded by
 * L2CAP_CONF_MAX_CONF_RSP rounds); any other result fails the channel.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* On success the lookup returns the channel held and locked. */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Remote accepted our options; extract the agreed RFC mode. */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR channels can respond now; AMP channels wait
			 * for the logical link before confirming EFS.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Retry with adjusted options, up to the negotiation cap. */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Unrecoverable result: fail and tear down the channel. */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions are configured: bring the channel up. */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4597 
/* Handle an incoming L2CAP Disconnection Request: acknowledge it with a
 * Disconnection Response, then shut down and delete the matching channel.
 * An unknown destination CID is answered with a command reject.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* chan_lock must be held around the unlocked (__) scid lookup. */
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Take an extra reference so the channel outlives chan_del below. */
	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4644 
/* Handle an L2CAP Disconnection Response to a request we sent earlier:
 * complete the teardown of the channel.  Responses for channels that are
 * not in BT_DISCONN are stale/unexpected and silently ignored.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* chan_lock must be held around the unlocked (__) scid lookup. */
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	/* Extra reference keeps the channel alive across chan_del below. */
	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_del(chan, 0);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4690 
/* Handle an incoming L2CAP Information Request.  Supported queries are
 * the extended feature mask and the fixed channel bitmap; any other type
 * is answered with L2CAP_IR_NOTSUPP.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* Response header plus a 32-bit feature mask. */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS unless ERTM is disabled. */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* Response header plus an 8-byte fixed-channel map; only
		 * the first byte carries our local fixed channels.
		 */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		/* Unknown information type: reply "not supported". */
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4741 
/* Handle an L2CAP Information Response received during connection setup.
 * A feature-mask answer may chain into a fixed-channel query; once the
 * exchange is complete (or failed) pending channels are started via
 * l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused the query; give up on feature discovery and
		 * start the pending channels anyway.
		 */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: ask for the map. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		/* Discovery complete; record the remote fixed channels. */
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4804 
/* Handle an A2MP Create Channel Request.  amp_id AMP_ID_BREDR means an
 * ordinary BR/EDR channel; otherwise the given AMP controller id is
 * validated and the new channel is bound to a high-speed link.  Invalid
 * controllers are answered with L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	/* Only meaningful when we advertise A2MP support locally. */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP physical link to the peer must already exist. */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		/* Bind the channel to the high-speed link; AMP links carry
		 * their own CRC, so no L2CAP FCS is used.
		 */
		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4881 
4882 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4883 {
4884 	struct l2cap_move_chan_req req;
4885 	u8 ident;
4886 
4887 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4888 
4889 	ident = l2cap_get_ident(chan->conn);
4890 	chan->ident = ident;
4891 
4892 	req.icid = cpu_to_le16(chan->scid);
4893 	req.dest_amp_id = dest_amp_id;
4894 
4895 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4896 		       &req);
4897 
4898 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4899 }
4900 
4901 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4902 {
4903 	struct l2cap_move_chan_rsp rsp;
4904 
4905 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4906 
4907 	rsp.icid = cpu_to_le16(chan->dcid);
4908 	rsp.result = cpu_to_le16(result);
4909 
4910 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4911 		       sizeof(rsp), &rsp);
4912 }
4913 
4914 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4915 {
4916 	struct l2cap_move_chan_cfm cfm;
4917 
4918 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4919 
4920 	chan->ident = l2cap_get_ident(chan->conn);
4921 
4922 	cfm.icid = cpu_to_le16(chan->scid);
4923 	cfm.result = cpu_to_le16(result);
4924 
4925 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4926 		       sizeof(cfm), &cfm);
4927 
4928 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4929 }
4930 
4931 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4932 {
4933 	struct l2cap_move_chan_cfm cfm;
4934 
4935 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4936 
4937 	cfm.icid = cpu_to_le16(icid);
4938 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4939 
4940 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4941 		       sizeof(cfm), &cfm);
4942 }
4943 
4944 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4945 					 u16 icid)
4946 {
4947 	struct l2cap_move_chan_cfm_rsp rsp;
4948 
4949 	BT_DBG("icid 0x%4.4x", icid);
4950 
4951 	rsp.icid = cpu_to_le16(icid);
4952 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4953 }
4954 
4955 static void __release_logical_link(struct l2cap_chan *chan)
4956 {
4957 	chan->hs_hchan = NULL;
4958 	chan->hs_hcon = NULL;
4959 
4960 	/* Placeholder - release the logical link */
4961 }
4962 
/* An AMP logical link could not be established.  During channel creation
 * this aborts the channel entirely; during a move it unwinds the move
 * according to our role in it.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4993 
/* Logical link came up for a channel being created on an AMP controller:
 * send the deferred EFS Configure Response and, if the input side of
 * configuration already completed, bring the channel up.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident was stashed when the config exchange went pending. */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
5016 
/* Logical link came up for a channel being moved between controllers:
 * advance the move state machine and send the confirm/response the peer
 * is waiting for, depending on our role and current state.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* Defer further progress while the local receiver is busy. */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
5050 
5051 /* Call with chan locked */
5052 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5053 		       u8 status)
5054 {
5055 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5056 
5057 	if (status) {
5058 		l2cap_logical_fail(chan);
5059 		__release_logical_link(chan);
5060 		return;
5061 	}
5062 
5063 	if (chan->state != BT_CONNECTED) {
5064 		/* Ignore logical link if channel is on BR/EDR */
5065 		if (chan->local_amp_id != AMP_ID_BREDR)
5066 			l2cap_logical_finish_create(chan, hchan);
5067 	} else {
5068 		l2cap_logical_finish_move(chan, hchan);
5069 	}
5070 }
5071 
5072 void l2cap_move_start(struct l2cap_chan *chan)
5073 {
5074 	BT_DBG("chan %p", chan);
5075 
5076 	if (chan->local_amp_id == AMP_ID_BREDR) {
5077 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5078 			return;
5079 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5080 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5081 		/* Placeholder - start physical link setup */
5082 	} else {
5083 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5084 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5085 		chan->move_id = 0;
5086 		l2cap_move_setup(chan);
5087 		l2cap_send_move_chan_req(chan, 0);
5088 	}
5089 }
5090 
/* Continue channel creation after the AMP physical link attempt has a
 * result.  Outgoing channels proceed on AMP or fall back to BR/EDR;
 * incoming channels get their Create Channel Response and, on success,
 * start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* AMP links provide their own CRC; no L2CAP FCS. */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Accepted: move on to the configuration phase. */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}
	}
}
5142 
5143 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5144 				   u8 remote_amp_id)
5145 {
5146 	l2cap_move_setup(chan);
5147 	chan->move_id = local_amp_id;
5148 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
5149 
5150 	l2cap_send_move_chan_req(chan, remote_amp_id);
5151 }
5152 
/* Responder side of a channel move after the physical link result.
 * NOTE(review): hchan is currently always NULL (the lookup is still a
 * placeholder), so today this always answers L2CAP_MR_NOT_ALLOWED.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
5177 
5178 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5179 {
5180 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5181 		u8 rsp_result;
5182 		if (result == -EINVAL)
5183 			rsp_result = L2CAP_MR_BAD_ID;
5184 		else
5185 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5186 
5187 		l2cap_send_move_chan_rsp(chan, rsp_result);
5188 	}
5189 
5190 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5191 	chan->move_state = L2CAP_MOVE_STABLE;
5192 
5193 	/* Restart data transmission */
5194 	l2cap_ertm_send(chan);
5195 }
5196 
/* Completion callback for AMP physical link setup.
 * Invoke with locked chan
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel already going away: nothing to continue. */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
		return;

	if (chan->state != BT_CONNECTED) {
		/* Physical link result for a channel still being created. */
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		/* Established channel: continue the move per our role. */
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
5228 
/* Handle an incoming Move Channel Request.  Validates the channel mode,
 * policy and destination controller, detects move collisions (larger
 * bd_addr wins), and answers with the appropriate move result.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Moves only make sense when we advertise A2MP support. */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	/* On success the lookup returns the channel held and locked. */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Fixed channels, BR/EDR-only policy, and basic-mode channels
	 * cannot be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Destination must be a live AMP controller (or BR/EDR). */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5326 
/* Advance the initiator move state machine after a SUCCESS or PEND Move
 * Channel Response.  If the channel cannot be found an UNCONFIRMED
 * confirm is sent for the bare icid.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* On success the lookup returns the channel held and locked. */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A PEND result extends the move timeout (ERTX). */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
5417 
/* Handle a failed Move Channel Response.  A COLLISION result flips us to
 * the responder role; anything else cancels the move.  Either way an
 * UNCONFIRMED confirm is sent to close the transaction.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* On success the lookup returns the channel held and locked. */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
5447 
5448 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5449 				  struct l2cap_cmd_hdr *cmd,
5450 				  u16 cmd_len, void *data)
5451 {
5452 	struct l2cap_move_chan_rsp *rsp = data;
5453 	u16 icid, result;
5454 
5455 	if (cmd_len != sizeof(*rsp))
5456 		return -EPROTO;
5457 
5458 	icid = le16_to_cpu(rsp->icid);
5459 	result = le16_to_cpu(rsp->result);
5460 
5461 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5462 
5463 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5464 		l2cap_move_continue(conn, icid, result);
5465 	else
5466 		l2cap_move_fail(conn, cmd->ident, icid, result);
5467 
5468 	return 0;
5469 }
5470 
/* Handle an incoming Move Channel Confirmation (responder side): commit
 * or roll back the controller switch and always acknowledge with a
 * confirmation response, even for unknown icids.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* On success the lookup returns the channel held and locked. */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit the switch; drop the logical link if the
			 * channel ends up back on BR/EDR.
			 */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed: stay on the current controller. */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5513 
/* Handle a Move Channel Confirmation Response, the final message of the
 * AMP move sequence.  A silently-dropped response (no matching scid) is
 * not an error.  l2cap_get_chan_by_scid() returns the channel locked
 * with a reference held.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		/* Move is complete: commit the new controller id and drop
		 * the AMP logical link if we ended up back on BR/EDR.
		 */
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5549 
/* Handle an LE Connection Parameter Update Request.
 *
 * Only the central (master) may act on this request; a peripheral
 * receiving it returns -EINVAL, which makes the caller send a command
 * reject.  The accept/reject response is sent first, and only then are
 * accepted parameters pushed to the controller and reported to mgmt.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	/* Validate the requested parameters against the allowed ranges */
	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the new parameters and let mgmt decide whether
		 * userspace should persist them (store_hint).
		 */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5599 
/* Handle an LE Credit Based Connection Response for a connection we
 * initiated.
 *
 * On success the channel parameters (dcid, mtu, mps, initial credits)
 * are committed and the channel becomes ready.  Security-related
 * failures trigger an SMP security elevation so a new Connect Request
 * can be sent afterwards; any other failure tears the channel down.
 * Runs with conn->chan_lock and the channel lock held around the state
 * changes.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* Successful responses must carry sane MTU/MPS (spec minimum 23)
	 * and a dcid inside the LE dynamic CID range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Reject a dcid that is already in use by another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise security one level above the link's current one */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5686 
5687 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5688 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5689 				      u8 *data)
5690 {
5691 	int err = 0;
5692 
5693 	switch (cmd->code) {
5694 	case L2CAP_COMMAND_REJ:
5695 		l2cap_command_rej(conn, cmd, cmd_len, data);
5696 		break;
5697 
5698 	case L2CAP_CONN_REQ:
5699 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5700 		break;
5701 
5702 	case L2CAP_CONN_RSP:
5703 	case L2CAP_CREATE_CHAN_RSP:
5704 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5705 		break;
5706 
5707 	case L2CAP_CONF_REQ:
5708 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5709 		break;
5710 
5711 	case L2CAP_CONF_RSP:
5712 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5713 		break;
5714 
5715 	case L2CAP_DISCONN_REQ:
5716 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5717 		break;
5718 
5719 	case L2CAP_DISCONN_RSP:
5720 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5721 		break;
5722 
5723 	case L2CAP_ECHO_REQ:
5724 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5725 		break;
5726 
5727 	case L2CAP_ECHO_RSP:
5728 		break;
5729 
5730 	case L2CAP_INFO_REQ:
5731 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5732 		break;
5733 
5734 	case L2CAP_INFO_RSP:
5735 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5736 		break;
5737 
5738 	case L2CAP_CREATE_CHAN_REQ:
5739 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5740 		break;
5741 
5742 	case L2CAP_MOVE_CHAN_REQ:
5743 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5744 		break;
5745 
5746 	case L2CAP_MOVE_CHAN_RSP:
5747 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5748 		break;
5749 
5750 	case L2CAP_MOVE_CHAN_CFM:
5751 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5752 		break;
5753 
5754 	case L2CAP_MOVE_CHAN_CFM_RSP:
5755 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5756 		break;
5757 
5758 	default:
5759 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5760 		err = -EINVAL;
5761 		break;
5762 	}
5763 
5764 	return err;
5765 }
5766 
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Validates PSM/MTU/MPS/SCID, matches a listening channel, and either
 * creates the new channel (possibly deferring the response when
 * FLAG_DEFER_SETUP is set) or sends a response carrying the failure
 * result.  The L2CAP_CR_PEND result is used internally only, to skip
 * sending a response in the deferred case.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* Spec-defined minimum for both MTU and MPS is 23 octets */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* The link must already satisfy the listener's security level */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	/* Clone a child channel from the listening parent */
	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* Our local scid and initial rx credits go into the response */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5905 
5906 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5907 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5908 				   u8 *data)
5909 {
5910 	struct l2cap_le_credits *pkt;
5911 	struct l2cap_chan *chan;
5912 	u16 cid, credits, max_credits;
5913 
5914 	if (cmd_len != sizeof(*pkt))
5915 		return -EPROTO;
5916 
5917 	pkt = (struct l2cap_le_credits *) data;
5918 	cid	= __le16_to_cpu(pkt->cid);
5919 	credits	= __le16_to_cpu(pkt->credits);
5920 
5921 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5922 
5923 	chan = l2cap_get_chan_by_dcid(conn, cid);
5924 	if (!chan)
5925 		return -EBADSLT;
5926 
5927 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5928 	if (credits > max_credits) {
5929 		BT_ERR("LE credits overflow");
5930 		l2cap_send_disconn_req(chan, ECONNRESET);
5931 
5932 		/* Return 0 so that we don't trigger an unnecessary
5933 		 * command reject packet.
5934 		 */
5935 		goto unlock;
5936 	}
5937 
5938 	chan->tx_credits += credits;
5939 
5940 	/* Resume sending */
5941 	l2cap_le_flowctl_send(chan);
5942 
5943 	if (chan->tx_credits)
5944 		chan->ops->resume(chan);
5945 
5946 unlock:
5947 	l2cap_chan_unlock(chan);
5948 	l2cap_chan_put(chan);
5949 
5950 	return 0;
5951 }
5952 
/* Handle an Enhanced Credit Based (ECRED) Connection Request, which may
 * open up to L2CAP_ECRED_MAX_CID channels in a single command.
 *
 * Each requested scid is processed independently; per-channel failures
 * leave a zero dcid slot in the response while the overall result is
 * set to the last failure seen.  With FLAG_DEFER_SETUP the response is
 * postponed (defer == true) and sent later by the channel ops.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		__le16 dcid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* Payload after the fixed header must be a whole number of scids */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	memset(&pdu, 0, sizeof(pdu));

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* The link must already satisfy the listener's security level */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* Reserve a response slot; stays 0 if this scid fails */
		pdu.dcid[i] = 0x0000;
		len += sizeof(*pdu.dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response */
		if (!pdu.rsp.credits) {
			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
			pdu.rsp.mps = cpu_to_le16(chan->mps);
			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
		}

		pdu.dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

response:
	pdu.rsp.result = cpu_to_le16(result);

	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(pdu.rsp) + len, &pdu);

	return 0;
}
6107 
/* Handle an ECRED Connection Response covering one or more channels we
 * requested with the same command ident.
 *
 * Pending channels are matched by ident and consume dcids from the
 * response in order.  A dcid colliding with an existing channel forces
 * both the new and the original channel to be discarded, per spec.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	mutex_lock(&conn->chan_lock);

	/* cmd_len now tracks the remaining dcid payload */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only channels pending on this exact request */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			/* Re-lookup and discard the pre-existing owner of
			 * the colliding dcid as well.
			 */
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Raise security so a retry can succeed */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Commit negotiated parameters; channel is ready */
			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return err;
}
6221 
6222 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6223 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6224 					 u8 *data)
6225 {
6226 	struct l2cap_ecred_reconf_req *req = (void *) data;
6227 	struct l2cap_ecred_reconf_rsp rsp;
6228 	u16 mtu, mps, result;
6229 	struct l2cap_chan *chan;
6230 	int i, num_scid;
6231 
6232 	if (!enable_ecred)
6233 		return -EINVAL;
6234 
6235 	if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
6236 		result = L2CAP_CR_LE_INVALID_PARAMS;
6237 		goto respond;
6238 	}
6239 
6240 	mtu = __le16_to_cpu(req->mtu);
6241 	mps = __le16_to_cpu(req->mps);
6242 
6243 	BT_DBG("mtu %u mps %u", mtu, mps);
6244 
6245 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6246 		result = L2CAP_RECONF_INVALID_MTU;
6247 		goto respond;
6248 	}
6249 
6250 	if (mps < L2CAP_ECRED_MIN_MPS) {
6251 		result = L2CAP_RECONF_INVALID_MPS;
6252 		goto respond;
6253 	}
6254 
6255 	cmd_len -= sizeof(*req);
6256 	num_scid = cmd_len / sizeof(u16);
6257 	result = L2CAP_RECONF_SUCCESS;
6258 
6259 	for (i = 0; i < num_scid; i++) {
6260 		u16 scid;
6261 
6262 		scid = __le16_to_cpu(req->scid[i]);
6263 		if (!scid)
6264 			return -EPROTO;
6265 
6266 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6267 		if (!chan)
6268 			continue;
6269 
6270 		/* If the MTU value is decreased for any of the included
6271 		 * channels, then the receiver shall disconnect all
6272 		 * included channels.
6273 		 */
6274 		if (chan->omtu > mtu) {
6275 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6276 			       chan->omtu, mtu);
6277 			result = L2CAP_RECONF_INVALID_MTU;
6278 		}
6279 
6280 		chan->omtu = mtu;
6281 		chan->remote_mps = mps;
6282 	}
6283 
6284 respond:
6285 	rsp.result = cpu_to_le16(result);
6286 
6287 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6288 		       &rsp);
6289 
6290 	return 0;
6291 }
6292 
6293 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6294 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6295 					 u8 *data)
6296 {
6297 	struct l2cap_chan *chan, *tmp;
6298 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6299 	u16 result;
6300 
6301 	if (cmd_len < sizeof(*rsp))
6302 		return -EPROTO;
6303 
6304 	result = __le16_to_cpu(rsp->result);
6305 
6306 	BT_DBG("result 0x%4.4x", rsp->result);
6307 
6308 	if (!result)
6309 		return 0;
6310 
6311 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6312 		if (chan->ident != cmd->ident)
6313 			continue;
6314 
6315 		l2cap_chan_del(chan, ECONNRESET);
6316 	}
6317 
6318 	return 0;
6319 }
6320 
6321 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6322 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6323 				       u8 *data)
6324 {
6325 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6326 	struct l2cap_chan *chan;
6327 
6328 	if (cmd_len < sizeof(*rej))
6329 		return -EPROTO;
6330 
6331 	mutex_lock(&conn->chan_lock);
6332 
6333 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6334 	if (!chan)
6335 		goto done;
6336 
6337 	l2cap_chan_lock(chan);
6338 	l2cap_chan_del(chan, ECONNREFUSED);
6339 	l2cap_chan_unlock(chan);
6340 
6341 done:
6342 	mutex_unlock(&conn->chan_lock);
6343 	return 0;
6344 }
6345 
6346 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6347 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6348 				   u8 *data)
6349 {
6350 	int err = 0;
6351 
6352 	switch (cmd->code) {
6353 	case L2CAP_COMMAND_REJ:
6354 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
6355 		break;
6356 
6357 	case L2CAP_CONN_PARAM_UPDATE_REQ:
6358 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6359 		break;
6360 
6361 	case L2CAP_CONN_PARAM_UPDATE_RSP:
6362 		break;
6363 
6364 	case L2CAP_LE_CONN_RSP:
6365 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6366 		break;
6367 
6368 	case L2CAP_LE_CONN_REQ:
6369 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6370 		break;
6371 
6372 	case L2CAP_LE_CREDITS:
6373 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
6374 		break;
6375 
6376 	case L2CAP_ECRED_CONN_REQ:
6377 		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6378 		break;
6379 
6380 	case L2CAP_ECRED_CONN_RSP:
6381 		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6382 		break;
6383 
6384 	case L2CAP_ECRED_RECONF_REQ:
6385 		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6386 		break;
6387 
6388 	case L2CAP_ECRED_RECONF_RSP:
6389 		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6390 		break;
6391 
6392 	case L2CAP_DISCONN_REQ:
6393 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6394 		break;
6395 
6396 	case L2CAP_DISCONN_RSP:
6397 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6398 		break;
6399 
6400 	default:
6401 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
6402 		err = -EINVAL;
6403 		break;
6404 	}
6405 
6406 	return err;
6407 }
6408 
/* Process an skb received on the LE signaling channel.
 *
 * Unlike BR/EDR, an LE signaling PDU carries exactly one command, so
 * the declared length must match the remaining skb exactly.  A handler
 * error triggers a "command not understood" reject.  Consumes the skb.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Exactly one command per LE PDU; ident 0 is reserved/invalid */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
6449 
/* Process an skb received on the BR/EDR signaling channel.
 *
 * A BR/EDR signaling PDU may carry several commands back to back, so
 * this loops over them, rejecting individually any command whose
 * handler fails.  Raw sockets get a copy of the whole PDU first via
 * l2cap_raw_recv().  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Declared length must fit what is left; ident 0 invalid */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next command in the same PDU */
		skb_pull(skb, len);
	}

drop:
	kfree_skb(skb);
}
6495 
/* Verify the CRC16 FCS trailer of an ERTM/streaming frame.
 *
 * When FCS is enabled the trailer is trimmed off first; the trim only
 * shrinks skb->len, so the received FCS bytes are still readable at
 * skb->data + skb->len.  The CRC covers the L2CAP header (which sits
 * hdr_size bytes before skb->data at this point) plus the payload.
 * Returns 0 on match or when FCS is disabled, -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
6516 
/* Send a frame carrying the F-bit in response to a P-bit poll.
 *
 * If we are locally busy an RNR s-frame goes out immediately; otherwise
 * pending i-frames are flushed first (they can carry the F-bit), and an
 * RR s-frame is sent only if no frame consumed the F-bit
 * (CONN_SEND_FBIT is cleared by whichever frame carries it).
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer is no longer busy: restart retransmissions if needed */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
6550 
/* Append new_frag to skb's fragment list and update the running
 * accounting on the head skb.
 *
 * *last_frag tracks the tail of the chain; for the very first fragment
 * it points at the head skb itself, so both skb_shinfo(skb)->frag_list
 * and (*last_frag)->next end up referring to new_frag.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
6569 
/* Reassemble an SDU from SAR-segmented ERTM frames and hand complete
 * SDUs to the channel's recv op.
 *
 * Ownership: a consumed skb is handed to ops->recv or stored as
 * chan->sdu; in both cases the local skb pointer is set to NULL so the
 * error path below does not free it.  Any non-zero return (including a
 * SAR sequence violation, which leaves err at -EINVAL) aborts and frees
 * both the current skb and any partial SDU.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented frame while reassembling is a violation */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A new start while reassembling is a violation */
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		/* First segment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* First segment may not already contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Running total must stay short of the declared length */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final length must match exactly what was announced */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
6654 
/* Re-segment queued frames after an MPS change.  Not implemented yet;
 * always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
6660 
6661 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6662 {
6663 	u8 event;
6664 
6665 	if (chan->mode != L2CAP_MODE_ERTM)
6666 		return;
6667 
6668 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6669 	l2cap_tx(chan, NULL, NULL, event);
6670 }
6671 
/* Drain the SREJ queue of in-order i-frames.
 *
 * Frames are pulled from srej_q in buffer_seq order and passed to
 * reassembly until a sequence gap, a reassembly error, or local busy
 * stops the loop.  Once the queue is empty the receiver returns to the
 * normal RECV state and acks the peer.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
6705 
/* Handle a received SREJ (Selective Reject) S-frame: the peer requests
 * retransmission of the single I-frame numbered reqseq.  A reqseq that
 * was never sent, or a frame already retransmitted max_tx times, is
 * treated as a protocol violation and disconnects the channel.  The
 * CONN_SREJ_ACT bit together with srej_save_reqseq guards against
 * retransmitting the same frame twice when the SREJ crosses our own
 * poll on the air.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq must refer to a frame that has actually been sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 disables the retransmission limit */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* Answer the poll with F=1 on the retransmitted frame */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this frame was already
			 * resent as part of the poll exchange.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6763 
/* Handle a received REJ S-frame: the peer requests retransmission of
 * all unacked I-frames starting at reqseq.  The CONN_REJ_ACT bit
 * prevents a second retransmit-all when the REJ crosses our own poll
 * on the air.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq must refer to a frame that has actually been sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* skb may be NULL if the frame was already acked; only enforce the
	 * retry limit when the frame is still queued (and max_tx != 0).
	 */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6800 
/* Classify the txseq of a received I-frame relative to the current
 * receive window: expected, duplicate, unexpected (implying missing
 * frames), invalid, or one of the SREJ-specific classes used while in
 * the SREJ_SENT recovery state.  The rx state handlers act on the
 * returned L2CAP_TXSEQ_* value.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq before expected_tx_seq (modulo the window) was already seen */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6886 
/* ERTM receive handler for the normal RECV state.  Dispatches on the
 * received event (I-frame or one of the S-frame events) and performs
 * SDU reassembly, acknowledgement, SREJ-based recovery initiation, and
 * retransmission triggering.  The skb is consumed here unless it was
 * queued or handed on (tracked by skb_in_use).
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* F=1 ends a poll exchange; retransmit unless a REJ
			 * already triggered it
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7038 
/* ERTM receive handler for the SREJ_SENT state: one or more SREJ
 * requests are outstanding and srej_q holds the out-of-order frames
 * received so far.  Retransmitted and new frames are queued, and when
 * the head of the SREJ list is satisfied the queued frames are
 * delivered in order by l2cap_rx_queued_iframes(), which returns the
 * channel to RECV once srej_q drains.  The skb is consumed here unless
 * queued (tracked by skb_in_use).
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived; queued frames may
			 * now be deliverable in order.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-sending the tail SREJ with F=1 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7181 
7182 static int l2cap_finish_move(struct l2cap_chan *chan)
7183 {
7184 	BT_DBG("chan %p", chan);
7185 
7186 	chan->rx_state = L2CAP_RX_STATE_RECV;
7187 
7188 	if (chan->hs_hcon)
7189 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7190 	else
7191 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7192 
7193 	return l2cap_resegment(chan);
7194 }
7195 
/* WAIT_P: a channel move is in progress and we are waiting for the
 * peer to poll (P=1).  On receiving the poll, rewind the transmit
 * state to the peer's reqseq, finish the move (MTU update and
 * resegmentation), answer with F=1, and hand the event on to the RECV
 * state logic.  Frames without P=1, and I-frames (which cannot carry
 * P=1), are protocol errors here.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	/* Restart transmission from the head of the unacked queue */
	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* skb is NULL here: only S-frame events reach the RECV handler */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
7233 
7234 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7235 				 struct l2cap_ctrl *control,
7236 				 struct sk_buff *skb, u8 event)
7237 {
7238 	int err;
7239 
7240 	if (!control->final)
7241 		return -EPROTO;
7242 
7243 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7244 
7245 	chan->rx_state = L2CAP_RX_STATE_RECV;
7246 	l2cap_process_reqseq(chan, control->reqseq);
7247 
7248 	if (!skb_queue_empty(&chan->tx_q))
7249 		chan->tx_send_head = skb_peek(&chan->tx_q);
7250 	else
7251 		chan->tx_send_head = NULL;
7252 
7253 	/* Rewind next_tx_seq to the point expected
7254 	 * by the receiver.
7255 	 */
7256 	chan->next_tx_seq = control->reqseq;
7257 	chan->unacked_frames = 0;
7258 
7259 	if (chan->hs_hcon)
7260 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7261 	else
7262 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7263 
7264 	err = l2cap_resegment(chan);
7265 
7266 	if (!err)
7267 		err = l2cap_rx_state_recv(chan, control, skb, event);
7268 
7269 	return err;
7270 }
7271 
7272 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7273 {
7274 	/* Make sure reqseq is for a packet that has been sent but not acked */
7275 	u16 unacked;
7276 
7277 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7278 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7279 }
7280 
7281 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7282 		    struct sk_buff *skb, u8 event)
7283 {
7284 	int err = 0;
7285 
7286 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7287 	       control, skb, event, chan->rx_state);
7288 
7289 	if (__valid_reqseq(chan, control->reqseq)) {
7290 		switch (chan->rx_state) {
7291 		case L2CAP_RX_STATE_RECV:
7292 			err = l2cap_rx_state_recv(chan, control, skb, event);
7293 			break;
7294 		case L2CAP_RX_STATE_SREJ_SENT:
7295 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7296 						       event);
7297 			break;
7298 		case L2CAP_RX_STATE_WAIT_P:
7299 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7300 			break;
7301 		case L2CAP_RX_STATE_WAIT_F:
7302 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7303 			break;
7304 		default:
7305 			/* shut it down */
7306 			break;
7307 		}
7308 	} else {
7309 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
7310 		       control->reqseq, chan->next_tx_seq,
7311 		       chan->expected_ack_seq);
7312 		l2cap_send_disconn_req(chan, ECONNRESET);
7313 	}
7314 
7315 	return err;
7316 }
7317 
/* Receive path for streaming mode.  There are no retransmissions in
 * this mode, so a frame that is not the next expected txseq simply
 * discards any partially reassembled SDU and resynchronizes the
 * receive sequence state to the new txseq.  Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Lost frame(s): drop the partial SDU and start over */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to the received txseq */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
7369 
/* Common receive entry point for ERTM and streaming channels: unpack
 * the control field, verify FCS and payload length, then route
 * I-frames and S-frames into the rx state machine.  Corrupted or
 * malformed frames are dropped (ERTM recovery will request them
 * again); hard protocol violations disconnect the channel.  Always
 * returns 0; the skb is consumed on all paths.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Account for the SDU length field and FCS when validating the
	 * payload length against the negotiated MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps control->super (RR/REJ/RNR/SREJ) to rx events */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
7462 
7463 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7464 {
7465 	struct l2cap_conn *conn = chan->conn;
7466 	struct l2cap_le_credits pkt;
7467 	u16 return_credits;
7468 
7469 	return_credits = (chan->imtu / chan->mps) + 1;
7470 
7471 	if (chan->rx_credits >= return_credits)
7472 		return;
7473 
7474 	return_credits -= chan->rx_credits;
7475 
7476 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7477 
7478 	chan->rx_credits += return_credits;
7479 
7480 	pkt.cid     = cpu_to_le16(chan->scid);
7481 	pkt.credits = cpu_to_le16(return_credits);
7482 
7483 	chan->ident = l2cap_get_ident(conn);
7484 
7485 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7486 }
7487 
/* Deliver a fully reassembled SDU on an LE flow control / enhanced
 * credit based channel to the upper layer, then replenish the
 * sender's credits.  Returns the recv callback's result.
 */
static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);

	/* Wait recv to confirm reception before updating the credits */
	err = chan->ops->recv(chan, skb);

	/* Update credits whenever an SDU is received */
	l2cap_chan_le_send_credits(chan);

	return err;
}
7502 
/* Per-PDU receive handler for LE flow control and enhanced credit
 * based channels.  Consumes one credit per PDU, starts a new SDU when
 * none is in progress (the first PDU carries the total SDU length),
 * appends subsequent fragments, and delivers the SDU once complete.
 * On reassembly errors the skb and any partial SDU are freed here and
 * 0 is returned so the caller does not double-free; a negative return
 * (no credits / oversized PDU) tells the caller to drop the skb.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU: leading 16-bit field is the SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Single-PDU SDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

/* Falls through here with err == 0 on the success path too */
failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
7606 
/* Deliver an skb received on a data channel.  Looks up the channel by
 * source CID (creating the A2MP channel on demand for L2CAP_CID_A2MP),
 * then dispatches to the mode-specific receive handler.  The channel
 * lock and reference obtained here are released on exit; the skb is
 * consumed on all paths.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			/* Match the hold/lock that l2cap_get_chan_by_scid
			 * would have taken
			 */
			l2cap_chan_hold(chan);
			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() consumes the skb on all paths */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
7683 
/* Deliver a connectionless (G-frame) packet to the channel bound to
 * the given PSM, if any.  Only BR/EDR ACL links are supported.  The
 * reference taken by the channel lookup is dropped before returning;
 * the skb is consumed on all paths.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv takes ownership of the skb when it returns 0 */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
7720 
/* Entry point for a complete L2CAP PDU arriving from the HCI layer.
 * Frames received before the link reaches BT_CONNECTED are queued on
 * pending_rx and replayed later by process_pending_rx().  Validates
 * the basic header length, rejects data from reject-listed LE peers,
 * and routes the payload by CID to the signaling, connectionless, or
 * data channel handlers.  The skb is consumed on all paths.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length field must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
7775 
7776 static void process_pending_rx(struct work_struct *work)
7777 {
7778 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7779 					       pending_rx_work);
7780 	struct sk_buff *skb;
7781 
7782 	BT_DBG("");
7783 
7784 	while ((skb = skb_dequeue(&conn->pending_rx)))
7785 		l2cap_recv_frame(conn, skb);
7786 }
7787 
/* Find or create the L2CAP connection object for @hcon.
 *
 * Returns the existing conn when one is already attached; otherwise
 * allocates a new one, ties it to a fresh HCI channel and initializes
 * all per-connection state.  Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* Hold a reference on the underlying link for conn's lifetime */
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Use the controller's dedicated LE MTU when available,
	 * otherwise fall back to the ACL MTU (also the BR/EDR default).
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		fallthrough;
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise the A2MP fixed channel only when HS is enabled */
	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	/* SMP over BR/EDR needs LE plus Secure Connections support
	 * (or the debug force flag).
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7854 
7855 static bool is_valid_psm(u16 psm, u8 dst_type)
7856 {
7857 	if (!psm)
7858 		return false;
7859 
7860 	if (bdaddr_type_is_le(dst_type))
7861 		return (psm <= 0x00ff);
7862 
7863 	/* PSM must be odd and lsb of upper byte must be 0 */
7864 	return ((psm & 0x0101) == 0x0001);
7865 }
7866 
/* Accumulator passed to l2cap_chan_by_pid() when counting how many
 * deferred ECRED channels a process already has in flight.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* connecting channel; excluded from count */
	struct pid *pid;		/* owning process to match against */
	int count;			/* number of matching channels found */
};
7872 
7873 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7874 {
7875 	struct l2cap_chan_data *d = data;
7876 	struct pid *pid;
7877 
7878 	if (chan == d->chan)
7879 		return;
7880 
7881 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7882 		return;
7883 
7884 	pid = chan->ops->get_peer_pid(chan);
7885 
7886 	/* Only count deferred channels with the same PID/PSM */
7887 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7888 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7889 		return;
7890 
7891 	d->count++;
7892 }
7893 
/* Initiate an outgoing L2CAP channel connection.
 *
 * Validates @psm/@cid against the channel type and mode, creates or
 * reuses the underlying ACL/LE link, attaches the channel to the
 * connection and starts the L2CAP-level connect sequence.
 *
 * Returns 0 on success (including when a connect is already in
 * progress) or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Only raw channels may omit both a valid PSM and a CID */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection oriented channels require a PSM */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* Fixed channels require a CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes that are disabled by module parameters */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* When advertising, connect directly (peer initiates);
		 * otherwise go through the connect-by-scan machinery.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Limit how many deferred ECRED channels one process may have
	 * in flight at once (counting this one, hence count = 1).
	 */
	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* The requested destination CID must not already be in use */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, kick off channel setup right away */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
8079 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8080 
8081 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8082 {
8083 	struct l2cap_conn *conn = chan->conn;
8084 	struct {
8085 		struct l2cap_ecred_reconf_req req;
8086 		__le16 scid;
8087 	} pdu;
8088 
8089 	pdu.req.mtu = cpu_to_le16(chan->imtu);
8090 	pdu.req.mps = cpu_to_le16(chan->mps);
8091 	pdu.scid    = cpu_to_le16(chan->scid);
8092 
8093 	chan->ident = l2cap_get_ident(conn);
8094 
8095 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
8096 		       sizeof(pdu), &pdu);
8097 }
8098 
8099 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8100 {
8101 	if (chan->imtu > mtu)
8102 		return -EINVAL;
8103 
8104 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8105 
8106 	chan->imtu = mtu;
8107 
8108 	l2cap_ecred_reconfigure(chan);
8109 
8110 	return 0;
8111 }
8112 
8113 /* ---- L2CAP interface with lower layer (HCI) ---- */
8114 
8115 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8116 {
8117 	int exact = 0, lm1 = 0, lm2 = 0;
8118 	struct l2cap_chan *c;
8119 
8120 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8121 
8122 	/* Find listening sockets and check their link_mode */
8123 	read_lock(&chan_list_lock);
8124 	list_for_each_entry(c, &chan_list, global_l) {
8125 		if (c->state != BT_LISTEN)
8126 			continue;
8127 
8128 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8129 			lm1 |= HCI_LM_ACCEPT;
8130 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8131 				lm1 |= HCI_LM_MASTER;
8132 			exact++;
8133 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8134 			lm2 |= HCI_LM_ACCEPT;
8135 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8136 				lm2 |= HCI_LM_MASTER;
8137 		}
8138 	}
8139 	read_unlock(&chan_list_lock);
8140 
8141 	return exact ? lm1 : lm2;
8142 }
8143 
8144 /* Find the next fixed channel in BT_LISTEN state, continue iteration
8145  * from an existing channel in the list or from the beginning of the
8146  * global list (by passing NULL as first parameter).
8147  */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after @c, or start from the head of the global list */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Match the local address (or a wildcard bind) ... */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		/* ... and the address type */
		if (src_type != c->src_type)
			continue;

		/* May return NULL if the channel is already being freed */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
8179 
/* HCI callback invoked when an ACL/LE link setup completes.
 *
 * On failure tears down any L2CAP state for the link; on success
 * creates the L2CAP connection, spawns channels for matching
 * fixed-channel listeners and starts normal channel processing.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping our reference on pchan */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
8240 
8241 int l2cap_disconn_ind(struct hci_conn *hcon)
8242 {
8243 	struct l2cap_conn *conn = hcon->l2cap_data;
8244 
8245 	BT_DBG("hcon %p", hcon);
8246 
8247 	if (!conn)
8248 		return HCI_ERROR_REMOTE_USER_TERM;
8249 	return conn->disc_reason;
8250 }
8251 
8252 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8253 {
8254 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8255 		return;
8256 
8257 	BT_DBG("hcon %p reason %d", hcon, reason);
8258 
8259 	l2cap_conn_del(hcon, bt_to_errno(reason));
8260 }
8261 
8262 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8263 {
8264 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8265 		return;
8266 
8267 	if (encrypt == 0x00) {
8268 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8269 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8270 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8271 			   chan->sec_level == BT_SECURITY_FIPS)
8272 			l2cap_chan_close(chan, ECONNREFUSED);
8273 	} else {
8274 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8275 			__clear_chan_timer(chan);
8276 	}
8277 }
8278 
/* HCI callback invoked when authentication/encryption state changes
 * on the link.  Walks every channel on the connection and advances,
 * blocks or resumes it according to the new security state.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* The A2MP fixed channel is not subject to link security */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Successful encryption upgrades the channel security */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Proceed only when the encryption key is long
			 * enough; otherwise schedule a disconnect.
			 */
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On acceptance, immediately start configuration */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
8370 
/* Append fragment into frame respecting the maximum len of rx_skb.
 *
 * Allocates conn->rx_skb on first use (sized @len, which then becomes
 * the expected total length in conn->rx_len) and copies up to @len
 * bytes, consuming them from @skb and decrementing conn->rx_len.
 *
 * Returns the number of bytes copied, or -ENOMEM.
 */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Init rx_len */
		conn->rx_len = len;
	}

	/* Copy as much as the rx_skb can hold */
	len = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
	skb_pull(skb, len);
	conn->rx_len -= len;

	return len;
}
8392 
/* Finish reading the 2-byte L2CAP length field across fragments.
 *
 * Once the length is known, either fixes up conn->rx_len when the
 * already-allocated rx_skb is large enough, or reallocates rx_skb
 * with the exact expected size and replays the bytes read so far.
 *
 * Returns the number of bytes consumed, or a negative error.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
8427 
8428 static void l2cap_recv_reset(struct l2cap_conn *conn)
8429 {
8430 	kfree_skb(conn->rx_skb);
8431 	conn->rx_skb = NULL;
8432 	conn->rx_len = 0;
8433 }
8434 
/* HCI entry point for inbound ACL data.
 *
 * Reassembles L2CAP frames that span several ACL fragments using
 * conn->rx_skb/conn->rx_len as state and feeds every completed frame
 * to l2cap_recv_frame().  Ownership of @skb is always consumed.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated; discard it.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
}
8541 
/* Link-layer event callbacks registered with the HCI core */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
8548 
8549 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8550 {
8551 	struct l2cap_chan *c;
8552 
8553 	read_lock(&chan_list_lock);
8554 
8555 	list_for_each_entry(c, &chan_list, global_l) {
8556 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8557 			   &c->src, c->src_type, &c->dst, c->dst_type,
8558 			   c->state, __le16_to_cpu(c->psm),
8559 			   c->scid, c->dcid, c->imtu, c->omtu,
8560 			   c->sec_level, c->mode);
8561 	}
8562 
8563 	read_unlock(&chan_list_lock);
8564 
8565 	return 0;
8566 }
8567 
/* Generates l2cap_debugfs_fops wrapping l2cap_debugfs_show() */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* debugfs entry created in l2cap_init(), removed in l2cap_exit() */
static struct dentry *l2cap_debugfs;
8571 
8572 int __init l2cap_init(void)
8573 {
8574 	int err;
8575 
8576 	err = l2cap_init_sockets();
8577 	if (err < 0)
8578 		return err;
8579 
8580 	hci_register_cb(&l2cap_cb);
8581 
8582 	if (IS_ERR_OR_NULL(bt_debugfs))
8583 		return 0;
8584 
8585 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8586 					    NULL, &l2cap_debugfs_fops);
8587 
8588 	return 0;
8589 }
8590 
/* Module teardown: undo l2cap_init() in reverse order */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
8597 
/* Writable module parameters toggling optional L2CAP modes at runtime */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8603