1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
37 
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42 
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
45 #include "smp.h"
46 #include "leds.h"
47 #include "msft.h"
48 #include "aosp.h"
49 #include "hci_codec.h"
50 
51 static void hci_rx_work(struct work_struct *work);
52 static void hci_cmd_work(struct work_struct *work);
53 static void hci_tx_work(struct work_struct *work);
54 
55 /* HCI device list */
56 LIST_HEAD(hci_dev_list);
57 DEFINE_RWLOCK(hci_dev_list_lock);
58 
59 /* HCI callback list */
60 LIST_HEAD(hci_cb_list);
61 DEFINE_MUTEX(hci_cb_list_lock);
62 
63 /* HCI ID Numbering */
64 static DEFINE_IDA(hci_index_ida);
65 
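/* Request builders used by the legacy HCI ioctls (HCISETSCAN, HCISETAUTH,
 * HCISETENCRYPT, HCISETLINKPOL) handled in hci_dev_cmd() below.
 */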
66 static int hci_scan_req(struct hci_request *req, unsigned long opt)
67 {
68 	__u8 scan = opt;
69 
70 	BT_DBG("%s %x", req->hdev->name, scan);
71 
72 	/* Inquiry and Page scans */
73 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
74 	return 0;
75 }
76 
77 static int hci_auth_req(struct hci_request *req, unsigned long opt)
78 {
79 	__u8 auth = opt;
80 
81 	BT_DBG("%s %x", req->hdev->name, auth);
82 
83 	/* Authentication */
84 	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
85 	return 0;
86 }
87 
88 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
89 {
90 	__u8 encrypt = opt;
91 
92 	BT_DBG("%s %x", req->hdev->name, encrypt);
93 
94 	/* Encryption */
95 	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
96 	return 0;
97 }
98 
99 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
100 {
101 	__le16 policy = cpu_to_le16(opt);
102 
103 	BT_DBG("%s %x", req->hdev->name, policy);
104 
105 	/* Default link policy */
106 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
107 	return 0;
108 }
109 
110 /* Get HCI device by index.
111  * Device is held on return. */
112 struct hci_dev *hci_dev_get(int index)
113 {
114 	struct hci_dev *hdev = NULL, *d;
115 
116 	BT_DBG("%d", index);
117 
118 	if (index < 0)
119 		return NULL;
120 
121 	read_lock(&hci_dev_list_lock);
122 	list_for_each_entry(d, &hci_dev_list, list) {
123 		if (d->id == index) {
124 			hdev = hci_dev_hold(d);
125 			break;
126 		}
127 	}
128 	read_unlock(&hci_dev_list_lock);
129 	return hdev;
130 }
131 
132 /* ---- Inquiry support ---- */
133 
134 bool hci_discovery_active(struct hci_dev *hdev)
135 {
136 	struct discovery_state *discov = &hdev->discovery;
137 
138 	switch (discov->state) {
139 	case DISCOVERY_FINDING:
140 	case DISCOVERY_RESOLVING:
141 		return true;
142 
143 	default:
144 		return false;
145 	}
146 }
147 
148 void hci_discovery_set_state(struct hci_dev *hdev, int state)
149 {
150 	int old_state = hdev->discovery.state;
151 
152 	if (old_state == state)
153 		return;
154 
155 	hdev->discovery.state = state;
156 
157 	switch (state) {
158 	case DISCOVERY_STOPPED:
159 		hci_update_passive_scan(hdev);
160 
161 		if (old_state != DISCOVERY_STARTING)
162 			mgmt_discovering(hdev, 0);
163 		break;
164 	case DISCOVERY_STARTING:
165 		break;
166 	case DISCOVERY_FINDING:
167 		/* If discovery was not started then it was initiated by the
168 		 * MGMT interface so no MGMT event shall be generated either
169 		 */
170 		if (old_state != DISCOVERY_STARTING) {
171 			hdev->discovery.state = old_state;
172 			return;
173 		}
174 		mgmt_discovering(hdev, 1);
175 		break;
176 	case DISCOVERY_RESOLVING:
177 		break;
178 	case DISCOVERY_STOPPING:
179 		break;
180 	}
181 
182 	bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
183 }
184 
185 void hci_inquiry_cache_flush(struct hci_dev *hdev)
186 {
187 	struct discovery_state *cache = &hdev->discovery;
188 	struct inquiry_entry *p, *n;
189 
190 	list_for_each_entry_safe(p, n, &cache->all, all) {
191 		list_del(&p->all);
192 		kfree(p);
193 	}
194 
195 	INIT_LIST_HEAD(&cache->unknown);
196 	INIT_LIST_HEAD(&cache->resolve);
197 }
198 
199 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
200 					       bdaddr_t *bdaddr)
201 {
202 	struct discovery_state *cache = &hdev->discovery;
203 	struct inquiry_entry *e;
204 
205 	BT_DBG("cache %p, %pMR", cache, bdaddr);
206 
207 	list_for_each_entry(e, &cache->all, all) {
208 		if (!bacmp(&e->data.bdaddr, bdaddr))
209 			return e;
210 	}
211 
212 	return NULL;
213 }
214 
215 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
216 						       bdaddr_t *bdaddr)
217 {
218 	struct discovery_state *cache = &hdev->discovery;
219 	struct inquiry_entry *e;
220 
221 	BT_DBG("cache %p, %pMR", cache, bdaddr);
222 
223 	list_for_each_entry(e, &cache->unknown, list) {
224 		if (!bacmp(&e->data.bdaddr, bdaddr))
225 			return e;
226 	}
227 
228 	return NULL;
229 }
230 
231 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
232 						       bdaddr_t *bdaddr,
233 						       int state)
234 {
235 	struct discovery_state *cache = &hdev->discovery;
236 	struct inquiry_entry *e;
237 
238 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
239 
240 	list_for_each_entry(e, &cache->resolve, list) {
241 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
242 			return e;
243 		if (!bacmp(&e->data.bdaddr, bdaddr))
244 			return e;
245 	}
246 
247 	return NULL;
248 }
249 
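/* Re-insert an entry into the resolve list, keeping entries with a pending
 * name request at the front and ordering the rest by signal strength so that
 * the strongest devices get their names resolved first.
 */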
250 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
251 				      struct inquiry_entry *ie)
252 {
253 	struct discovery_state *cache = &hdev->discovery;
254 	struct list_head *pos = &cache->resolve;
255 	struct inquiry_entry *p;
256 
257 	list_del(&ie->list);
258 
259 	list_for_each_entry(p, &cache->resolve, list) {
260 		if (p->name_state != NAME_PENDING &&
261 		    abs(p->data.rssi) >= abs(ie->data.rssi))
262 			break;
263 		pos = &p->list;
264 	}
265 
266 	list_add(&ie->list, pos);
267 }
268 
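/* Add a device to the inquiry cache or update an existing entry and return
 * MGMT_DEV_FOUND_* flags indicating whether the name still needs to be
 * confirmed and whether only legacy pairing is possible.
 */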
269 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
270 			     bool name_known)
271 {
272 	struct discovery_state *cache = &hdev->discovery;
273 	struct inquiry_entry *ie;
274 	u32 flags = 0;
275 
276 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
277 
278 	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
279 
280 	if (!data->ssp_mode)
281 		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
282 
283 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
284 	if (ie) {
285 		if (!ie->data.ssp_mode)
286 			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
287 
288 		if (ie->name_state == NAME_NEEDED &&
289 		    data->rssi != ie->data.rssi) {
290 			ie->data.rssi = data->rssi;
291 			hci_inquiry_cache_update_resolve(hdev, ie);
292 		}
293 
294 		goto update;
295 	}
296 
297 	/* Entry not in the cache. Add new one. */
298 	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
299 	if (!ie) {
300 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
301 		goto done;
302 	}
303 
304 	list_add(&ie->all, &cache->all);
305 
306 	if (name_known) {
307 		ie->name_state = NAME_KNOWN;
308 	} else {
309 		ie->name_state = NAME_NOT_KNOWN;
310 		list_add(&ie->list, &cache->unknown);
311 	}
312 
313 update:
314 	if (name_known && ie->name_state != NAME_KNOWN &&
315 	    ie->name_state != NAME_PENDING) {
316 		ie->name_state = NAME_KNOWN;
317 		list_del(&ie->list);
318 	}
319 
320 	memcpy(&ie->data, data, sizeof(*data));
321 	ie->timestamp = jiffies;
322 	cache->timestamp = jiffies;
323 
324 	if (ie->name_state == NAME_NOT_KNOWN)
325 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
326 
327 done:
328 	return flags;
329 }
330 
331 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
332 {
333 	struct discovery_state *cache = &hdev->discovery;
334 	struct inquiry_info *info = (struct inquiry_info *) buf;
335 	struct inquiry_entry *e;
336 	int copied = 0;
337 
338 	list_for_each_entry(e, &cache->all, all) {
339 		struct inquiry_data *data = &e->data;
340 
341 		if (copied >= num)
342 			break;
343 
344 		bacpy(&info->bdaddr, &data->bdaddr);
345 		info->pscan_rep_mode	= data->pscan_rep_mode;
346 		info->pscan_period_mode	= data->pscan_period_mode;
347 		info->pscan_mode	= data->pscan_mode;
348 		memcpy(info->dev_class, data->dev_class, 3);
349 		info->clock_offset	= data->clock_offset;
350 
351 		info++;
352 		copied++;
353 	}
354 
355 	BT_DBG("cache %p, copied %d", cache, copied);
356 	return copied;
357 }
358 
359 static int hci_inq_req(struct hci_request *req, unsigned long opt)
360 {
361 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
362 	struct hci_dev *hdev = req->hdev;
363 	struct hci_cp_inquiry cp;
364 
365 	BT_DBG("%s", hdev->name);
366 
367 	if (test_bit(HCI_INQUIRY, &hdev->flags))
368 		return 0;
369 
370 	/* Start Inquiry */
371 	memcpy(&cp.lap, &ir->lap, 3);
372 	cp.length  = ir->length;
373 	cp.num_rsp = ir->num_rsp;
374 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
375 
376 	return 0;
377 }
378 
379 int hci_inquiry(void __user *arg)
380 {
381 	__u8 __user *ptr = arg;
382 	struct hci_inquiry_req ir;
383 	struct hci_dev *hdev;
384 	int err = 0, do_inquiry = 0, max_rsp;
385 	long timeo;
386 	__u8 *buf;
387 
388 	if (copy_from_user(&ir, ptr, sizeof(ir)))
389 		return -EFAULT;
390 
391 	hdev = hci_dev_get(ir.dev_id);
392 	if (!hdev)
393 		return -ENODEV;
394 
395 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
396 		err = -EBUSY;
397 		goto done;
398 	}
399 
400 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
401 		err = -EOPNOTSUPP;
402 		goto done;
403 	}
404 
405 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
406 		err = -EOPNOTSUPP;
407 		goto done;
408 	}
409 
410 	/* Restrict maximum inquiry length to 60 seconds */
411 	if (ir.length > 60) {
412 		err = -EINVAL;
413 		goto done;
414 	}
415 
416 	hci_dev_lock(hdev);
417 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
418 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
419 		hci_inquiry_cache_flush(hdev);
420 		do_inquiry = 1;
421 	}
422 	hci_dev_unlock(hdev);
423 
424 	timeo = ir.length * msecs_to_jiffies(2000);
425 
426 	if (do_inquiry) {
427 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
428 				   timeo, NULL);
429 		if (err < 0)
430 			goto done;
431 
432 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
433 		 * cleared). If it is interrupted by a signal, return -EINTR.
434 		 */
435 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
436 				TASK_INTERRUPTIBLE)) {
437 			err = -EINTR;
438 			goto done;
439 		}
440 	}
441 
442 	/* For an unlimited number of responses we will use a buffer with
443 	 * 255 entries.
444 	 */
445 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
446 
447 	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
448 	 * copy it to user space.
449 	 */
450 	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
451 	if (!buf) {
452 		err = -ENOMEM;
453 		goto done;
454 	}
455 
456 	hci_dev_lock(hdev);
457 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
458 	hci_dev_unlock(hdev);
459 
460 	BT_DBG("num_rsp %d", ir.num_rsp);
461 
462 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
463 		ptr += sizeof(ir);
464 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
465 				 ir.num_rsp))
466 			err = -EFAULT;
467 	} else
468 		err = -EFAULT;
469 
470 	kfree(buf);
471 
472 done:
473 	hci_dev_put(hdev);
474 	return err;
475 }
476 
477 static int hci_dev_do_open(struct hci_dev *hdev)
478 {
479 	int ret = 0;
480 
481 	BT_DBG("%s %p", hdev->name, hdev);
482 
483 	hci_req_sync_lock(hdev);
484 
485 	ret = hci_dev_open_sync(hdev);
486 
487 	hci_req_sync_unlock(hdev);
488 	return ret;
489 }
490 
491 /* ---- HCI ioctl helpers ---- */
492 
493 int hci_dev_open(__u16 dev)
494 {
495 	struct hci_dev *hdev;
496 	int err;
497 
498 	hdev = hci_dev_get(dev);
499 	if (!hdev)
500 		return -ENODEV;
501 
502 	/* Devices that are marked as unconfigured can only be powered
503 	 * up as user channel. Trying to bring them up as normal devices
504 	 * will result in a failure. Only user channel operation is
505 	 * possible.
506 	 *
507 	 * When this function is called for a user channel, the flag
508 	 * HCI_USER_CHANNEL will be set first before attempting to
509 	 * open the device.
510 	 */
511 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
512 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
513 		err = -EOPNOTSUPP;
514 		goto done;
515 	}
516 
517 	/* We need to ensure that no other power on/off work is pending
518 	 * before proceeding to call hci_dev_do_open. This is
519 	 * particularly important if the setup procedure has not yet
520 	 * completed.
521 	 */
522 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
523 		cancel_delayed_work(&hdev->power_off);
524 
525 	/* After this call it is guaranteed that the setup procedure
526 	 * has finished. This means that error conditions like RFKILL
527 	 * or no valid public or static random address apply.
528 	 */
529 	flush_workqueue(hdev->req_workqueue);
530 
531 	/* For controllers not using the management interface and that
532 	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
533 	 * so that pairing works for them. Once the management interface
534 	 * is in use this bit will be cleared again and userspace has
535 	 * to explicitly enable it.
536 	 */
537 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
538 	    !hci_dev_test_flag(hdev, HCI_MGMT))
539 		hci_dev_set_flag(hdev, HCI_BONDABLE);
540 
541 	err = hci_dev_do_open(hdev);
542 
543 done:
544 	hci_dev_put(hdev);
545 	return err;
546 }
547 
548 int hci_dev_do_close(struct hci_dev *hdev)
549 {
550 	int err;
551 
552 	BT_DBG("%s %p", hdev->name, hdev);
553 
554 	hci_req_sync_lock(hdev);
555 
556 	err = hci_dev_close_sync(hdev);
557 
558 	hci_req_sync_unlock(hdev);
559 
560 	return err;
561 }
562 
563 int hci_dev_close(__u16 dev)
564 {
565 	struct hci_dev *hdev;
566 	int err;
567 
568 	hdev = hci_dev_get(dev);
569 	if (!hdev)
570 		return -ENODEV;
571 
572 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
573 		err = -EBUSY;
574 		goto done;
575 	}
576 
577 	cancel_work_sync(&hdev->power_on);
578 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
579 		cancel_delayed_work(&hdev->power_off);
580 
581 	err = hci_dev_do_close(hdev);
582 
583 done:
584 	hci_dev_put(hdev);
585 	return err;
586 }
587 
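/* Perform an HCI reset: drop queued packets, drain pending command work,
 * flush the inquiry cache and connection hash, reset the flow control
 * counters and finally send the reset to the controller.
 */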
588 static int hci_dev_do_reset(struct hci_dev *hdev)
589 {
590 	int ret;
591 
592 	BT_DBG("%s %p", hdev->name, hdev);
593 
594 	hci_req_sync_lock(hdev);
595 
596 	/* Drop queues */
597 	skb_queue_purge(&hdev->rx_q);
598 	skb_queue_purge(&hdev->cmd_q);
599 
600 	/* Cancel these to avoid queueing non-chained pending work */
601 	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
602 	/* Wait for
603 	 *
604 	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
605 	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
606 	 *
607 	 * inside RCU section to see the flag or complete scheduling.
608 	 */
609 	synchronize_rcu();
610 	/* Explicitly cancel works in case scheduled after setting the flag. */
611 	cancel_delayed_work(&hdev->cmd_timer);
612 	cancel_delayed_work(&hdev->ncmd_timer);
613 
614 	/* Avoid potential lockdep warnings from the *_flush() calls by
615 	 * ensuring the workqueue is empty up front.
616 	 */
617 	drain_workqueue(hdev->workqueue);
618 
619 	hci_dev_lock(hdev);
620 	hci_inquiry_cache_flush(hdev);
621 	hci_conn_hash_flush(hdev);
622 	hci_dev_unlock(hdev);
623 
624 	if (hdev->flush)
625 		hdev->flush(hdev);
626 
627 	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
628 
629 	atomic_set(&hdev->cmd_cnt, 1);
630 	hdev->acl_cnt = 0;
631 	hdev->sco_cnt = 0;
632 	hdev->le_cnt = 0;
633 	hdev->iso_cnt = 0;
634 
635 	ret = hci_reset_sync(hdev);
636 
637 	hci_req_sync_unlock(hdev);
638 	return ret;
639 }
640 
641 int hci_dev_reset(__u16 dev)
642 {
643 	struct hci_dev *hdev;
644 	int err;
645 
646 	hdev = hci_dev_get(dev);
647 	if (!hdev)
648 		return -ENODEV;
649 
650 	if (!test_bit(HCI_UP, &hdev->flags)) {
651 		err = -ENETDOWN;
652 		goto done;
653 	}
654 
655 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
656 		err = -EBUSY;
657 		goto done;
658 	}
659 
660 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
661 		err = -EOPNOTSUPP;
662 		goto done;
663 	}
664 
665 	err = hci_dev_do_reset(hdev);
666 
667 done:
668 	hci_dev_put(hdev);
669 	return err;
670 }
671 
672 int hci_dev_reset_stat(__u16 dev)
673 {
674 	struct hci_dev *hdev;
675 	int ret = 0;
676 
677 	hdev = hci_dev_get(dev);
678 	if (!hdev)
679 		return -ENODEV;
680 
681 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
682 		ret = -EBUSY;
683 		goto done;
684 	}
685 
686 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
687 		ret = -EOPNOTSUPP;
688 		goto done;
689 	}
690 
691 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
692 
693 done:
694 	hci_dev_put(hdev);
695 	return ret;
696 }
697 
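/* Keep the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync with a scan
 * mode that was changed through the legacy HCISETSCAN ioctl and notify the
 * management interface about the new settings.
 */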
698 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
699 {
700 	bool conn_changed, discov_changed;
701 
702 	BT_DBG("%s scan 0x%02x", hdev->name, scan);
703 
704 	if ((scan & SCAN_PAGE))
705 		conn_changed = !hci_dev_test_and_set_flag(hdev,
706 							  HCI_CONNECTABLE);
707 	else
708 		conn_changed = hci_dev_test_and_clear_flag(hdev,
709 							   HCI_CONNECTABLE);
710 
711 	if ((scan & SCAN_INQUIRY)) {
712 		discov_changed = !hci_dev_test_and_set_flag(hdev,
713 							    HCI_DISCOVERABLE);
714 	} else {
715 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
716 		discov_changed = hci_dev_test_and_clear_flag(hdev,
717 							     HCI_DISCOVERABLE);
718 	}
719 
720 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
721 		return;
722 
723 	if (conn_changed || discov_changed) {
724 		/* In case this was disabled through mgmt */
725 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
726 
727 		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
728 			hci_update_adv_data(hdev, hdev->cur_adv_instance);
729 
730 		mgmt_new_settings(hdev);
731 	}
732 }
733 
734 int hci_dev_cmd(unsigned int cmd, void __user *arg)
735 {
736 	struct hci_dev *hdev;
737 	struct hci_dev_req dr;
738 	int err = 0;
739 
740 	if (copy_from_user(&dr, arg, sizeof(dr)))
741 		return -EFAULT;
742 
743 	hdev = hci_dev_get(dr.dev_id);
744 	if (!hdev)
745 		return -ENODEV;
746 
747 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
748 		err = -EBUSY;
749 		goto done;
750 	}
751 
752 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
753 		err = -EOPNOTSUPP;
754 		goto done;
755 	}
756 
757 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
758 		err = -EOPNOTSUPP;
759 		goto done;
760 	}
761 
762 	switch (cmd) {
763 	case HCISETAUTH:
764 		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
765 				   HCI_INIT_TIMEOUT, NULL);
766 		break;
767 
768 	case HCISETENCRYPT:
769 		if (!lmp_encrypt_capable(hdev)) {
770 			err = -EOPNOTSUPP;
771 			break;
772 		}
773 
774 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
775 			/* Auth must be enabled first */
776 			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
777 					   HCI_INIT_TIMEOUT, NULL);
778 			if (err)
779 				break;
780 		}
781 
782 		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
783 				   HCI_INIT_TIMEOUT, NULL);
784 		break;
785 
786 	case HCISETSCAN:
787 		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
788 				   HCI_INIT_TIMEOUT, NULL);
789 
790 		/* Ensure that the connectable and discoverable states
791 		 * get correctly modified as this was a non-mgmt change.
792 		 */
793 		if (!err)
794 			hci_update_passive_scan_state(hdev, dr.dev_opt);
795 		break;
796 
797 	case HCISETLINKPOL:
798 		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
799 				   HCI_INIT_TIMEOUT, NULL);
800 		break;
801 
802 	case HCISETLINKMODE:
803 		hdev->link_mode = ((__u16) dr.dev_opt) &
804 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
805 		break;
806 
807 	case HCISETPTYPE:
808 		if (hdev->pkt_type == (__u16) dr.dev_opt)
809 			break;
810 
811 		hdev->pkt_type = (__u16) dr.dev_opt;
812 		mgmt_phy_configuration_changed(hdev, NULL);
813 		break;
814 
815 	case HCISETACLMTU:
816 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
817 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
818 		break;
819 
820 	case HCISETSCOMTU:
821 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
822 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
823 		break;
824 
825 	default:
826 		err = -EINVAL;
827 		break;
828 	}
829 
830 done:
831 	hci_dev_put(hdev);
832 	return err;
833 }
834 
835 int hci_get_dev_list(void __user *arg)
836 {
837 	struct hci_dev *hdev;
838 	struct hci_dev_list_req *dl;
839 	struct hci_dev_req *dr;
840 	int n = 0, size, err;
841 	__u16 dev_num;
842 
843 	if (get_user(dev_num, (__u16 __user *) arg))
844 		return -EFAULT;
845 
846 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
847 		return -EINVAL;
848 
849 	size = sizeof(*dl) + dev_num * sizeof(*dr);
850 
851 	dl = kzalloc(size, GFP_KERNEL);
852 	if (!dl)
853 		return -ENOMEM;
854 
855 	dr = dl->dev_req;
856 
857 	read_lock(&hci_dev_list_lock);
858 	list_for_each_entry(hdev, &hci_dev_list, list) {
859 		unsigned long flags = hdev->flags;
860 
861 		/* When the auto-off is configured it means the transport
862 		 * is running, but in that case still indicate that the
863 		 * device is actually down.
864 		 */
865 		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
866 			flags &= ~BIT(HCI_UP);
867 
868 		(dr + n)->dev_id  = hdev->id;
869 		(dr + n)->dev_opt = flags;
870 
871 		if (++n >= dev_num)
872 			break;
873 	}
874 	read_unlock(&hci_dev_list_lock);
875 
876 	dl->dev_num = n;
877 	size = sizeof(*dl) + n * sizeof(*dr);
878 
879 	err = copy_to_user(arg, dl, size);
880 	kfree(dl);
881 
882 	return err ? -EFAULT : 0;
883 }
884 
885 int hci_get_dev_info(void __user *arg)
886 {
887 	struct hci_dev *hdev;
888 	struct hci_dev_info di;
889 	unsigned long flags;
890 	int err = 0;
891 
892 	if (copy_from_user(&di, arg, sizeof(di)))
893 		return -EFAULT;
894 
895 	hdev = hci_dev_get(di.dev_id);
896 	if (!hdev)
897 		return -ENODEV;
898 
899 	/* When the auto-off is configured it means the transport
900 	 * is running, but in that case still indicate that the
901 	 * device is actually down.
902 	 */
903 	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
904 		flags = hdev->flags & ~BIT(HCI_UP);
905 	else
906 		flags = hdev->flags;
907 
908 	strscpy(di.name, hdev->name, sizeof(di.name));
909 	di.bdaddr   = hdev->bdaddr;
910 	di.type     = (hdev->bus & 0x0f);
911 	di.flags    = flags;
912 	di.pkt_type = hdev->pkt_type;
913 	if (lmp_bredr_capable(hdev)) {
914 		di.acl_mtu  = hdev->acl_mtu;
915 		di.acl_pkts = hdev->acl_pkts;
916 		di.sco_mtu  = hdev->sco_mtu;
917 		di.sco_pkts = hdev->sco_pkts;
918 	} else {
919 		di.acl_mtu  = hdev->le_mtu;
920 		di.acl_pkts = hdev->le_pkts;
921 		di.sco_mtu  = 0;
922 		di.sco_pkts = 0;
923 	}
924 	di.link_policy = hdev->link_policy;
925 	di.link_mode   = hdev->link_mode;
926 
927 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
928 	memcpy(&di.features, &hdev->features, sizeof(di.features));
929 
930 	if (copy_to_user(arg, &di, sizeof(di)))
931 		err = -EFAULT;
932 
933 	hci_dev_put(hdev);
934 
935 	return err;
936 }
937 
938 /* ---- Interface to HCI drivers ---- */
939 
940 static int hci_dev_do_poweroff(struct hci_dev *hdev)
941 {
942 	int err;
943 
944 	BT_DBG("%s %p", hdev->name, hdev);
945 
946 	hci_req_sync_lock(hdev);
947 
948 	err = hci_set_powered_sync(hdev, false);
949 
950 	hci_req_sync_unlock(hdev);
951 
952 	return err;
953 }
954 
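/* rfkill callback: power the controller off when it gets blocked (unless it
 * is owned by the user channel or still in setup/config) and clear the
 * HCI_RFKILLED flag again when it is unblocked.
 */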
955 static int hci_rfkill_set_block(void *data, bool blocked)
956 {
957 	struct hci_dev *hdev = data;
958 	int err;
959 
960 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
961 
962 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
963 		return -EBUSY;
964 
965 	if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
966 		return 0;
967 
968 	if (blocked) {
969 		hci_dev_set_flag(hdev, HCI_RFKILLED);
970 
971 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
972 		    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
973 			err = hci_dev_do_poweroff(hdev);
974 			if (err) {
975 				bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
976 					   err);
977 
978 				/* Make sure the device is still closed even if
979 				 * anything during power off sequence (eg.
980 				 * disconnecting devices) failed.
981 				 */
982 				hci_dev_do_close(hdev);
983 			}
984 		}
985 	} else {
986 		hci_dev_clear_flag(hdev, HCI_RFKILLED);
987 	}
988 
989 	return 0;
990 }
991 
992 static const struct rfkill_ops hci_rfkill_ops = {
993 	.set_block = hci_rfkill_set_block,
994 };
995 
996 static void hci_power_on(struct work_struct *work)
997 {
998 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
999 	int err;
1000 
1001 	BT_DBG("%s", hdev->name);
1002 
1003 	if (test_bit(HCI_UP, &hdev->flags) &&
1004 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
1005 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1006 		cancel_delayed_work(&hdev->power_off);
1007 		err = hci_powered_update_sync(hdev);
1008 		mgmt_power_on(hdev, err);
1009 		return;
1010 	}
1011 
1012 	err = hci_dev_do_open(hdev);
1013 	if (err < 0) {
1014 		hci_dev_lock(hdev);
1015 		mgmt_set_powered_failed(hdev, err);
1016 		hci_dev_unlock(hdev);
1017 		return;
1018 	}
1019 
1020 	/* During the HCI setup phase, a few error conditions are
1021 	 * ignored and they need to be checked now. If they are still
1022 	 * valid, it is important to turn the device back off.
1023 	 */
1024 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
1025 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
1026 	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1027 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
1028 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
1029 		hci_dev_do_close(hdev);
1030 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
1031 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1032 				   HCI_AUTO_OFF_TIMEOUT);
1033 	}
1034 
1035 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
1036 		/* For unconfigured devices, set the HCI_RAW flag
1037 		 * so that userspace can easily identify them.
1038 		 */
1039 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1040 			set_bit(HCI_RAW, &hdev->flags);
1041 
1042 		/* For fully configured devices, this will send
1043 		 * the Index Added event. For unconfigured devices,
1044 		 * it will send an Unconfigured Index Added event.
1045 		 *
1046 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
1047 		 * and no event will be sent.
1048 		 */
1049 		mgmt_index_added(hdev);
1050 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
1051 		/* When the controller is now configured, then it
1052 		 * is important to clear the HCI_RAW flag.
1053 		 */
1054 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1055 			clear_bit(HCI_RAW, &hdev->flags);
1056 
1057 		/* Powering on the controller with HCI_CONFIG set only
1058 		 * happens with the transition from unconfigured to
1059 		 * configured. This will send the Index Added event.
1060 		 */
1061 		mgmt_index_added(hdev);
1062 	}
1063 }
1064 
1065 static void hci_power_off(struct work_struct *work)
1066 {
1067 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1068 					    power_off.work);
1069 
1070 	BT_DBG("%s", hdev->name);
1071 
1072 	hci_dev_do_close(hdev);
1073 }
1074 
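/* Recover from a hardware error reported by the controller: give the driver
 * a chance to handle it via the hw_error hook and then restart the device
 * with a close/open cycle.
 */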
1075 static void hci_error_reset(struct work_struct *work)
1076 {
1077 	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1078 
1079 	hci_dev_hold(hdev);
1080 	BT_DBG("%s", hdev->name);
1081 
1082 	if (hdev->hw_error)
1083 		hdev->hw_error(hdev, hdev->hw_error_code);
1084 	else
1085 		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1086 
1087 	if (!hci_dev_do_close(hdev))
1088 		hci_dev_do_open(hdev);
1089 
1090 	hci_dev_put(hdev);
1091 }
1092 
1093 void hci_uuids_clear(struct hci_dev *hdev)
1094 {
1095 	struct bt_uuid *uuid, *tmp;
1096 
1097 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1098 		list_del(&uuid->list);
1099 		kfree(uuid);
1100 	}
1101 }
1102 
1103 void hci_link_keys_clear(struct hci_dev *hdev)
1104 {
1105 	struct link_key *key, *tmp;
1106 
1107 	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1108 		list_del_rcu(&key->list);
1109 		kfree_rcu(key, rcu);
1110 	}
1111 }
1112 
1113 void hci_smp_ltks_clear(struct hci_dev *hdev)
1114 {
1115 	struct smp_ltk *k, *tmp;
1116 
1117 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1118 		list_del_rcu(&k->list);
1119 		kfree_rcu(k, rcu);
1120 	}
1121 }
1122 
1123 void hci_smp_irks_clear(struct hci_dev *hdev)
1124 {
1125 	struct smp_irk *k, *tmp;
1126 
1127 	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1128 		list_del_rcu(&k->list);
1129 		kfree_rcu(k, rcu);
1130 	}
1131 }
1132 
1133 void hci_blocked_keys_clear(struct hci_dev *hdev)
1134 {
1135 	struct blocked_key *b, *tmp;
1136 
1137 	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1138 		list_del_rcu(&b->list);
1139 		kfree_rcu(b, rcu);
1140 	}
1141 }
1142 
1143 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1144 {
1145 	bool blocked = false;
1146 	struct blocked_key *b;
1147 
1148 	rcu_read_lock();
1149 	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1150 		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1151 			blocked = true;
1152 			break;
1153 		}
1154 	}
1155 
1156 	rcu_read_unlock();
1157 	return blocked;
1158 }
1159 
1160 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1161 {
1162 	struct link_key *k;
1163 
1164 	rcu_read_lock();
1165 	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1166 		if (bacmp(bdaddr, &k->bdaddr) == 0) {
1167 			rcu_read_unlock();
1168 
1169 			if (hci_is_blocked_key(hdev,
1170 					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
1171 					       k->val)) {
1172 				bt_dev_warn_ratelimited(hdev,
1173 							"Link key blocked for %pMR",
1174 							&k->bdaddr);
1175 				return NULL;
1176 			}
1177 
1178 			return k;
1179 		}
1180 	}
1181 	rcu_read_unlock();
1182 
1183 	return NULL;
1184 }
1185 
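/* Decide whether a newly created link key should be stored persistently,
 * based on the key type and on whether either side required dedicated or
 * general bonding during pairing.
 */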
1186 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1187 			       u8 key_type, u8 old_key_type)
1188 {
1189 	/* Legacy key */
1190 	if (key_type < 0x03)
1191 		return true;
1192 
1193 	/* Debug keys are insecure so don't store them persistently */
1194 	if (key_type == HCI_LK_DEBUG_COMBINATION)
1195 		return false;
1196 
1197 	/* Changed combination key and there's no previous one */
1198 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1199 		return false;
1200 
1201 	/* Security mode 3 case */
1202 	if (!conn)
1203 		return true;
1204 
1205 	/* BR/EDR key derived using SC from an LE link */
1206 	if (conn->type == LE_LINK)
1207 		return true;
1208 
1209 	/* Neither local nor remote side had no-bonding as requirement */
1210 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1211 		return true;
1212 
1213 	/* Local side had dedicated bonding as requirement */
1214 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1215 		return true;
1216 
1217 	/* Remote side had dedicated bonding as requirement */
1218 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1219 		return true;
1220 
1221 	/* If none of the above criteria match, then don't store the key
1222 	 * persistently */
1223 	return false;
1224 }
1225 
1226 static u8 ltk_role(u8 type)
1227 {
1228 	if (type == SMP_LTK)
1229 		return HCI_ROLE_MASTER;
1230 
1231 	return HCI_ROLE_SLAVE;
1232 }
1233 
1234 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1235 			     u8 addr_type, u8 role)
1236 {
1237 	struct smp_ltk *k;
1238 
1239 	rcu_read_lock();
1240 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1241 		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1242 			continue;
1243 
1244 		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1245 			rcu_read_unlock();
1246 
1247 			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1248 					       k->val)) {
1249 				bt_dev_warn_ratelimited(hdev,
1250 							"LTK blocked for %pMR",
1251 							&k->bdaddr);
1252 				return NULL;
1253 			}
1254 
1255 			return k;
1256 		}
1257 	}
1258 	rcu_read_unlock();
1259 
1260 	return NULL;
1261 }
1262 
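/* Resolve a Resolvable Private Address: first look for a cached RPA match,
 * then try every stored IRK with smp_irk_matches() and cache the RPA on the
 * matching entry. Blocked identity keys are never returned.
 */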
1263 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1264 {
1265 	struct smp_irk *irk_to_return = NULL;
1266 	struct smp_irk *irk;
1267 
1268 	rcu_read_lock();
1269 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1270 		if (!bacmp(&irk->rpa, rpa)) {
1271 			irk_to_return = irk;
1272 			goto done;
1273 		}
1274 	}
1275 
1276 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1277 		if (smp_irk_matches(hdev, irk->val, rpa)) {
1278 			bacpy(&irk->rpa, rpa);
1279 			irk_to_return = irk;
1280 			goto done;
1281 		}
1282 	}
1283 
1284 done:
1285 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1286 						irk_to_return->val)) {
1287 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1288 					&irk_to_return->bdaddr);
1289 		irk_to_return = NULL;
1290 	}
1291 
1292 	rcu_read_unlock();
1293 
1294 	return irk_to_return;
1295 }
1296 
1297 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1298 				     u8 addr_type)
1299 {
1300 	struct smp_irk *irk_to_return = NULL;
1301 	struct smp_irk *irk;
1302 
1303 	/* Identity Address must be public or static random */
1304 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1305 		return NULL;
1306 
1307 	rcu_read_lock();
1308 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1309 		if (addr_type == irk->addr_type &&
1310 		    bacmp(bdaddr, &irk->bdaddr) == 0) {
1311 			irk_to_return = irk;
1312 			goto done;
1313 		}
1314 	}
1315 
1316 done:
1317 
1318 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1319 						irk_to_return->val)) {
1320 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1321 					&irk_to_return->bdaddr);
1322 		irk_to_return = NULL;
1323 	}
1324 
1325 	rcu_read_unlock();
1326 
1327 	return irk_to_return;
1328 }
1329 
1330 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1331 				  bdaddr_t *bdaddr, u8 *val, u8 type,
1332 				  u8 pin_len, bool *persistent)
1333 {
1334 	struct link_key *key, *old_key;
1335 	u8 old_key_type;
1336 
1337 	old_key = hci_find_link_key(hdev, bdaddr);
1338 	if (old_key) {
1339 		old_key_type = old_key->type;
1340 		key = old_key;
1341 	} else {
1342 		old_key_type = conn ? conn->key_type : 0xff;
1343 		key = kzalloc(sizeof(*key), GFP_KERNEL);
1344 		if (!key)
1345 			return NULL;
1346 		list_add_rcu(&key->list, &hdev->link_keys);
1347 	}
1348 
1349 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1350 
1351 	/* Some buggy controller combinations generate a changed
1352 	 * combination key for legacy pairing even when there's no
1353 	 * previous key */
1354 	if (type == HCI_LK_CHANGED_COMBINATION &&
1355 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1356 		type = HCI_LK_COMBINATION;
1357 		if (conn)
1358 			conn->key_type = type;
1359 	}
1360 
1361 	bacpy(&key->bdaddr, bdaddr);
1362 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1363 	key->pin_len = pin_len;
1364 
1365 	if (type == HCI_LK_CHANGED_COMBINATION)
1366 		key->type = old_key_type;
1367 	else
1368 		key->type = type;
1369 
1370 	if (persistent)
1371 		*persistent = hci_persistent_key(hdev, conn, type,
1372 						 old_key_type);
1373 
1374 	return key;
1375 }
1376 
1377 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1378 			    u8 addr_type, u8 type, u8 authenticated,
1379 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1380 {
1381 	struct smp_ltk *key, *old_key;
1382 	u8 role = ltk_role(type);
1383 
1384 	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1385 	if (old_key)
1386 		key = old_key;
1387 	else {
1388 		key = kzalloc(sizeof(*key), GFP_KERNEL);
1389 		if (!key)
1390 			return NULL;
1391 		list_add_rcu(&key->list, &hdev->long_term_keys);
1392 	}
1393 
1394 	bacpy(&key->bdaddr, bdaddr);
1395 	key->bdaddr_type = addr_type;
1396 	memcpy(key->val, tk, sizeof(key->val));
1397 	key->authenticated = authenticated;
1398 	key->ediv = ediv;
1399 	key->rand = rand;
1400 	key->enc_size = enc_size;
1401 	key->type = type;
1402 
1403 	return key;
1404 }
1405 
1406 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1407 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
1408 {
1409 	struct smp_irk *irk;
1410 
1411 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1412 	if (!irk) {
1413 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1414 		if (!irk)
1415 			return NULL;
1416 
1417 		bacpy(&irk->bdaddr, bdaddr);
1418 		irk->addr_type = addr_type;
1419 
1420 		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1421 	}
1422 
1423 	memcpy(irk->val, val, 16);
1424 	bacpy(&irk->rpa, rpa);
1425 
1426 	return irk;
1427 }
1428 
1429 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1430 {
1431 	struct link_key *key;
1432 
1433 	key = hci_find_link_key(hdev, bdaddr);
1434 	if (!key)
1435 		return -ENOENT;
1436 
1437 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1438 
1439 	list_del_rcu(&key->list);
1440 	kfree_rcu(key, rcu);
1441 
1442 	return 0;
1443 }
1444 
1445 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1446 {
1447 	struct smp_ltk *k, *tmp;
1448 	int removed = 0;
1449 
1450 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1451 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1452 			continue;
1453 
1454 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1455 
1456 		list_del_rcu(&k->list);
1457 		kfree_rcu(k, rcu);
1458 		removed++;
1459 	}
1460 
1461 	return removed ? 0 : -ENOENT;
1462 }
1463 
1464 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1465 {
1466 	struct smp_irk *k, *tmp;
1467 
1468 	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1469 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1470 			continue;
1471 
1472 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1473 
1474 		list_del_rcu(&k->list);
1475 		kfree_rcu(k, rcu);
1476 	}
1477 }
1478 
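/* Check whether a bond exists with the given device: a stored link key for
 * BR/EDR addresses, or a long term key (after resolving the identity address
 * through the IRK list) for LE addresses.
 */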
1479 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1480 {
1481 	struct smp_ltk *k;
1482 	struct smp_irk *irk;
1483 	u8 addr_type;
1484 
1485 	if (type == BDADDR_BREDR) {
1486 		if (hci_find_link_key(hdev, bdaddr))
1487 			return true;
1488 		return false;
1489 	}
1490 
1491 	/* Convert to HCI addr type which struct smp_ltk uses */
1492 	if (type == BDADDR_LE_PUBLIC)
1493 		addr_type = ADDR_LE_DEV_PUBLIC;
1494 	else
1495 		addr_type = ADDR_LE_DEV_RANDOM;
1496 
1497 	irk = hci_get_irk(hdev, bdaddr, addr_type);
1498 	if (irk) {
1499 		bdaddr = &irk->bdaddr;
1500 		addr_type = irk->addr_type;
1501 	}
1502 
1503 	rcu_read_lock();
1504 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1505 		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1506 			rcu_read_unlock();
1507 			return true;
1508 		}
1509 	}
1510 	rcu_read_unlock();
1511 
1512 	return false;
1513 }
1514 
1515 /* HCI command timer function */
1516 static void hci_cmd_timeout(struct work_struct *work)
1517 {
1518 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1519 					    cmd_timer.work);
1520 
1521 	if (hdev->req_skb) {
1522 		u16 opcode = hci_skb_opcode(hdev->req_skb);
1523 
1524 		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1525 
1526 		hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
1527 	} else {
1528 		bt_dev_err(hdev, "command tx timeout");
1529 	}
1530 
1531 	if (hdev->cmd_timeout)
1532 		hdev->cmd_timeout(hdev);
1533 
1534 	atomic_set(&hdev->cmd_cnt, 1);
1535 	queue_work(hdev->workqueue, &hdev->cmd_work);
1536 }
1537 
1538 /* HCI ncmd timer function */
1539 static void hci_ncmd_timeout(struct work_struct *work)
1540 {
1541 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1542 					    ncmd_timer.work);
1543 
1544 	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1545 
1546 	/* During HCI_INIT phase no events can be injected if the ncmd timer
1547 	 * triggers since the procedure has its own timeout handling.
1548 	 */
1549 	if (test_bit(HCI_INIT, &hdev->flags))
1550 		return;
1551 
1552 	/* This is an irrecoverable state, inject hardware error event */
1553 	hci_reset_dev(hdev);
1554 }
1555 
1556 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1557 					  bdaddr_t *bdaddr, u8 bdaddr_type)
1558 {
1559 	struct oob_data *data;
1560 
1561 	list_for_each_entry(data, &hdev->remote_oob_data, list) {
1562 		if (bacmp(bdaddr, &data->bdaddr) != 0)
1563 			continue;
1564 		if (data->bdaddr_type != bdaddr_type)
1565 			continue;
1566 		return data;
1567 	}
1568 
1569 	return NULL;
1570 }
1571 
1572 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1573 			       u8 bdaddr_type)
1574 {
1575 	struct oob_data *data;
1576 
1577 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1578 	if (!data)
1579 		return -ENOENT;
1580 
1581 	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1582 
1583 	list_del(&data->list);
1584 	kfree(data);
1585 
1586 	return 0;
1587 }
1588 
1589 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1590 {
1591 	struct oob_data *data, *n;
1592 
1593 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1594 		list_del(&data->list);
1595 		kfree(data);
1596 	}
1597 }
1598 
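/* Store remote OOB data for Secure Simple Pairing. The present field encodes
 * which values are valid: 0x01 for P-192 only, 0x02 for P-256 only and 0x03
 * when both hash/randomizer pairs were provided.
 */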
1599 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1600 			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
1601 			    u8 *hash256, u8 *rand256)
1602 {
1603 	struct oob_data *data;
1604 
1605 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1606 	if (!data) {
1607 		data = kmalloc(sizeof(*data), GFP_KERNEL);
1608 		if (!data)
1609 			return -ENOMEM;
1610 
1611 		bacpy(&data->bdaddr, bdaddr);
1612 		data->bdaddr_type = bdaddr_type;
1613 		list_add(&data->list, &hdev->remote_oob_data);
1614 	}
1615 
1616 	if (hash192 && rand192) {
1617 		memcpy(data->hash192, hash192, sizeof(data->hash192));
1618 		memcpy(data->rand192, rand192, sizeof(data->rand192));
1619 		if (hash256 && rand256)
1620 			data->present = 0x03;
1621 	} else {
1622 		memset(data->hash192, 0, sizeof(data->hash192));
1623 		memset(data->rand192, 0, sizeof(data->rand192));
1624 		if (hash256 && rand256)
1625 			data->present = 0x02;
1626 		else
1627 			data->present = 0x00;
1628 	}
1629 
1630 	if (hash256 && rand256) {
1631 		memcpy(data->hash256, hash256, sizeof(data->hash256));
1632 		memcpy(data->rand256, rand256, sizeof(data->rand256));
1633 	} else {
1634 		memset(data->hash256, 0, sizeof(data->hash256));
1635 		memset(data->rand256, 0, sizeof(data->rand256));
1636 		if (hash192 && rand192)
1637 			data->present = 0x01;
1638 	}
1639 
1640 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
1641 
1642 	return 0;
1643 }
1644 
1645 /* This function requires the caller holds hdev->lock */
1646 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1647 {
1648 	struct adv_info *adv_instance;
1649 
1650 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1651 		if (adv_instance->instance == instance)
1652 			return adv_instance;
1653 	}
1654 
1655 	return NULL;
1656 }
1657 
1658 /* This function requires the caller holds hdev->lock */
1659 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1660 {
1661 	struct adv_info *cur_instance;
1662 
1663 	cur_instance = hci_find_adv_instance(hdev, instance);
1664 	if (!cur_instance)
1665 		return NULL;
1666 
1667 	if (cur_instance == list_last_entry(&hdev->adv_instances,
1668 					    struct adv_info, list))
1669 		return list_first_entry(&hdev->adv_instances,
1670 						 struct adv_info, list);
1671 	else
1672 		return list_next_entry(cur_instance, list);
1673 }
1674 
1675 /* This function requires the caller holds hdev->lock */
1676 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1677 {
1678 	struct adv_info *adv_instance;
1679 
1680 	adv_instance = hci_find_adv_instance(hdev, instance);
1681 	if (!adv_instance)
1682 		return -ENOENT;
1683 
1684 	BT_DBG("%s removing %dMR", hdev->name, instance);
1685 
1686 	if (hdev->cur_adv_instance == instance) {
1687 		if (hdev->adv_instance_timeout) {
1688 			cancel_delayed_work(&hdev->adv_instance_expire);
1689 			hdev->adv_instance_timeout = 0;
1690 		}
1691 		hdev->cur_adv_instance = 0x00;
1692 	}
1693 
1694 	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1695 
1696 	list_del(&adv_instance->list);
1697 	kfree(adv_instance);
1698 
1699 	hdev->adv_instance_cnt--;
1700 
1701 	return 0;
1702 }
1703 
1704 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1705 {
1706 	struct adv_info *adv_instance, *n;
1707 
1708 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1709 		adv_instance->rpa_expired = rpa_expired;
1710 }
1711 
1712 /* This function requires the caller holds hdev->lock */
1713 void hci_adv_instances_clear(struct hci_dev *hdev)
1714 {
1715 	struct adv_info *adv_instance, *n;
1716 
1717 	if (hdev->adv_instance_timeout) {
1718 		cancel_delayed_work(&hdev->adv_instance_expire);
1719 		hdev->adv_instance_timeout = 0;
1720 	}
1721 
1722 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1723 		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1724 		list_del(&adv_instance->list);
1725 		kfree(adv_instance);
1726 	}
1727 
1728 	hdev->adv_instance_cnt = 0;
1729 	hdev->cur_adv_instance = 0x00;
1730 }
1731 
1732 static void adv_instance_rpa_expired(struct work_struct *work)
1733 {
1734 	struct adv_info *adv_instance = container_of(work, struct adv_info,
1735 						     rpa_expired_cb.work);
1736 
1737 	BT_DBG("");
1738 
1739 	adv_instance->rpa_expired = true;
1740 }
1741 
1742 /* This function requires the caller holds hdev->lock */
1743 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1744 				      u32 flags, u16 adv_data_len, u8 *adv_data,
1745 				      u16 scan_rsp_len, u8 *scan_rsp_data,
1746 				      u16 timeout, u16 duration, s8 tx_power,
1747 				      u32 min_interval, u32 max_interval,
1748 				      u8 mesh_handle)
1749 {
1750 	struct adv_info *adv;
1751 
1752 	adv = hci_find_adv_instance(hdev, instance);
1753 	if (adv) {
1754 		memset(adv->adv_data, 0, sizeof(adv->adv_data));
1755 		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1756 		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1757 	} else {
1758 		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1759 		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1760 			return ERR_PTR(-EOVERFLOW);
1761 
1762 		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1763 		if (!adv)
1764 			return ERR_PTR(-ENOMEM);
1765 
1766 		adv->pending = true;
1767 		adv->instance = instance;
1768 
1769 		/* If the controller supports only one set and the instance is set to
1770 		 * 1 then there is no option other than using handle 0x00.
1771 		 */
1772 		if (hdev->le_num_of_adv_sets == 1 && instance == 1)
1773 			adv->handle = 0x00;
1774 		else
1775 			adv->handle = instance;
1776 
1777 		list_add(&adv->list, &hdev->adv_instances);
1778 		hdev->adv_instance_cnt++;
1779 	}
1780 
1781 	adv->flags = flags;
1782 	adv->min_interval = min_interval;
1783 	adv->max_interval = max_interval;
1784 	adv->tx_power = tx_power;
1785 	/* Defining a mesh_handle changes the timing units to ms,
1786 	 * rather than seconds, and ties the instance to the requested
1787 	 * mesh_tx queue.
1788 	 */
1789 	adv->mesh = mesh_handle;
1790 
1791 	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1792 				  scan_rsp_len, scan_rsp_data);
1793 
1794 	adv->timeout = timeout;
1795 	adv->remaining_time = timeout;
1796 
1797 	if (duration == 0)
1798 		adv->duration = hdev->def_multi_adv_rotation_duration;
1799 	else
1800 		adv->duration = duration;
1801 
1802 	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1803 
1804 	BT_DBG("%s for %dMR", hdev->name, instance);
1805 
1806 	return adv;
1807 }
1808 
1809 /* This function requires the caller holds hdev->lock */
1810 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1811 				      u32 flags, u8 data_len, u8 *data,
1812 				      u32 min_interval, u32 max_interval)
1813 {
1814 	struct adv_info *adv;
1815 
1816 	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1817 				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1818 				   min_interval, max_interval, 0);
1819 	if (IS_ERR(adv))
1820 		return adv;
1821 
1822 	adv->periodic = true;
1823 	adv->per_adv_data_len = data_len;
1824 
1825 	if (data)
1826 		memcpy(adv->per_adv_data, data, data_len);
1827 
1828 	return adv;
1829 }
1830 
1831 /* This function requires the caller holds hdev->lock */
1832 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1833 			      u16 adv_data_len, u8 *adv_data,
1834 			      u16 scan_rsp_len, u8 *scan_rsp_data)
1835 {
1836 	struct adv_info *adv;
1837 
1838 	adv = hci_find_adv_instance(hdev, instance);
1839 
1840 	/* If advertisement doesn't exist, we can't modify its data */
1841 	if (!adv)
1842 		return -ENOENT;
1843 
1844 	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1845 		memset(adv->adv_data, 0, sizeof(adv->adv_data));
1846 		memcpy(adv->adv_data, adv_data, adv_data_len);
1847 		adv->adv_data_len = adv_data_len;
1848 		adv->adv_data_changed = true;
1849 	}
1850 
1851 	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1852 		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1853 		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1854 		adv->scan_rsp_len = scan_rsp_len;
1855 		adv->scan_rsp_changed = true;
1856 	}
1857 
1858 	/* Mark as changed if there are flags which would affect it */
1859 	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1860 	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1861 		adv->scan_rsp_changed = true;
1862 
1863 	return 0;
1864 }
1865 
1866 /* This function requires the caller holds hdev->lock */
1867 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1868 {
1869 	u32 flags;
1870 	struct adv_info *adv;
1871 
1872 	if (instance == 0x00) {
1873 		/* Instance 0 always manages the "Tx Power" and "Flags"
1874 		 * fields
1875 		 */
1876 		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1877 
1878 		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1879 		 * corresponds to the "connectable" instance flag.
1880 		 */
1881 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1882 			flags |= MGMT_ADV_FLAG_CONNECTABLE;
1883 
1884 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1885 			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1886 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1887 			flags |= MGMT_ADV_FLAG_DISCOV;
1888 
1889 		return flags;
1890 	}
1891 
1892 	adv = hci_find_adv_instance(hdev, instance);
1893 
1894 	/* Return 0 when we got an invalid instance identifier. */
1895 	if (!adv)
1896 		return 0;
1897 
1898 	return adv->flags;
1899 }
1900 
1901 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1902 {
1903 	struct adv_info *adv;
1904 
1905 	/* Instance 0x00 always sets the local name */
1906 	if (instance == 0x00)
1907 		return true;
1908 
1909 	adv = hci_find_adv_instance(hdev, instance);
1910 	if (!adv)
1911 		return false;
1912 
1913 	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1914 	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1915 		return true;
1916 
1917 	return adv->scan_rsp_len ? true : false;
1918 }
1919 
1920 /* This function requires the caller holds hdev->lock */
1921 void hci_adv_monitors_clear(struct hci_dev *hdev)
1922 {
1923 	struct adv_monitor *monitor;
1924 	int handle;
1925 
1926 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1927 		hci_free_adv_monitor(hdev, monitor);
1928 
1929 	idr_destroy(&hdev->adv_monitors_idr);
1930 }
1931 
1932 /* Frees the monitor structure and does some bookkeeping.
1933  * This function requires the caller holds hdev->lock.
1934  */
1935 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1936 {
1937 	struct adv_pattern *pattern;
1938 	struct adv_pattern *tmp;
1939 
1940 	if (!monitor)
1941 		return;
1942 
1943 	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1944 		list_del(&pattern->list);
1945 		kfree(pattern);
1946 	}
1947 
1948 	if (monitor->handle)
1949 		idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1950 
1951 	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1952 		hdev->adv_monitors_cnt--;
1953 		mgmt_adv_monitor_removed(hdev, monitor->handle);
1954 	}
1955 
1956 	kfree(monitor);
1957 }
1958 
1959 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1960  * also attempts to forward the request to the controller.
1961  * This function requires the caller holds hci_req_sync_lock.
1962  */
1963 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1964 {
1965 	int min, max, handle;
1966 	int status = 0;
1967 
1968 	if (!monitor)
1969 		return -EINVAL;
1970 
1971 	hci_dev_lock(hdev);
1972 
1973 	min = HCI_MIN_ADV_MONITOR_HANDLE;
1974 	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
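	/* idr_alloc() treats the upper bound as exclusive, so handles are
	 * allocated in [HCI_MIN_ADV_MONITOR_HANDLE, max).
	 */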
1975 	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1976 			   GFP_KERNEL);
1977 
1978 	hci_dev_unlock(hdev);
1979 
1980 	if (handle < 0)
1981 		return handle;
1982 
1983 	monitor->handle = handle;
1984 
1985 	if (!hdev_is_powered(hdev))
1986 		return status;
1987 
1988 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1989 	case HCI_ADV_MONITOR_EXT_NONE:
1990 		bt_dev_dbg(hdev, "add monitor %d status %d",
1991 			   monitor->handle, status);
1992 		/* Message was not forwarded to controller - not an error */
1993 		break;
1994 
1995 	case HCI_ADV_MONITOR_EXT_MSFT:
1996 		status = msft_add_monitor_pattern(hdev, monitor);
1997 		bt_dev_dbg(hdev, "add monitor %d msft status %d",
1998 			   handle, status);
1999 		break;
2000 	}
2001 
2002 	return status;
2003 }
2004 
2005 /* Attempts to remove the monitor from the controller and then frees it. If
2006  * the controller doesn't have a corresponding handle, remove it anyway.
2007  * This function requires the caller holds hci_req_sync_lock.
2008  */
2009 static int hci_remove_adv_monitor(struct hci_dev *hdev,
2010 				  struct adv_monitor *monitor)
2011 {
2012 	int status = 0;
2013 	int handle;
2014 
2015 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
2016 	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
2017 		bt_dev_dbg(hdev, "remove monitor %d status %d",
2018 			   monitor->handle, status);
2019 		goto free_monitor;
2020 
2021 	case HCI_ADV_MONITOR_EXT_MSFT:
2022 		handle = monitor->handle;
2023 		status = msft_remove_monitor(hdev, monitor);
2024 		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
2025 			   handle, status);
2026 		break;
2027 	}
2028 
2029 	/* If no matching handle is registered, just free the monitor */
2030 	if (status == -ENOENT)
2031 		goto free_monitor;
2032 
2033 	return status;
2034 
2035 free_monitor:
2036 	if (status == -ENOENT)
2037 		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
2038 			    monitor->handle);
2039 	hci_free_adv_monitor(hdev, monitor);
2040 
2041 	return status;
2042 }
2043 
2044 /* This function requires the caller holds hci_req_sync_lock */
2045 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2046 {
2047 	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2048 
2049 	if (!monitor)
2050 		return -EINVAL;
2051 
2052 	return hci_remove_adv_monitor(hdev, monitor);
2053 }
2054 
2055 /* This function requires the caller holds hci_req_sync_lock */
2056 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2057 {
2058 	struct adv_monitor *monitor;
2059 	int idr_next_id = 0;
2060 	int status = 0;
2061 
2062 	while (1) {
2063 		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2064 		if (!monitor)
2065 			break;
2066 
2067 		status = hci_remove_adv_monitor(hdev, monitor);
2068 		if (status)
2069 			return status;
2070 
2071 		idr_next_id++;
2072 	}
2073 
2074 	return status;
2075 }
2076 
2077 /* This function requires the caller holds hdev->lock */
2078 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2079 {
2080 	return !idr_is_empty(&hdev->adv_monitors_idr);
2081 }
2082 
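/* Returns which advertisement monitor offloading extension the controller
 * supports: the MSFT extension when available, otherwise
 * HCI_ADV_MONITOR_EXT_NONE (no controller offload).
 */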
2083 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2084 {
2085 	if (msft_monitor_supported(hdev))
2086 		return HCI_ADV_MONITOR_EXT_MSFT;
2087 
2088 	return HCI_ADV_MONITOR_EXT_NONE;
2089 }
2090 
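/* Look up an entry matching the given address and address type in a
 * bdaddr_list; returns the entry or NULL if none is found.
 */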
2091 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2092 					 bdaddr_t *bdaddr, u8 type)
2093 {
2094 	struct bdaddr_list *b;
2095 
2096 	list_for_each_entry(b, bdaddr_list, list) {
2097 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2098 			return b;
2099 	}
2100 
2101 	return NULL;
2102 }
2103 
2104 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2105 				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2106 				u8 type)
2107 {
2108 	struct bdaddr_list_with_irk *b;
2109 
2110 	list_for_each_entry(b, bdaddr_list, list) {
2111 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2112 			return b;
2113 	}
2114 
2115 	return NULL;
2116 }
2117 
2118 struct bdaddr_list_with_flags *
2119 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2120 				  bdaddr_t *bdaddr, u8 type)
2121 {
2122 	struct bdaddr_list_with_flags *b;
2123 
2124 	list_for_each_entry(b, bdaddr_list, list) {
2125 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2126 			return b;
2127 	}
2128 
2129 	return NULL;
2130 }
2131 
2132 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2133 {
2134 	struct bdaddr_list *b, *n;
2135 
2136 	list_for_each_entry_safe(b, n, bdaddr_list, list) {
2137 		list_del(&b->list);
2138 		kfree(b);
2139 	}
2140 }
2141 
2142 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2143 {
2144 	struct bdaddr_list *entry;
2145 
2146 	if (!bacmp(bdaddr, BDADDR_ANY))
2147 		return -EBADF;
2148 
2149 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2150 		return -EEXIST;
2151 
2152 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2153 	if (!entry)
2154 		return -ENOMEM;
2155 
2156 	bacpy(&entry->bdaddr, bdaddr);
2157 	entry->bdaddr_type = type;
2158 
2159 	list_add(&entry->list, list);
2160 
2161 	return 0;
2162 }
2163 
2164 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2165 					u8 type, u8 *peer_irk, u8 *local_irk)
2166 {
2167 	struct bdaddr_list_with_irk *entry;
2168 
2169 	if (!bacmp(bdaddr, BDADDR_ANY))
2170 		return -EBADF;
2171 
2172 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2173 		return -EEXIST;
2174 
2175 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2176 	if (!entry)
2177 		return -ENOMEM;
2178 
2179 	bacpy(&entry->bdaddr, bdaddr);
2180 	entry->bdaddr_type = type;
2181 
2182 	if (peer_irk)
2183 		memcpy(entry->peer_irk, peer_irk, 16);
2184 
2185 	if (local_irk)
2186 		memcpy(entry->local_irk, local_irk, 16);
2187 
2188 	list_add(&entry->list, list);
2189 
2190 	return 0;
2191 }
2192 
2193 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2194 				   u8 type, u32 flags)
2195 {
2196 	struct bdaddr_list_with_flags *entry;
2197 
2198 	if (!bacmp(bdaddr, BDADDR_ANY))
2199 		return -EBADF;
2200 
2201 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2202 		return -EEXIST;
2203 
2204 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2205 	if (!entry)
2206 		return -ENOMEM;
2207 
2208 	bacpy(&entry->bdaddr, bdaddr);
2209 	entry->bdaddr_type = type;
2210 	entry->flags = flags;
2211 
2212 	list_add(&entry->list, list);
2213 
2214 	return 0;
2215 }
2216 
2217 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2218 {
2219 	struct bdaddr_list *entry;
2220 
2221 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2222 		hci_bdaddr_list_clear(list);
2223 		return 0;
2224 	}
2225 
2226 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2227 	if (!entry)
2228 		return -ENOENT;
2229 
2230 	list_del(&entry->list);
2231 	kfree(entry);
2232 
2233 	return 0;
2234 }
2235 
2236 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2237 							u8 type)
2238 {
2239 	struct bdaddr_list_with_irk *entry;
2240 
2241 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2242 		hci_bdaddr_list_clear(list);
2243 		return 0;
2244 	}
2245 
2246 	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2247 	if (!entry)
2248 		return -ENOENT;
2249 
2250 	list_del(&entry->list);
2251 	kfree(entry);
2252 
2253 	return 0;
2254 }
2255 
2256 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2257 				   u8 type)
2258 {
2259 	struct bdaddr_list_with_flags *entry;
2260 
2261 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2262 		hci_bdaddr_list_clear(list);
2263 		return 0;
2264 	}
2265 
2266 	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2267 	if (!entry)
2268 		return -ENOENT;
2269 
2270 	list_del(&entry->list);
2271 	kfree(entry);
2272 
2273 	return 0;
2274 }
2275 
2276 /* This function requires the caller holds hdev->lock */
2277 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2278 					       bdaddr_t *addr, u8 addr_type)
2279 {
2280 	struct hci_conn_params *params;
2281 
2282 	list_for_each_entry(params, &hdev->le_conn_params, list) {
2283 		if (bacmp(&params->addr, addr) == 0 &&
2284 		    params->addr_type == addr_type) {
2285 			return params;
2286 		}
2287 	}
2288 
2289 	return NULL;
2290 }
2291 
2292 /* This function requires the caller holds hdev->lock or rcu_read_lock */
2293 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2294 						  bdaddr_t *addr, u8 addr_type)
2295 {
2296 	struct hci_conn_params *param;
2297 
2298 	rcu_read_lock();
2299 
2300 	list_for_each_entry_rcu(param, list, action) {
2301 		if (bacmp(&param->addr, addr) == 0 &&
2302 		    param->addr_type == addr_type) {
2303 			rcu_read_unlock();
2304 			return param;
2305 		}
2306 	}
2307 
2308 	rcu_read_unlock();
2309 
2310 	return NULL;
2311 }
2312 
2313 /* This function requires the caller holds hdev->lock */
2314 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2315 {
2316 	if (list_empty(&param->action))
2317 		return;
2318 
2319 	list_del_rcu(&param->action);
2320 	synchronize_rcu();
2321 	INIT_LIST_HEAD(&param->action);
2322 }
2323 
2324 /* This function requires the caller holds hdev->lock */
2325 void hci_pend_le_list_add(struct hci_conn_params *param,
2326 			  struct list_head *list)
2327 {
2328 	list_add_rcu(&param->action, list);
2329 }
2330 
2331 /* This function requires the caller holds hdev->lock */
2332 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2333 					    bdaddr_t *addr, u8 addr_type)
2334 {
2335 	struct hci_conn_params *params;
2336 
2337 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2338 	if (params)
2339 		return params;
2340 
2341 	params = kzalloc(sizeof(*params), GFP_KERNEL);
2342 	if (!params) {
2343 		bt_dev_err(hdev, "out of memory");
2344 		return NULL;
2345 	}
2346 
2347 	bacpy(&params->addr, addr);
2348 	params->addr_type = addr_type;
2349 
2350 	list_add(&params->list, &hdev->le_conn_params);
2351 	INIT_LIST_HEAD(&params->action);
2352 
2353 	params->conn_min_interval = hdev->le_conn_min_interval;
2354 	params->conn_max_interval = hdev->le_conn_max_interval;
2355 	params->conn_latency = hdev->le_conn_latency;
2356 	params->supervision_timeout = hdev->le_supv_timeout;
2357 	params->auto_connect = HCI_AUTO_CONN_DISABLED;
2358 
2359 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2360 
2361 	return params;
2362 }
2363 
2364 void hci_conn_params_free(struct hci_conn_params *params)
2365 {
2366 	hci_pend_le_list_del_init(params);
2367 
2368 	if (params->conn) {
2369 		hci_conn_drop(params->conn);
2370 		hci_conn_put(params->conn);
2371 	}
2372 
2373 	list_del(&params->list);
2374 	kfree(params);
2375 }
2376 
2377 /* This function requires the caller holds hdev->lock */
2378 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2379 {
2380 	struct hci_conn_params *params;
2381 
2382 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2383 	if (!params)
2384 		return;
2385 
2386 	hci_conn_params_free(params);
2387 
2388 	hci_update_passive_scan(hdev);
2389 
2390 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2391 }
2392 
2393 /* This function requires the caller holds hdev->lock */
2394 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2395 {
2396 	struct hci_conn_params *params, *tmp;
2397 
2398 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2399 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2400 			continue;
2401 
2402 		/* If trying to establish a one-time connection to a disabled
2403 		 * device, keep the params but mark them as explicit connect only.
2404 		 */
2405 		if (params->explicit_connect) {
2406 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2407 			continue;
2408 		}
2409 
2410 		hci_conn_params_free(params);
2411 	}
2412 
2413 	BT_DBG("All LE disabled connection parameters were removed");
2414 }
2415 
2416 /* This function requires the caller holds hdev->lock */
2417 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2418 {
2419 	struct hci_conn_params *params, *tmp;
2420 
2421 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2422 		hci_conn_params_free(params);
2423 
2424 	BT_DBG("All LE connection parameters were removed");
2425 }
2426 
2427 /* Copy the Identity Address of the controller.
2428  *
2429  * If the controller has a public BD_ADDR, then by default use that one.
2430  * If this is a LE only controller without a public address, default to
2431  * the static random address.
2432  *
2433  * For debugging purposes it is possible to force controllers with a
2434  * public address to use the static random address instead.
2435  *
2436  * In case BR/EDR has been disabled on a dual-mode controller and
2437  * userspace has configured a static address, then that address
2438  * becomes the identity address instead of the public BR/EDR address.
2439  */
2440 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2441 			       u8 *bdaddr_type)
2442 {
2443 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2444 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2445 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2446 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2447 		bacpy(bdaddr, &hdev->static_addr);
2448 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
2449 	} else {
2450 		bacpy(bdaddr, &hdev->bdaddr);
2451 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
2452 	}
2453 }
2454 
2455 static void hci_clear_wake_reason(struct hci_dev *hdev)
2456 {
2457 	hci_dev_lock(hdev);
2458 
2459 	hdev->wake_reason = 0;
2460 	bacpy(&hdev->wake_addr, BDADDR_ANY);
2461 	hdev->wake_addr_type = 0;
2462 
2463 	hci_dev_unlock(hdev);
2464 }
2465 
2466 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2467 				void *data)
2468 {
2469 	struct hci_dev *hdev =
2470 		container_of(nb, struct hci_dev, suspend_notifier);
2471 	int ret = 0;
2472 
2473 	/* Userspace has full control of this device. Do nothing. */
2474 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2475 		return NOTIFY_DONE;
2476 
2477 	/* To avoid a potential race with hci_unregister_dev. */
2478 	hci_dev_hold(hdev);
2479 
2480 	if (action == PM_SUSPEND_PREPARE)
2481 		ret = hci_suspend_dev(hdev);
2482 	else if (action == PM_POST_SUSPEND)
2483 		ret = hci_resume_dev(hdev);
2484 
2485 	if (ret)
2486 		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2487 			   action, ret);
2488 
2489 	hci_dev_put(hdev);
2490 	return NOTIFY_DONE;
2491 }
2492 
2493 /* Alloc HCI device */
2494 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2495 {
2496 	struct hci_dev *hdev;
2497 	unsigned int alloc_size;
2498 
2499 	alloc_size = sizeof(*hdev);
2500 	if (sizeof_priv) {
2501 		/* Fixme: May need ALIGN-ment? */
2502 		alloc_size += sizeof_priv;
2503 	}
2504 
2505 	hdev = kzalloc(alloc_size, GFP_KERNEL);
2506 	if (!hdev)
2507 		return NULL;
2508 
2509 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2510 	hdev->esco_type = (ESCO_HV1);
2511 	hdev->link_mode = (HCI_LM_ACCEPT);
2512 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
2513 	hdev->io_capability = 0x03;	/* No Input No Output */
2514 	hdev->manufacturer = 0xffff;	/* Default to internal use */
2515 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2516 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2517 	hdev->adv_instance_cnt = 0;
2518 	hdev->cur_adv_instance = 0x00;
2519 	hdev->adv_instance_timeout = 0;
2520 
2521 	hdev->advmon_allowlist_duration = 300;
2522 	hdev->advmon_no_filter_duration = 500;
2523 	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */
2524 
2525 	hdev->sniff_max_interval = 800;
2526 	hdev->sniff_min_interval = 80;
2527 
2528 	hdev->le_adv_channel_map = 0x07;
2529 	hdev->le_adv_min_interval = 0x0800;
2530 	hdev->le_adv_max_interval = 0x0800;
2531 	hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
2532 	hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
2533 	hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
2534 	hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
2535 	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2536 	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2537 	hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
2538 	hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
2539 	hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
2540 	hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
2541 	hdev->le_conn_min_interval = 0x0018;
2542 	hdev->le_conn_max_interval = 0x0028;
2543 	hdev->le_conn_latency = 0x0000;
2544 	hdev->le_supv_timeout = 0x002a;
2545 	hdev->le_def_tx_len = 0x001b;
2546 	hdev->le_def_tx_time = 0x0148;
2547 	hdev->le_max_tx_len = 0x001b;
2548 	hdev->le_max_tx_time = 0x0148;
2549 	hdev->le_max_rx_len = 0x001b;
2550 	hdev->le_max_rx_time = 0x0148;
2551 	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2552 	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2553 	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2554 	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2555 	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2556 	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2557 	hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
2558 	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2559 	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2560 
2561 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2562 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2563 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2564 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2565 	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2566 	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2567 
2568 	/* default 1.28 sec page scan */
2569 	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2570 	hdev->def_page_scan_int = 0x0800;
2571 	hdev->def_page_scan_window = 0x0012;
2572 
2573 	mutex_init(&hdev->lock);
2574 	mutex_init(&hdev->req_lock);
2575 
2576 	ida_init(&hdev->unset_handle_ida);
2577 
2578 	INIT_LIST_HEAD(&hdev->mesh_pending);
2579 	INIT_LIST_HEAD(&hdev->mgmt_pending);
2580 	INIT_LIST_HEAD(&hdev->reject_list);
2581 	INIT_LIST_HEAD(&hdev->accept_list);
2582 	INIT_LIST_HEAD(&hdev->uuids);
2583 	INIT_LIST_HEAD(&hdev->link_keys);
2584 	INIT_LIST_HEAD(&hdev->long_term_keys);
2585 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2586 	INIT_LIST_HEAD(&hdev->remote_oob_data);
2587 	INIT_LIST_HEAD(&hdev->le_accept_list);
2588 	INIT_LIST_HEAD(&hdev->le_resolv_list);
2589 	INIT_LIST_HEAD(&hdev->le_conn_params);
2590 	INIT_LIST_HEAD(&hdev->pend_le_conns);
2591 	INIT_LIST_HEAD(&hdev->pend_le_reports);
2592 	INIT_LIST_HEAD(&hdev->conn_hash.list);
2593 	INIT_LIST_HEAD(&hdev->adv_instances);
2594 	INIT_LIST_HEAD(&hdev->blocked_keys);
2595 	INIT_LIST_HEAD(&hdev->monitored_devices);
2596 
2597 	INIT_LIST_HEAD(&hdev->local_codecs);
2598 	INIT_WORK(&hdev->rx_work, hci_rx_work);
2599 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2600 	INIT_WORK(&hdev->tx_work, hci_tx_work);
2601 	INIT_WORK(&hdev->power_on, hci_power_on);
2602 	INIT_WORK(&hdev->error_reset, hci_error_reset);
2603 
2604 	hci_cmd_sync_init(hdev);
2605 
2606 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2607 
2608 	skb_queue_head_init(&hdev->rx_q);
2609 	skb_queue_head_init(&hdev->cmd_q);
2610 	skb_queue_head_init(&hdev->raw_q);
2611 
2612 	init_waitqueue_head(&hdev->req_wait_q);
2613 
2614 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2615 	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2616 
2617 	hci_devcd_setup(hdev);
2618 	hci_request_setup(hdev);
2619 
2620 	hci_init_sysfs(hdev);
2621 	discovery_init(hdev);
2622 
2623 	return hdev;
2624 }
2625 EXPORT_SYMBOL(hci_alloc_dev_priv);
2626 
2627 /* Free HCI device */
2628 void hci_free_dev(struct hci_dev *hdev)
2629 {
2630 	/* will free via device release */
2631 	put_device(&hdev->dev);
2632 }
2633 EXPORT_SYMBOL(hci_free_dev);
2634 
2635 /* Register HCI device */
2636 int hci_register_dev(struct hci_dev *hdev)
2637 {
2638 	int id, error;
2639 
2640 	if (!hdev->open || !hdev->close || !hdev->send)
2641 		return -EINVAL;
2642 
2643 	id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2644 	if (id < 0)
2645 		return id;
2646 
2647 	error = dev_set_name(&hdev->dev, "hci%u", id);
2648 	if (error)
2649 		return error;
2650 
2651 	hdev->name = dev_name(&hdev->dev);
2652 	hdev->id = id;
2653 
2654 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2655 
2656 	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2657 	if (!hdev->workqueue) {
2658 		error = -ENOMEM;
2659 		goto err;
2660 	}
2661 
2662 	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2663 						      hdev->name);
2664 	if (!hdev->req_workqueue) {
2665 		destroy_workqueue(hdev->workqueue);
2666 		error = -ENOMEM;
2667 		goto err;
2668 	}
2669 
2670 	if (!IS_ERR_OR_NULL(bt_debugfs))
2671 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2672 
2673 	error = device_add(&hdev->dev);
2674 	if (error < 0)
2675 		goto err_wqueue;
2676 
2677 	hci_leds_init(hdev);
2678 
2679 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2680 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2681 				    hdev);
2682 	if (hdev->rfkill) {
2683 		if (rfkill_register(hdev->rfkill) < 0) {
2684 			rfkill_destroy(hdev->rfkill);
2685 			hdev->rfkill = NULL;
2686 		}
2687 	}
2688 
2689 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2690 		hci_dev_set_flag(hdev, HCI_RFKILLED);
2691 
2692 	hci_dev_set_flag(hdev, HCI_SETUP);
2693 	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2694 
2695 	/* Assume BR/EDR support until proven otherwise (such as
2696 	 * through reading supported features during init).
2697 	 */
2698 	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2699 
2700 	write_lock(&hci_dev_list_lock);
2701 	list_add(&hdev->list, &hci_dev_list);
2702 	write_unlock(&hci_dev_list_lock);
2703 
2704 	/* Devices that are marked for raw-only usage are unconfigured
2705 	 * and should not be included in normal operation.
2706 	 */
2707 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2708 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2709 
2710 	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
2711 	 * callback.
2712 	 */
2713 	if (hdev->wakeup)
2714 		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2715 
2716 	hci_sock_dev_event(hdev, HCI_DEV_REG);
2717 	hci_dev_hold(hdev);
2718 
2719 	error = hci_register_suspend_notifier(hdev);
2720 	if (error)
2721 		BT_WARN("register suspend notifier failed error:%d\n", error);
2722 
2723 	queue_work(hdev->req_workqueue, &hdev->power_on);
2724 
2725 	idr_init(&hdev->adv_monitors_idr);
2726 	msft_register(hdev);
2727 
2728 	return id;
2729 
2730 err_wqueue:
2731 	debugfs_remove_recursive(hdev->debugfs);
2732 	destroy_workqueue(hdev->workqueue);
2733 	destroy_workqueue(hdev->req_workqueue);
2734 err:
2735 	ida_free(&hci_index_ida, hdev->id);
2736 
2737 	return error;
2738 }
2739 EXPORT_SYMBOL(hci_register_dev);
2740 
2741 /* Unregister HCI device */
2742 void hci_unregister_dev(struct hci_dev *hdev)
2743 {
2744 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2745 
2746 	mutex_lock(&hdev->unregister_lock);
2747 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
2748 	mutex_unlock(&hdev->unregister_lock);
2749 
2750 	write_lock(&hci_dev_list_lock);
2751 	list_del(&hdev->list);
2752 	write_unlock(&hci_dev_list_lock);
2753 
2754 	cancel_work_sync(&hdev->power_on);
2755 
2756 	hci_cmd_sync_clear(hdev);
2757 
2758 	hci_unregister_suspend_notifier(hdev);
2759 
2760 	hci_dev_do_close(hdev);
2761 
2762 	if (!test_bit(HCI_INIT, &hdev->flags) &&
2763 	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
2764 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2765 		hci_dev_lock(hdev);
2766 		mgmt_index_removed(hdev);
2767 		hci_dev_unlock(hdev);
2768 	}
2769 
2770 	/* mgmt_index_removed should take care of emptying the
2771 	 * pending list */
2772 	BUG_ON(!list_empty(&hdev->mgmt_pending));
2773 
2774 	hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2775 
2776 	if (hdev->rfkill) {
2777 		rfkill_unregister(hdev->rfkill);
2778 		rfkill_destroy(hdev->rfkill);
2779 	}
2780 
2781 	device_del(&hdev->dev);
2782 	/* Actual cleanup is deferred until hci_release_dev(). */
2783 	hci_dev_put(hdev);
2784 }
2785 EXPORT_SYMBOL(hci_unregister_dev);
2786 
2787 /* Release HCI device */
2788 void hci_release_dev(struct hci_dev *hdev)
2789 {
2790 	debugfs_remove_recursive(hdev->debugfs);
2791 	kfree_const(hdev->hw_info);
2792 	kfree_const(hdev->fw_info);
2793 
2794 	destroy_workqueue(hdev->workqueue);
2795 	destroy_workqueue(hdev->req_workqueue);
2796 
2797 	hci_dev_lock(hdev);
2798 	hci_bdaddr_list_clear(&hdev->reject_list);
2799 	hci_bdaddr_list_clear(&hdev->accept_list);
2800 	hci_uuids_clear(hdev);
2801 	hci_link_keys_clear(hdev);
2802 	hci_smp_ltks_clear(hdev);
2803 	hci_smp_irks_clear(hdev);
2804 	hci_remote_oob_data_clear(hdev);
2805 	hci_adv_instances_clear(hdev);
2806 	hci_adv_monitors_clear(hdev);
2807 	hci_bdaddr_list_clear(&hdev->le_accept_list);
2808 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2809 	hci_conn_params_clear_all(hdev);
2810 	hci_discovery_filter_clear(hdev);
2811 	hci_blocked_keys_clear(hdev);
2812 	hci_codec_list_clear(&hdev->local_codecs);
2813 	msft_release(hdev);
2814 	hci_dev_unlock(hdev);
2815 
2816 	ida_destroy(&hdev->unset_handle_ida);
2817 	ida_free(&hci_index_ida, hdev->id);
2818 	kfree_skb(hdev->sent_cmd);
2819 	kfree_skb(hdev->req_skb);
2820 	kfree_skb(hdev->recv_event);
2821 	kfree(hdev);
2822 }
2823 EXPORT_SYMBOL(hci_release_dev);
2824 
2825 int hci_register_suspend_notifier(struct hci_dev *hdev)
2826 {
2827 	int ret = 0;
2828 
2829 	if (!hdev->suspend_notifier.notifier_call &&
2830 	    !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2831 		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2832 		ret = register_pm_notifier(&hdev->suspend_notifier);
2833 	}
2834 
2835 	return ret;
2836 }
2837 
2838 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2839 {
2840 	int ret = 0;
2841 
2842 	if (hdev->suspend_notifier.notifier_call) {
2843 		ret = unregister_pm_notifier(&hdev->suspend_notifier);
2844 		if (!ret)
2845 			hdev->suspend_notifier.notifier_call = NULL;
2846 	}
2847 
2848 	return ret;
2849 }
2850 
2851 /* Cancel ongoing command synchronously:
2852  *
2853  * - Cancel command timer
2854  * - Reset command counter
2855  * - Cancel command request
2856  */
2857 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2858 {
2859 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
2860 
2861 	cancel_delayed_work_sync(&hdev->cmd_timer);
2862 	cancel_delayed_work_sync(&hdev->ncmd_timer);
2863 	atomic_set(&hdev->cmd_cnt, 1);
2864 
2865 	hci_cmd_sync_cancel_sync(hdev, err);
2866 }
2867 
2868 /* Suspend HCI device */
2869 int hci_suspend_dev(struct hci_dev *hdev)
2870 {
2871 	int ret;
2872 
2873 	bt_dev_dbg(hdev, "");
2874 
2875 	/* Suspend should only act when the device is powered. */
2876 	if (!hdev_is_powered(hdev) ||
2877 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2878 		return 0;
2879 
2880 	/* If powering down don't attempt to suspend */
2881 	if (mgmt_powering_down(hdev))
2882 		return 0;
2883 
2884 	/* Cancel potentially blocking sync operation before suspend */
2885 	hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2886 
2887 	hci_req_sync_lock(hdev);
2888 	ret = hci_suspend_sync(hdev);
2889 	hci_req_sync_unlock(hdev);
2890 
2891 	hci_clear_wake_reason(hdev);
2892 	mgmt_suspending(hdev, hdev->suspend_state);
2893 
2894 	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2895 	return ret;
2896 }
2897 EXPORT_SYMBOL(hci_suspend_dev);
2898 
2899 /* Resume HCI device */
2900 int hci_resume_dev(struct hci_dev *hdev)
2901 {
2902 	int ret;
2903 
2904 	bt_dev_dbg(hdev, "");
2905 
2906 	/* Resume should only act when the device is powered. */
2907 	if (!hdev_is_powered(hdev) ||
2908 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2909 		return 0;
2910 
2911 	/* If powering down don't attempt to resume */
2912 	if (mgmt_powering_down(hdev))
2913 		return 0;
2914 
2915 	hci_req_sync_lock(hdev);
2916 	ret = hci_resume_sync(hdev);
2917 	hci_req_sync_unlock(hdev);
2918 
2919 	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2920 		      hdev->wake_addr_type);
2921 
2922 	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2923 	return ret;
2924 }
2925 EXPORT_SYMBOL(hci_resume_dev);
2926 
2927 /* Reset HCI device */
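/* The reset is driven by injecting a synthetic HCI Hardware Error event into
 * the RX path; the event handler then performs the actual error reset.
 */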
2928 int hci_reset_dev(struct hci_dev *hdev)
2929 {
2930 	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2931 	struct sk_buff *skb;
2932 
2933 	skb = bt_skb_alloc(3, GFP_ATOMIC);
2934 	if (!skb)
2935 		return -ENOMEM;
2936 
2937 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2938 	skb_put_data(skb, hw_err, 3);
2939 
2940 	bt_dev_err(hdev, "Injecting HCI hardware error event");
2941 
2942 	/* Send Hardware Error to upper stack */
2943 	return hci_recv_frame(hdev, skb);
2944 }
2945 EXPORT_SYMBOL(hci_reset_dev);
2946 
2947 /* Receive frame from HCI drivers */
2948 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2949 {
2950 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2951 		      && !test_bit(HCI_INIT, &hdev->flags))) {
2952 		kfree_skb(skb);
2953 		return -ENXIO;
2954 	}
2955 
2956 	switch (hci_skb_pkt_type(skb)) {
2957 	case HCI_EVENT_PKT:
2958 		break;
2959 	case HCI_ACLDATA_PKT:
2960 		/* Detect if ISO packet has been sent as ACL */
2961 		if (hci_conn_num(hdev, ISO_LINK)) {
2962 			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2963 			__u8 type;
2964 
2965 			type = hci_conn_lookup_type(hdev, hci_handle(handle));
2966 			if (type == ISO_LINK)
2967 				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2968 		}
2969 		break;
2970 	case HCI_SCODATA_PKT:
2971 		break;
2972 	case HCI_ISODATA_PKT:
2973 		break;
2974 	default:
2975 		kfree_skb(skb);
2976 		return -EINVAL;
2977 	}
2978 
2979 	/* Incoming skb */
2980 	bt_cb(skb)->incoming = 1;
2981 
2982 	/* Time stamp */
2983 	__net_timestamp(skb);
2984 
2985 	skb_queue_tail(&hdev->rx_q, skb);
2986 	queue_work(hdev->workqueue, &hdev->rx_work);
2987 
2988 	return 0;
2989 }
2990 EXPORT_SYMBOL(hci_recv_frame);
2991 
2992 /* Receive diagnostic message from HCI drivers */
2993 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2994 {
2995 	/* Mark as diagnostic packet */
2996 	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2997 
2998 	/* Time stamp */
2999 	__net_timestamp(skb);
3000 
3001 	skb_queue_tail(&hdev->rx_q, skb);
3002 	queue_work(hdev->workqueue, &hdev->rx_work);
3003 
3004 	return 0;
3005 }
3006 EXPORT_SYMBOL(hci_recv_diag);
3007 
3008 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3009 {
3010 	va_list vargs;
3011 
3012 	va_start(vargs, fmt);
3013 	kfree_const(hdev->hw_info);
3014 	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3015 	va_end(vargs);
3016 }
3017 EXPORT_SYMBOL(hci_set_hw_info);
3018 
3019 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3020 {
3021 	va_list vargs;
3022 
3023 	va_start(vargs, fmt);
3024 	kfree_const(hdev->fw_info);
3025 	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3026 	va_end(vargs);
3027 }
3028 EXPORT_SYMBOL(hci_set_fw_info);
3029 
3030 /* ---- Interface to upper protocols ---- */
3031 
3032 int hci_register_cb(struct hci_cb *cb)
3033 {
3034 	BT_DBG("%p name %s", cb, cb->name);
3035 
3036 	mutex_lock(&hci_cb_list_lock);
3037 	list_add_tail(&cb->list, &hci_cb_list);
3038 	mutex_unlock(&hci_cb_list_lock);
3039 
3040 	return 0;
3041 }
3042 EXPORT_SYMBOL(hci_register_cb);
3043 
3044 int hci_unregister_cb(struct hci_cb *cb)
3045 {
3046 	BT_DBG("%p name %s", cb, cb->name);
3047 
3048 	mutex_lock(&hci_cb_list_lock);
3049 	list_del(&cb->list);
3050 	mutex_unlock(&hci_cb_list_lock);
3051 
3052 	return 0;
3053 }
3054 EXPORT_SYMBOL(hci_unregister_cb);
3055 
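/* Low-level transmit path: timestamp the packet, mirror it to the monitor
 * and (in promiscuous mode) raw sockets, then hand it to the driver's send
 * callback. Consumes the skb on error.
 */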
3056 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3057 {
3058 	int err;
3059 
3060 	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3061 	       skb->len);
3062 
3063 	/* Time stamp */
3064 	__net_timestamp(skb);
3065 
3066 	/* Send copy to monitor */
3067 	hci_send_to_monitor(hdev, skb);
3068 
3069 	if (atomic_read(&hdev->promisc)) {
3070 		/* Send copy to the sockets */
3071 		hci_send_to_sock(hdev, skb);
3072 	}
3073 
3074 	/* Get rid of skb owner, prior to sending to the driver. */
3075 	skb_orphan(skb);
3076 
3077 	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3078 		kfree_skb(skb);
3079 		return -EINVAL;
3080 	}
3081 
3082 	err = hdev->send(hdev, skb);
3083 	if (err < 0) {
3084 		bt_dev_err(hdev, "sending frame failed (%d)", err);
3085 		kfree_skb(skb);
3086 		return err;
3087 	}
3088 
3089 	return 0;
3090 }
3091 
3092 /* Send HCI command */
3093 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3094 		 const void *param)
3095 {
3096 	struct sk_buff *skb;
3097 
3098 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3099 
3100 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3101 	if (!skb) {
3102 		bt_dev_err(hdev, "no memory for command");
3103 		return -ENOMEM;
3104 	}
3105 
3106 	/* Stand-alone HCI commands must be flagged as
3107 	 * single-command requests.
3108 	 */
3109 	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3110 
3111 	skb_queue_tail(&hdev->cmd_q, skb);
3112 	queue_work(hdev->workqueue, &hdev->cmd_work);
3113 
3114 	return 0;
3115 }
3116 
3117 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3118 		   const void *param)
3119 {
3120 	struct sk_buff *skb;
3121 
3122 	if (hci_opcode_ogf(opcode) != 0x3f) {
3123 		/* A controller receiving a command shall respond with either
3124 		 * a Command Status Event or a Command Complete Event.
3125 		 * Therefore, all standard HCI commands must be sent via the
3126 		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3127 		 * Some vendors do not comply with this rule for vendor-specific
3128 		 * commands and do not return any event. We want to support
3129 		 * unresponded commands for such cases only.
3130 		 */
3131 		bt_dev_err(hdev, "unresponded command not supported");
3132 		return -EINVAL;
3133 	}
3134 
3135 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3136 	if (!skb) {
3137 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3138 			   opcode);
3139 		return -ENOMEM;
3140 	}
3141 
3142 	hci_send_frame(hdev, skb);
3143 
3144 	return 0;
3145 }
3146 EXPORT_SYMBOL(__hci_cmd_send);
3147 
3148 /* Get data from the previously sent command */
3149 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3150 {
3151 	struct hci_command_hdr *hdr;
3152 
3153 	if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3154 		return NULL;
3155 
3156 	hdr = (void *)skb->data;
3157 
3158 	if (hdr->opcode != cpu_to_le16(opcode))
3159 		return NULL;
3160 
3161 	return skb->data + HCI_COMMAND_HDR_SIZE;
3162 }
3163 
3164 /* Get data from the previously sent command */
3165 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3166 {
3167 	void *data;
3168 
3169 	/* Check if opcode matches last sent command */
3170 	data = hci_cmd_data(hdev->sent_cmd, opcode);
3171 	if (!data)
3172 		/* Check if opcode matches last request */
3173 		data = hci_cmd_data(hdev->req_skb, opcode);
3174 
3175 	return data;
3176 }
3177 
3178 /* Get data from last received event */
3179 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3180 {
3181 	struct hci_event_hdr *hdr;
3182 	int offset;
3183 
3184 	if (!hdev->recv_event)
3185 		return NULL;
3186 
3187 	hdr = (void *)hdev->recv_event->data;
3188 	offset = sizeof(*hdr);
3189 
3190 	if (hdr->evt != event) {
3191 		/* In case of an LE meta event, check whether the subevent matches */
3192 		if (hdr->evt == HCI_EV_LE_META) {
3193 			struct hci_ev_le_meta *ev;
3194 
3195 			ev = (void *)hdev->recv_event->data + offset;
3196 			offset += sizeof(*ev);
3197 			if (ev->subevent == event)
3198 				goto found;
3199 		}
3200 		return NULL;
3201 	}
3202 
3203 found:
3204 	bt_dev_dbg(hdev, "event 0x%2.2x", event);
3205 
3206 	return hdev->recv_event->data + offset;
3207 }
3208 
3209 /* Send ACL data */
3210 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3211 {
3212 	struct hci_acl_hdr *hdr;
3213 	int len = skb->len;
3214 
3215 	skb_push(skb, HCI_ACL_HDR_SIZE);
3216 	skb_reset_transport_header(skb);
3217 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3218 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3219 	hdr->dlen   = cpu_to_le16(len);
3220 }
3221 
3222 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3223 			  struct sk_buff *skb, __u16 flags)
3224 {
3225 	struct hci_conn *conn = chan->conn;
3226 	struct hci_dev *hdev = conn->hdev;
3227 	struct sk_buff *list;
3228 
3229 	skb->len = skb_headlen(skb);
3230 	skb->data_len = 0;
3231 
3232 	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3233 
3234 	hci_add_acl_hdr(skb, conn->handle, flags);
3235 
3236 	list = skb_shinfo(skb)->frag_list;
3237 	if (!list) {
3238 		/* Non fragmented */
3239 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3240 
3241 		skb_queue_tail(queue, skb);
3242 	} else {
3243 		/* Fragmented */
3244 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3245 
3246 		skb_shinfo(skb)->frag_list = NULL;
3247 
3248 		/* Queue all fragments atomically. We need to use spin_lock_bh
3249 		 * here because of 6LoWPAN links, as there this function is
3250 		 * called from softirq and using normal spin lock could cause
3251 		 * deadlocks.
3252 		 */
3253 		spin_lock_bh(&queue->lock);
3254 
3255 		__skb_queue_tail(queue, skb);
3256 
3257 		flags &= ~ACL_START;
3258 		flags |= ACL_CONT;
3259 		do {
3260 			skb = list; list = list->next;
3261 
3262 			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3263 			hci_add_acl_hdr(skb, conn->handle, flags);
3264 
3265 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3266 
3267 			__skb_queue_tail(queue, skb);
3268 		} while (list);
3269 
3270 		spin_unlock_bh(&queue->lock);
3271 	}
3272 }
3273 
3274 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3275 {
3276 	struct hci_dev *hdev = chan->conn->hdev;
3277 
3278 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3279 
3280 	hci_queue_acl(chan, &chan->data_q, skb, flags);
3281 
3282 	queue_work(hdev->workqueue, &hdev->tx_work);
3283 }
3284 
3285 /* Send SCO data */
3286 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3287 {
3288 	struct hci_dev *hdev = conn->hdev;
3289 	struct hci_sco_hdr hdr;
3290 
3291 	BT_DBG("%s len %d", hdev->name, skb->len);
3292 
3293 	hdr.handle = cpu_to_le16(conn->handle);
3294 	hdr.dlen   = skb->len;
3295 
3296 	skb_push(skb, HCI_SCO_HDR_SIZE);
3297 	skb_reset_transport_header(skb);
3298 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3299 
3300 	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3301 
3302 	skb_queue_tail(&conn->data_q, skb);
3303 	queue_work(hdev->workqueue, &hdev->tx_work);
3304 }
3305 
3306 /* Send ISO data */
3307 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3308 {
3309 	struct hci_iso_hdr *hdr;
3310 	int len = skb->len;
3311 
3312 	skb_push(skb, HCI_ISO_HDR_SIZE);
3313 	skb_reset_transport_header(skb);
3314 	hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3315 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3316 	hdr->dlen   = cpu_to_le16(len);
3317 }
3318 
3319 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3320 			  struct sk_buff *skb)
3321 {
3322 	struct hci_dev *hdev = conn->hdev;
3323 	struct sk_buff *list;
3324 	__u16 flags;
3325 
3326 	skb->len = skb_headlen(skb);
3327 	skb->data_len = 0;
3328 
3329 	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3330 
3331 	list = skb_shinfo(skb)->frag_list;
3332 
3333 	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3334 	hci_add_iso_hdr(skb, conn->handle, flags);
3335 
3336 	if (!list) {
3337 		/* Non fragmented */
3338 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3339 
3340 		skb_queue_tail(queue, skb);
3341 	} else {
3342 		/* Fragmented */
3343 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3344 
3345 		skb_shinfo(skb)->frag_list = NULL;
3346 
3347 		__skb_queue_tail(queue, skb);
3348 
3349 		do {
3350 			skb = list; list = list->next;
3351 
3352 			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3353 			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3354 						   0x00);
3355 			hci_add_iso_hdr(skb, conn->handle, flags);
3356 
3357 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3358 
3359 			__skb_queue_tail(queue, skb);
3360 		} while (list);
3361 	}
3362 }
3363 
3364 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3365 {
3366 	struct hci_dev *hdev = conn->hdev;
3367 
3368 	BT_DBG("%s len %d", hdev->name, skb->len);
3369 
3370 	hci_queue_iso(conn, &conn->data_q, skb);
3371 
3372 	queue_work(hdev->workqueue, &hdev->tx_work);
3373 }
3374 
3375 /* ---- HCI TX task (outgoing data) ---- */
3376 
3377 /* HCI Connection scheduler */
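/* Compute the TX quota for a connection: the controller buffer credits
 * available for its link type, divided by the number of ready connections,
 * with a minimum of one packet.
 */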
3378 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3379 {
3380 	struct hci_dev *hdev;
3381 	int cnt, q;
3382 
3383 	if (!conn) {
3384 		*quote = 0;
3385 		return;
3386 	}
3387 
3388 	hdev = conn->hdev;
3389 
3390 	switch (conn->type) {
3391 	case ACL_LINK:
3392 		cnt = hdev->acl_cnt;
3393 		break;
3394 	case SCO_LINK:
3395 	case ESCO_LINK:
3396 		cnt = hdev->sco_cnt;
3397 		break;
3398 	case LE_LINK:
3399 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3400 		break;
3401 	case ISO_LINK:
3402 		cnt = hdev->iso_mtu ? hdev->iso_cnt :
3403 			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3404 		break;
3405 	default:
3406 		cnt = 0;
3407 		bt_dev_err(hdev, "unknown link type %d", conn->type);
3408 	}
3409 
3410 	q = cnt / num;
3411 	*quote = q ? q : 1;
3412 }
3413 
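/* Pick the connection of the given link type that has queued data and the
 * fewest packets in flight, and compute its TX quota.
 */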
3414 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3415 				     int *quote)
3416 {
3417 	struct hci_conn_hash *h = &hdev->conn_hash;
3418 	struct hci_conn *conn = NULL, *c;
3419 	unsigned int num = 0, min = ~0;
3420 
3421 	/* We don't have to lock device here. Connections are always
3422 	 * added and removed with TX task disabled. */
3423 
3424 	rcu_read_lock();
3425 
3426 	list_for_each_entry_rcu(c, &h->list, list) {
3427 		if (c->type != type || skb_queue_empty(&c->data_q))
3428 			continue;
3429 
3430 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3431 			continue;
3432 
3433 		num++;
3434 
3435 		if (c->sent < min) {
3436 			min  = c->sent;
3437 			conn = c;
3438 		}
3439 
3440 		if (hci_conn_num(hdev, type) == num)
3441 			break;
3442 	}
3443 
3444 	rcu_read_unlock();
3445 
3446 	hci_quote_sent(conn, num, quote);
3447 
3448 	BT_DBG("conn %p quote %d", conn, *quote);
3449 	return conn;
3450 }
3451 
3452 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3453 {
3454 	struct hci_conn_hash *h = &hdev->conn_hash;
3455 	struct hci_conn *c;
3456 
3457 	bt_dev_err(hdev, "link tx timeout");
3458 
3459 	rcu_read_lock();
3460 
3461 	/* Kill stalled connections */
3462 	list_for_each_entry_rcu(c, &h->list, list) {
3463 		if (c->type == type && c->sent) {
3464 			bt_dev_err(hdev, "killing stalled connection %pMR",
3465 				   &c->dst);
3466 			/* hci_disconnect might sleep, so, we have to release
3467 			 * the RCU read lock before calling it.
3468 			 */
3469 			rcu_read_unlock();
3470 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3471 			rcu_read_lock();
3472 		}
3473 	}
3474 
3475 	rcu_read_unlock();
3476 }
3477 
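/* Pick the channel to service next: among channels whose head packet has the
 * highest pending priority, choose the one on the connection with the fewest
 * packets in flight, and compute its TX quota.
 */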
3478 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3479 				      int *quote)
3480 {
3481 	struct hci_conn_hash *h = &hdev->conn_hash;
3482 	struct hci_chan *chan = NULL;
3483 	unsigned int num = 0, min = ~0, cur_prio = 0;
3484 	struct hci_conn *conn;
3485 	int conn_num = 0;
3486 
3487 	BT_DBG("%s", hdev->name);
3488 
3489 	rcu_read_lock();
3490 
3491 	list_for_each_entry_rcu(conn, &h->list, list) {
3492 		struct hci_chan *tmp;
3493 
3494 		if (conn->type != type)
3495 			continue;
3496 
3497 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3498 			continue;
3499 
3500 		conn_num++;
3501 
3502 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3503 			struct sk_buff *skb;
3504 
3505 			if (skb_queue_empty(&tmp->data_q))
3506 				continue;
3507 
3508 			skb = skb_peek(&tmp->data_q);
3509 			if (skb->priority < cur_prio)
3510 				continue;
3511 
3512 			if (skb->priority > cur_prio) {
3513 				num = 0;
3514 				min = ~0;
3515 				cur_prio = skb->priority;
3516 			}
3517 
3518 			num++;
3519 
3520 			if (conn->sent < min) {
3521 				min  = conn->sent;
3522 				chan = tmp;
3523 			}
3524 		}
3525 
3526 		if (hci_conn_num(hdev, type) == conn_num)
3527 			break;
3528 	}
3529 
3530 	rcu_read_unlock();
3531 
3532 	if (!chan)
3533 		return NULL;
3534 
3535 	hci_quote_sent(chan->conn, num, quote);
3536 
3537 	BT_DBG("chan %p quote %d", chan, *quote);
3538 	return chan;
3539 }
3540 
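/* After a TX round, promote the head packet priority on channels that did not
 * get to send anything, so lower-priority traffic is not starved indefinitely.
 */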
3541 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3542 {
3543 	struct hci_conn_hash *h = &hdev->conn_hash;
3544 	struct hci_conn *conn;
3545 	int num = 0;
3546 
3547 	BT_DBG("%s", hdev->name);
3548 
3549 	rcu_read_lock();
3550 
3551 	list_for_each_entry_rcu(conn, &h->list, list) {
3552 		struct hci_chan *chan;
3553 
3554 		if (conn->type != type)
3555 			continue;
3556 
3557 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3558 			continue;
3559 
3560 		num++;
3561 
3562 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3563 			struct sk_buff *skb;
3564 
3565 			if (chan->sent) {
3566 				chan->sent = 0;
3567 				continue;
3568 			}
3569 
3570 			if (skb_queue_empty(&chan->data_q))
3571 				continue;
3572 
3573 			skb = skb_peek(&chan->data_q);
3574 			if (skb->priority >= HCI_PRIO_MAX - 1)
3575 				continue;
3576 
3577 			skb->priority = HCI_PRIO_MAX - 1;
3578 
3579 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3580 			       skb->priority);
3581 		}
3582 
3583 		if (hci_conn_num(hdev, type) == num)
3584 			break;
3585 	}
3586 
3587 	rcu_read_unlock();
3588 
3589 }
3590 
3591 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3592 {
3593 	unsigned long last_tx;
3594 
3595 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3596 		return;
3597 
3598 	switch (type) {
3599 	case LE_LINK:
3600 		last_tx = hdev->le_last_tx;
3601 		break;
3602 	default:
3603 		last_tx = hdev->acl_last_tx;
3604 		break;
3605 	}
3606 
3607 	/* tx timeout must be longer than maximum link supervision timeout
3608 	 * (40.9 seconds)
3609 	 */
3610 	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3611 		hci_link_tx_to(hdev, type);
3612 }
3613 
3614 /* Schedule SCO */
3615 static void hci_sched_sco(struct hci_dev *hdev)
3616 {
3617 	struct hci_conn *conn;
3618 	struct sk_buff *skb;
3619 	int quote;
3620 
3621 	BT_DBG("%s", hdev->name);
3622 
3623 	if (!hci_conn_num(hdev, SCO_LINK))
3624 		return;
3625 
3626 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3627 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3628 			BT_DBG("skb %p len %d", skb, skb->len);
3629 			hci_send_frame(hdev, skb);
3630 
3631 			conn->sent++;
3632 			if (conn->sent == ~0)
3633 				conn->sent = 0;
3634 		}
3635 	}
3636 }
3637 
3638 static void hci_sched_esco(struct hci_dev *hdev)
3639 {
3640 	struct hci_conn *conn;
3641 	struct sk_buff *skb;
3642 	int quote;
3643 
3644 	BT_DBG("%s", hdev->name);
3645 
3646 	if (!hci_conn_num(hdev, ESCO_LINK))
3647 		return;
3648 
3649 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3650 						     &quote))) {
3651 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3652 			BT_DBG("skb %p len %d", skb, skb->len);
3653 			hci_send_frame(hdev, skb);
3654 
3655 			conn->sent++;
3656 			if (conn->sent == ~0)
3657 				conn->sent = 0;
3658 		}
3659 	}
3660 }
3661 
3662 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3663 {
3664 	unsigned int cnt = hdev->acl_cnt;
3665 	struct hci_chan *chan;
3666 	struct sk_buff *skb;
3667 	int quote;
3668 
3669 	__check_timeout(hdev, cnt, ACL_LINK);
3670 
3671 	while (hdev->acl_cnt &&
3672 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3673 		u32 priority = (skb_peek(&chan->data_q))->priority;
3674 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3675 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3676 			       skb->len, skb->priority);
3677 
3678 			/* Stop if priority has changed */
3679 			if (skb->priority < priority)
3680 				break;
3681 
3682 			skb = skb_dequeue(&chan->data_q);
3683 
3684 			hci_conn_enter_active_mode(chan->conn,
3685 						   bt_cb(skb)->force_active);
3686 
3687 			hci_send_frame(hdev, skb);
3688 			hdev->acl_last_tx = jiffies;
3689 
3690 			hdev->acl_cnt--;
3691 			chan->sent++;
3692 			chan->conn->sent++;
3693 
3694 			/* Send pending SCO packets right away */
3695 			hci_sched_sco(hdev);
3696 			hci_sched_esco(hdev);
3697 		}
3698 	}
3699 
3700 	if (cnt != hdev->acl_cnt)
3701 		hci_prio_recalculate(hdev, ACL_LINK);
3702 }
3703 
3704 static void hci_sched_acl(struct hci_dev *hdev)
3705 {
3706 	BT_DBG("%s", hdev->name);
3707 
3708 	/* Nothing to schedule if there are no ACL links */
3709 	if (!hci_conn_num(hdev, ACL_LINK))
3710 		return;
3711 
3712 	hci_sched_acl_pkt(hdev);
3713 }
3714 
3715 static void hci_sched_le(struct hci_dev *hdev)
3716 {
3717 	struct hci_chan *chan;
3718 	struct sk_buff *skb;
3719 	int quote, cnt, tmp;
3720 
3721 	BT_DBG("%s", hdev->name);
3722 
3723 	if (!hci_conn_num(hdev, LE_LINK))
3724 		return;
3725 
3726 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3727 
3728 	__check_timeout(hdev, cnt, LE_LINK);
3729 
3730 	tmp = cnt;
3731 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3732 		u32 priority = (skb_peek(&chan->data_q))->priority;
3733 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3734 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3735 			       skb->len, skb->priority);
3736 
3737 			/* Stop if priority has changed */
3738 			if (skb->priority < priority)
3739 				break;
3740 
3741 			skb = skb_dequeue(&chan->data_q);
3742 
3743 			hci_send_frame(hdev, skb);
3744 			hdev->le_last_tx = jiffies;
3745 
3746 			cnt--;
3747 			chan->sent++;
3748 			chan->conn->sent++;
3749 
3750 			/* Send pending SCO packets right away */
3751 			hci_sched_sco(hdev);
3752 			hci_sched_esco(hdev);
3753 		}
3754 	}
3755 
3756 	if (hdev->le_pkts)
3757 		hdev->le_cnt = cnt;
3758 	else
3759 		hdev->acl_cnt = cnt;
3760 
3761 	if (cnt != tmp)
3762 		hci_prio_recalculate(hdev, LE_LINK);
3763 }
3764 
3765 /* Schedule CIS */
3766 static void hci_sched_iso(struct hci_dev *hdev)
3767 {
3768 	struct hci_conn *conn;
3769 	struct sk_buff *skb;
3770 	int quote, *cnt;
3771 
3772 	BT_DBG("%s", hdev->name);
3773 
3774 	if (!hci_conn_num(hdev, ISO_LINK))
3775 		return;
3776 
3777 	cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3778 		hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3779 	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3780 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3781 			BT_DBG("skb %p len %d", skb, skb->len);
3782 			hci_send_frame(hdev, skb);
3783 
3784 			conn->sent++;
3785 			if (conn->sent == ~0)
3786 				conn->sent = 0;
3787 			(*cnt)--;
3788 		}
3789 	}
3790 }
3791 
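/* Editorial note (not in revision e77f43d5): TX work item. Runs the per-link
 * schedulers unless the device is in HCI_USER_CHANNEL mode, then flushes any
 * packets waiting in the raw queue (hdev->raw_q).
 */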
3792 static void hci_tx_work(struct work_struct *work)
3793 {
3794 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3795 	struct sk_buff *skb;
3796 
3797 	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3798 	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3799 
3800 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3801 		/* Schedule queues and send frames to the HCI driver */
3802 		hci_sched_sco(hdev);
3803 		hci_sched_esco(hdev);
3804 		hci_sched_iso(hdev);
3805 		hci_sched_acl(hdev);
3806 		hci_sched_le(hdev);
3807 	}
3808 
3809 	/* Send any queued raw (unknown type) packets */
3810 	while ((skb = skb_dequeue(&hdev->raw_q)))
3811 		hci_send_frame(hdev, skb);
3812 }
3813 
3814 /* ----- HCI RX task (incoming data processing) ----- */
3815 
3816 /* ACL data packet */
3817 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3818 {
3819 	struct hci_acl_hdr *hdr = (void *) skb->data;
3820 	struct hci_conn *conn;
3821 	__u16 handle, flags;
3822 
3823 	skb_pull(skb, HCI_ACL_HDR_SIZE);
3824 
3825 	handle = __le16_to_cpu(hdr->handle);
3826 	flags  = hci_flags(handle);
3827 	handle = hci_handle(handle);
3828 
3829 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3830 	       handle, flags);
3831 
3832 	hdev->stat.acl_rx++;
3833 
3834 	hci_dev_lock(hdev);
3835 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3836 	hci_dev_unlock(hdev);
3837 
3838 	if (conn) {
3839 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3840 
3841 		/* Send to upper protocol */
3842 		l2cap_recv_acldata(conn, skb, flags);
3843 		return;
3844 	} else {
3845 		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3846 			   handle);
3847 	}
3848 
3849 	kfree_skb(skb);
3850 }
3851 
3852 /* SCO data packet */
3853 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3854 {
3855 	struct hci_sco_hdr *hdr = (void *) skb->data;
3856 	struct hci_conn *conn;
3857 	__u16 handle, flags;
3858 
3859 	skb_pull(skb, HCI_SCO_HDR_SIZE);
3860 
3861 	handle = __le16_to_cpu(hdr->handle);
3862 	flags  = hci_flags(handle);
3863 	handle = hci_handle(handle);
3864 
3865 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3866 	       handle, flags);
3867 
3868 	hdev->stat.sco_rx++;
3869 
3870 	hci_dev_lock(hdev);
3871 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3872 	hci_dev_unlock(hdev);
3873 
3874 	if (conn) {
3875 		/* Send to upper protocol */
3876 		hci_skb_pkt_status(skb) = flags & 0x03;
3877 		sco_recv_scodata(conn, skb);
3878 		return;
3879 	} else {
3880 		bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3881 				       handle);
3882 	}
3883 
3884 	kfree_skb(skb);
3885 }
3886 
3887 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3888 {
3889 	struct hci_iso_hdr *hdr;
3890 	struct hci_conn *conn;
3891 	__u16 handle, flags;
3892 
3893 	hdr = skb_pull_data(skb, sizeof(*hdr));
3894 	if (!hdr) {
3895 		bt_dev_err(hdev, "ISO packet too small");
3896 		goto drop;
3897 	}
3898 
3899 	handle = __le16_to_cpu(hdr->handle);
3900 	flags  = hci_flags(handle);
3901 	handle = hci_handle(handle);
3902 
3903 	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3904 		   handle, flags);
3905 
3906 	hci_dev_lock(hdev);
3907 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3908 	hci_dev_unlock(hdev);
3909 
3910 	if (!conn) {
3911 		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3912 			   handle);
3913 		goto drop;
3914 	}
3915 
3916 	/* Send to upper protocol */
3917 	iso_recv(conn, skb, flags);
3918 	return;
3919 
3920 drop:
3921 	kfree_skb(skb);
3922 }
3923 
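/* Editorial note (not in revision e77f43d5): a request is complete when the
 * command queue is empty or the next queued command is marked as the start
 * of a new request (HCI_REQ_START).
 */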
3924 static bool hci_req_is_complete(struct hci_dev *hdev)
3925 {
3926 	struct sk_buff *skb;
3927 
3928 	skb = skb_peek(&hdev->cmd_q);
3929 	if (!skb)
3930 		return true;
3931 
3932 	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3933 }
3934 
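/* Editorial note (not in revision e77f43d5): re-queue a clone of the last
 * command sent to the controller so that it is retransmitted, except for
 * HCI_Reset which must not be repeated.
 */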
3935 static void hci_resend_last(struct hci_dev *hdev)
3936 {
3937 	struct hci_command_hdr *sent;
3938 	struct sk_buff *skb;
3939 	u16 opcode;
3940 
3941 	if (!hdev->sent_cmd)
3942 		return;
3943 
3944 	sent = (void *) hdev->sent_cmd->data;
3945 	opcode = __le16_to_cpu(sent->opcode);
3946 	if (opcode == HCI_OP_RESET)
3947 		return;
3948 
3949 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3950 	if (!skb)
3951 		return;
3952 
3953 	skb_queue_head(&hdev->cmd_q, skb);
3954 	queue_work(hdev->workqueue, &hdev->cmd_work);
3955 }
3956 
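/* Editorial note (not in revision e77f43d5): resolve the completion callback
 * for the request that the completed command belongs to. If the command
 * failed, the remaining queued commands of that request are discarded.
 */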
3957 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3958 			  hci_req_complete_t *req_complete,
3959 			  hci_req_complete_skb_t *req_complete_skb)
3960 {
3961 	struct sk_buff *skb;
3962 	unsigned long flags;
3963 
3964 	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3965 
3966 	/* If the completed command doesn't match the last one that was
3967 	 * sent, we need to do special handling of it.
3968 	 */
3969 	if (!hci_sent_cmd_data(hdev, opcode)) {
3970 		/* Some CSR-based controllers generate a spontaneous
3971 		 * reset complete event during init, and any pending
3972 		 * command will never be completed. In such a case we
3973 		 * need to resend whatever was the last sent
3974 		 * command.
3975 		 */
3976 		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3977 			hci_resend_last(hdev);
3978 
3979 		return;
3980 	}
3981 
3982 	/* If we reach this point this event matches the last command sent */
3983 	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3984 
3985 	/* If the command succeeded and there are still more commands in
3986 	 * this request, the request is not yet complete.
3987 	 */
3988 	if (!status && !hci_req_is_complete(hdev))
3989 		return;
3990 
3991 	skb = hdev->req_skb;
3992 
3993 	/* If this was the last command in a request, the complete
3994 	 * callback would be found in hdev->req_skb instead of the
3995 	 * command queue (hdev->cmd_q).
3996 	 */
3997 	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
3998 		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3999 		return;
4000 	}
4001 
4002 	if (skb && bt_cb(skb)->hci.req_complete) {
4003 		*req_complete = bt_cb(skb)->hci.req_complete;
4004 		return;
4005 	}
4006 
4007 	/* Remove all pending commands belonging to this request */
4008 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4009 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4010 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4011 			__skb_queue_head(&hdev->cmd_q, skb);
4012 			break;
4013 		}
4014 
4015 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4016 			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4017 		else
4018 			*req_complete = bt_cb(skb)->hci.req_complete;
4019 		dev_kfree_skb_irq(skb);
4020 	}
4021 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4022 }
4023 
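/* Editorial note (not in revision e77f43d5): RX work item. Drains hdev->rx_q,
 * mirroring each packet to the HCI monitor (and to raw sockets when in
 * promiscuous mode) before dispatching it to the event, ACL, SCO or ISO
 * handler according to its packet type.
 */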
4024 static void hci_rx_work(struct work_struct *work)
4025 {
4026 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4027 	struct sk_buff *skb;
4028 
4029 	BT_DBG("%s", hdev->name);
4030 
4031 	/* The kcov_remote functions are used to collect packet parsing
4032 	 * coverage information in this background thread and to associate
4033 	 * that coverage with the syscall thread that originally injected
4034 	 * the packet. This helps with fuzzing the kernel.
4035 	 */
4036 	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
4037 		kcov_remote_start_common(skb_get_kcov_handle(skb));
4038 
4039 		/* Send copy to monitor */
4040 		hci_send_to_monitor(hdev, skb);
4041 
4042 		if (atomic_read(&hdev->promisc)) {
4043 			/* Send copy to the sockets */
4044 			hci_send_to_sock(hdev, skb);
4045 		}
4046 
4047 		/* If the device has been opened in HCI_USER_CHANNEL,
4048 		 * userspace has exclusive access to the device.
4049 		 * While the device is in HCI_INIT, we still need to pass
4050 		 * the data packets to the driver in order
4051 		 * to complete its setup().
4052 		 */
4053 		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4054 		    !test_bit(HCI_INIT, &hdev->flags)) {
4055 			kfree_skb(skb);
4056 			continue;
4057 		}
4058 
4059 		if (test_bit(HCI_INIT, &hdev->flags)) {
4060 			/* Don't process data packets in this state. */
4061 			switch (hci_skb_pkt_type(skb)) {
4062 			case HCI_ACLDATA_PKT:
4063 			case HCI_SCODATA_PKT:
4064 			case HCI_ISODATA_PKT:
4065 				kfree_skb(skb);
4066 				continue;
4067 			}
4068 		}
4069 
4070 		/* Process frame */
4071 		switch (hci_skb_pkt_type(skb)) {
4072 		case HCI_EVENT_PKT:
4073 			BT_DBG("%s Event packet", hdev->name);
4074 			hci_event_packet(hdev, skb);
4075 			break;
4076 
4077 		case HCI_ACLDATA_PKT:
4078 			BT_DBG("%s ACL data packet", hdev->name);
4079 			hci_acldata_packet(hdev, skb);
4080 			break;
4081 
4082 		case HCI_SCODATA_PKT:
4083 			BT_DBG("%s SCO data packet", hdev->name);
4084 			hci_scodata_packet(hdev, skb);
4085 			break;
4086 
4087 		case HCI_ISODATA_PKT:
4088 			BT_DBG("%s ISO data packet", hdev->name);
4089 			hci_isodata_packet(hdev, skb);
4090 			break;
4091 
4092 		default:
4093 			kfree_skb(skb);
4094 			break;
4095 		}
4096 	}
4097 }
4098 
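/* Editorial note (not in revision e77f43d5): send a single command to the
 * driver, keeping a clone in hdev->sent_cmd so it can be matched against the
 * command complete event (and resent if needed). A send failure cancels the
 * pending synchronous request.
 */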
4099 static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4100 {
4101 	int err;
4102 
4103 	bt_dev_dbg(hdev, "skb %p", skb);
4104 
4105 	kfree_skb(hdev->sent_cmd);
4106 
4107 	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4108 	if (!hdev->sent_cmd) {
4109 		skb_queue_head(&hdev->cmd_q, skb);
4110 		queue_work(hdev->workqueue, &hdev->cmd_work);
4111 		return;
4112 	}
4113 
4114 	err = hci_send_frame(hdev, skb);
4115 	if (err < 0) {
4116 		hci_cmd_sync_cancel_sync(hdev, -err);
4117 		return;
4118 	}
4119 
4120 	if (hci_req_status_pend(hdev) &&
4121 	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4122 		kfree_skb(hdev->req_skb);
4123 		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4124 	}
4125 
4126 	atomic_dec(&hdev->cmd_cnt);
4127 }
4128 
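/* Editorial note (not in revision e77f43d5): command work item. If the
 * controller still has command credits (cmd_cnt), dequeue the next command,
 * send it and (re)arm the command timeout unless a reset or workqueue drain
 * is in progress.
 */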
4129 static void hci_cmd_work(struct work_struct *work)
4130 {
4131 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4132 	struct sk_buff *skb;
4133 
4134 	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4135 	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4136 
4137 	/* Send queued commands */
4138 	if (atomic_read(&hdev->cmd_cnt)) {
4139 		skb = skb_dequeue(&hdev->cmd_q);
4140 		if (!skb)
4141 			return;
4142 
4143 		hci_send_cmd_sync(hdev, skb);
4144 
4145 		rcu_read_lock();
4146 		if (test_bit(HCI_RESET, &hdev->flags) ||
4147 		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4148 			cancel_delayed_work(&hdev->cmd_timer);
4149 		else
4150 			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4151 					   HCI_CMD_TIMEOUT);
4152 		rcu_read_unlock();
4153 	}
4154 }
4155