xref: /linux/net/bluetooth/hci_sync.c (revision 86fbd9f6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * BlueZ - Bluetooth protocol stack for Linux
4  *
5  * Copyright (C) 2021 Intel Corporation
6  * Copyright 2023 NXP
7  */
8 
9 #include <linux/property.h>
10 
11 #include <net/bluetooth/bluetooth.h>
12 #include <net/bluetooth/hci_core.h>
13 #include <net/bluetooth/mgmt.h>
14 
15 #include "hci_request.h"
16 #include "hci_codec.h"
17 #include "hci_debugfs.h"
18 #include "smp.h"
19 #include "eir.h"
20 #include "msft.h"
21 #include "aosp.h"
22 #include "leds.h"
23 
24 static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
25 				  struct sk_buff *skb)
26 {
27 	bt_dev_dbg(hdev, "result 0x%2.2x", result);
28 
29 	if (hdev->req_status != HCI_REQ_PEND)
30 		return;
31 
32 	hdev->req_result = result;
33 	hdev->req_status = HCI_REQ_DONE;
34 
35 	/* Free the request command so it is not used as response */
36 	kfree_skb(hdev->req_skb);
37 	hdev->req_skb = NULL;
38 
39 	if (skb) {
40 		struct sock *sk = hci_skb_sk(skb);
41 
42 		/* Drop sk reference if set */
43 		if (sk)
44 			sock_put(sk);
45 
46 		hdev->req_rsp = skb_get(skb);
47 	}
48 
49 	wake_up_interruptible(&hdev->req_wait_q);
50 }
51 
52 static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode,
53 					  u32 plen, const void *param,
54 					  struct sock *sk)
55 {
56 	int len = HCI_COMMAND_HDR_SIZE + plen;
57 	struct hci_command_hdr *hdr;
58 	struct sk_buff *skb;
59 
60 	skb = bt_skb_alloc(len, GFP_ATOMIC);
61 	if (!skb)
62 		return NULL;
63 
64 	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
65 	hdr->opcode = cpu_to_le16(opcode);
66 	hdr->plen   = plen;
67 
68 	if (plen)
69 		skb_put_data(skb, param, plen);
70 
71 	bt_dev_dbg(hdev, "skb len %d", skb->len);
72 
73 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
74 	hci_skb_opcode(skb) = opcode;
75 
76 	/* Grab a reference if command needs to be associated with a sock (e.g.
77 	 * likely mgmt socket that initiated the command).
78 	 */
79 	if (sk) {
80 		hci_skb_sk(skb) = sk;
81 		sock_hold(sk);
82 	}
83 
84 	return skb;
85 }
86 
87 static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
88 			     const void *param, u8 event, struct sock *sk)
89 {
90 	struct hci_dev *hdev = req->hdev;
91 	struct sk_buff *skb;
92 
93 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
94 
95 	/* If an error occurred during request building, there is no point in
96 	 * queueing the HCI command. We can simply return.
97 	 */
98 	if (req->err)
99 		return;
100 
101 	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
102 	if (!skb) {
103 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
104 			   opcode);
105 		req->err = -ENOMEM;
106 		return;
107 	}
108 
109 	if (skb_queue_empty(&req->cmd_q))
110 		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
111 
112 	hci_skb_event(skb) = event;
113 
114 	skb_queue_tail(&req->cmd_q, skb);
115 }
116 
117 static int hci_cmd_sync_run(struct hci_request *req)
118 {
119 	struct hci_dev *hdev = req->hdev;
120 	struct sk_buff *skb;
121 	unsigned long flags;
122 
123 	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
124 
125 	/* If an error occurred during request building, remove all HCI
126 	 * commands queued on the HCI request queue.
127 	 */
128 	if (req->err) {
129 		skb_queue_purge(&req->cmd_q);
130 		return req->err;
131 	}
132 
133 	/* Do not allow empty requests */
134 	if (skb_queue_empty(&req->cmd_q))
135 		return -ENODATA;
136 
137 	skb = skb_peek_tail(&req->cmd_q);
138 	bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
139 	bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
140 
141 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
142 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
143 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
144 
145 	queue_work(hdev->workqueue, &hdev->cmd_work);
146 
147 	return 0;
148 }
149 
150 /* This function requires the caller holds hdev->req_lock. */
151 struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
152 				  const void *param, u8 event, u32 timeout,
153 				  struct sock *sk)
154 {
155 	struct hci_request req;
156 	struct sk_buff *skb;
157 	int err = 0;
158 
159 	bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);
160 
161 	hci_req_init(&req, hdev);
162 
163 	hci_cmd_sync_add(&req, opcode, plen, param, event, sk);
164 
165 	hdev->req_status = HCI_REQ_PEND;
166 
167 	err = hci_cmd_sync_run(&req);
168 	if (err < 0)
169 		return ERR_PTR(err);
170 
171 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
172 					       hdev->req_status != HCI_REQ_PEND,
173 					       timeout);
174 
175 	if (err == -ERESTARTSYS)
176 		return ERR_PTR(-EINTR);
177 
178 	switch (hdev->req_status) {
179 	case HCI_REQ_DONE:
180 		err = -bt_to_errno(hdev->req_result);
181 		break;
182 
183 	case HCI_REQ_CANCELED:
184 		err = -hdev->req_result;
185 		break;
186 
187 	default:
188 		err = -ETIMEDOUT;
189 		break;
190 	}
191 
192 	hdev->req_status = 0;
193 	hdev->req_result = 0;
194 	skb = hdev->req_rsp;
195 	hdev->req_rsp = NULL;
196 
197 	bt_dev_dbg(hdev, "end: err %d", err);
198 
199 	if (err < 0) {
200 		kfree_skb(skb);
201 		return ERR_PTR(err);
202 	}
203 
204 	return skb;
205 }
206 EXPORT_SYMBOL(__hci_cmd_sync_sk);
207 
208 /* This function requires the caller holds hdev->req_lock. */
209 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
210 			       const void *param, u32 timeout)
211 {
212 	return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
213 }
214 EXPORT_SYMBOL(__hci_cmd_sync);
215 
216 /* Send HCI command and wait for command complete event */
217 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
218 			     const void *param, u32 timeout)
219 {
220 	struct sk_buff *skb;
221 
222 	if (!test_bit(HCI_UP, &hdev->flags))
223 		return ERR_PTR(-ENETDOWN);
224 
225 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
226 
227 	hci_req_sync_lock(hdev);
228 	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
229 	hci_req_sync_unlock(hdev);
230 
231 	return skb;
232 }
233 EXPORT_SYMBOL(hci_cmd_sync);
234 
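/* Example (editor's sketch, not part of the original file): a typical caller
 * sends a command and consumes the Command Complete parameters from the
 * returned skb. HCI_OP_READ_LOCAL_VERSION, struct hci_rp_read_local_version
 * and HCI_CMD_TIMEOUT come from hci.h; the example_* name is hypothetical.
 */
static int example_read_local_version(struct hci_dev *hdev)
{
	struct hci_rp_read_local_version *rp;
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			   HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	if (skb->len < sizeof(*rp)) {
		kfree_skb(skb);
		return -EIO;
	}

	rp = (void *)skb->data;
	bt_dev_dbg(hdev, "hci_ver %u manufacturer %u", rp->hci_ver,
		   le16_to_cpu(rp->manufacturer));

	kfree_skb(skb);
	return 0;
}
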
235 /* This function requires the caller holds hdev->req_lock. */
236 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
237 				  const void *param, u8 event, u32 timeout)
238 {
239 	return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
240 				 NULL);
241 }
242 EXPORT_SYMBOL(__hci_cmd_sync_ev);
243 
244 /* This function requires the caller holds hdev->req_lock. */
245 int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
246 			     const void *param, u8 event, u32 timeout,
247 			     struct sock *sk)
248 {
249 	struct sk_buff *skb;
250 	u8 status;
251 
252 	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
253 	if (IS_ERR(skb)) {
254 		if (!event)
255 			bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
256 				   PTR_ERR(skb));
257 		return PTR_ERR(skb);
258 	}
259 
260 	/* If the command returns a status event, skb will be set to NULL as
261 	 * there are no parameters; in case of failure, IS_ERR(skb) is true and
262 	 * the actual error can be found with PTR_ERR(skb).
263 	 */
264 	if (!skb)
265 		return 0;
266 
267 	status = skb->data[0];
268 
269 	kfree_skb(skb);
270 
271 	return status;
272 }
273 EXPORT_SYMBOL(__hci_cmd_sync_status_sk);
274 
275 int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
276 			  const void *param, u32 timeout)
277 {
278 	return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
279 					NULL);
280 }
281 EXPORT_SYMBOL(__hci_cmd_sync_status);
282 
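/* Example (editor's sketch): for commands whose response carries nothing but
 * a status byte, __hci_cmd_sync_status() is the simpler entry point; it
 * returns 0 on success, a positive HCI status, or a negative errno. Since it
 * is one of the variants that requires hdev->req_lock, a caller would wrap it
 * in hci_req_sync_lock()/hci_req_sync_unlock(). The example_* name is
 * hypothetical.
 */
static int example_write_scan_enable(struct hci_dev *hdev, u8 scan)
{
	int err;

	hci_req_sync_lock(hdev);
	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
				    sizeof(scan), &scan, HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	return err;
}
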
283 static void hci_cmd_sync_work(struct work_struct *work)
284 {
285 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
286 
287 	bt_dev_dbg(hdev, "");
288 
289 	/* Dequeue all entries and run them */
290 	while (1) {
291 		struct hci_cmd_sync_work_entry *entry;
292 
293 		mutex_lock(&hdev->cmd_sync_work_lock);
294 		entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
295 						 struct hci_cmd_sync_work_entry,
296 						 list);
297 		if (entry)
298 			list_del(&entry->list);
299 		mutex_unlock(&hdev->cmd_sync_work_lock);
300 
301 		if (!entry)
302 			break;
303 
304 		bt_dev_dbg(hdev, "entry %p", entry);
305 
306 		if (entry->func) {
307 			int err;
308 
309 			hci_req_sync_lock(hdev);
310 			err = entry->func(hdev, entry->data);
311 			if (entry->destroy)
312 				entry->destroy(hdev, entry->data, err);
313 			hci_req_sync_unlock(hdev);
314 		}
315 
316 		kfree(entry);
317 	}
318 }
319 
320 static void hci_cmd_sync_cancel_work(struct work_struct *work)
321 {
322 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);
323 
324 	cancel_delayed_work_sync(&hdev->cmd_timer);
325 	cancel_delayed_work_sync(&hdev->ncmd_timer);
326 	atomic_set(&hdev->cmd_cnt, 1);
327 
328 	wake_up_interruptible(&hdev->req_wait_q);
329 }
330 
331 static int hci_scan_disable_sync(struct hci_dev *hdev);
332 static int scan_disable_sync(struct hci_dev *hdev, void *data)
333 {
334 	return hci_scan_disable_sync(hdev);
335 }
336 
337 static int hci_inquiry_sync(struct hci_dev *hdev, u8 length);
338 static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
339 {
340 	return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN);
341 }
342 
343 static void le_scan_disable(struct work_struct *work)
344 {
345 	struct hci_dev *hdev = container_of(work, struct hci_dev,
346 					    le_scan_disable.work);
347 	int status;
348 
349 	bt_dev_dbg(hdev, "");
350 	hci_dev_lock(hdev);
351 
352 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
353 		goto _return;
354 
355 	status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL);
356 	if (status) {
357 		bt_dev_err(hdev, "failed to disable LE scan: %d", status);
358 		goto _return;
359 	}
360 
361 	hdev->discovery.scan_start = 0;
362 
363 	/* If we were running LE only scan, change discovery state. If
364 	 * we were running both LE and BR/EDR inquiry simultaneously,
365 	 * and BR/EDR inquiry is already finished, stop discovery,
366 	 * otherwise BR/EDR inquiry will stop discovery when finished.
367 	 * If we will resolve remote device name, do not change
368 	 * discovery state.
369 	 */
370 
371 	if (hdev->discovery.type == DISCOV_TYPE_LE)
372 		goto discov_stopped;
373 
374 	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
375 		goto _return;
376 
377 	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
378 		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
379 		    hdev->discovery.state != DISCOVERY_RESOLVING)
380 			goto discov_stopped;
381 
382 		goto _return;
383 	}
384 
385 	status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL);
386 	if (status) {
387 		bt_dev_err(hdev, "inquiry failed: status %d", status);
388 		goto discov_stopped;
389 	}
390 
391 	goto _return;
392 
393 discov_stopped:
394 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
395 
396 _return:
397 	hci_dev_unlock(hdev);
398 }
399 
400 static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
401 				       u8 filter_dup);
402 
403 static int reenable_adv_sync(struct hci_dev *hdev, void *data)
404 {
405 	bt_dev_dbg(hdev, "");
406 
407 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
408 	    list_empty(&hdev->adv_instances))
409 		return 0;
410 
411 	if (hdev->cur_adv_instance) {
412 		return hci_schedule_adv_instance_sync(hdev,
413 						      hdev->cur_adv_instance,
414 						      true);
415 	} else {
416 		if (ext_adv_capable(hdev)) {
417 			hci_start_ext_adv_sync(hdev, 0x00);
418 		} else {
419 			hci_update_adv_data_sync(hdev, 0x00);
420 			hci_update_scan_rsp_data_sync(hdev, 0x00);
421 			hci_enable_advertising_sync(hdev);
422 		}
423 	}
424 
425 	return 0;
426 }
427 
428 static void reenable_adv(struct work_struct *work)
429 {
430 	struct hci_dev *hdev = container_of(work, struct hci_dev,
431 					    reenable_adv_work);
432 	int status;
433 
434 	bt_dev_dbg(hdev, "");
435 
436 	hci_dev_lock(hdev);
437 
438 	status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL);
439 	if (status)
440 		bt_dev_err(hdev, "failed to reenable ADV: %d", status);
441 
442 	hci_dev_unlock(hdev);
443 }
444 
445 static void cancel_adv_timeout(struct hci_dev *hdev)
446 {
447 	if (hdev->adv_instance_timeout) {
448 		hdev->adv_instance_timeout = 0;
449 		cancel_delayed_work(&hdev->adv_instance_expire);
450 	}
451 }
452 
453 /* For a single instance:
454  * - force == true: The instance will be removed even when its remaining
455  *   lifetime is not zero.
456  * - force == false: the instance will be deactivated but kept stored unless
457  *   the remaining lifetime is zero.
458  *
459  * For instance == 0x00:
460  * - force == true: All instances will be removed regardless of their timeout
461  *   setting.
462  * - force == false: Only instances that have a timeout will be removed.
463  */
464 int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
465 				u8 instance, bool force)
466 {
467 	struct adv_info *adv_instance, *n, *next_instance = NULL;
468 	int err;
469 	u8 rem_inst;
470 
471 	/* Cancel any timeout concerning the removed instance(s). */
472 	if (!instance || hdev->cur_adv_instance == instance)
473 		cancel_adv_timeout(hdev);
474 
475 	/* Get the next instance to advertise BEFORE we remove
476 	 * the current one. This can be the same instance again
477 	 * if there is only one instance.
478 	 */
479 	if (instance && hdev->cur_adv_instance == instance)
480 		next_instance = hci_get_next_instance(hdev, instance);
481 
482 	if (instance == 0x00) {
483 		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
484 					 list) {
485 			if (!(force || adv_instance->timeout))
486 				continue;
487 
488 			rem_inst = adv_instance->instance;
489 			err = hci_remove_adv_instance(hdev, rem_inst);
490 			if (!err)
491 				mgmt_advertising_removed(sk, hdev, rem_inst);
492 		}
493 	} else {
494 		adv_instance = hci_find_adv_instance(hdev, instance);
495 
496 		if (force || (adv_instance && adv_instance->timeout &&
497 			      !adv_instance->remaining_time)) {
498 			/* Don't advertise a removed instance. */
499 			if (next_instance &&
500 			    next_instance->instance == instance)
501 				next_instance = NULL;
502 
503 			err = hci_remove_adv_instance(hdev, instance);
504 			if (!err)
505 				mgmt_advertising_removed(sk, hdev, instance);
506 		}
507 	}
508 
509 	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
510 		return 0;
511 
512 	if (next_instance && !ext_adv_capable(hdev))
513 		return hci_schedule_adv_instance_sync(hdev,
514 						      next_instance->instance,
515 						      false);
516 
517 	return 0;
518 }
519 
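/* Example (editor's note): given the force semantics documented above,
 * hci_clear_adv_instance_sync(hdev, NULL, 0x00, true) removes every instance
 * unconditionally, while hci_clear_adv_instance_sync(hdev, NULL, 0x00, false)
 * removes only instances that have a timeout set. adv_timeout_expire_sync()
 * below uses the single-instance, force == false form so an expired instance
 * is deactivated or removed according to its remaining lifetime.
 */
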
520 static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data)
521 {
522 	u8 instance = *(u8 *)data;
523 
524 	kfree(data);
525 
526 	hci_clear_adv_instance_sync(hdev, NULL, instance, false);
527 
528 	if (list_empty(&hdev->adv_instances))
529 		return hci_disable_advertising_sync(hdev);
530 
531 	return 0;
532 }
533 
534 static void adv_timeout_expire(struct work_struct *work)
535 {
536 	u8 *inst_ptr;
537 	struct hci_dev *hdev = container_of(work, struct hci_dev,
538 					    adv_instance_expire.work);
539 
540 	bt_dev_dbg(hdev, "");
541 
542 	hci_dev_lock(hdev);
543 
544 	hdev->adv_instance_timeout = 0;
545 
546 	if (hdev->cur_adv_instance == 0x00)
547 		goto unlock;
548 
549 	inst_ptr = kmalloc(1, GFP_KERNEL);
550 	if (!inst_ptr)
551 		goto unlock;
552 
553 	*inst_ptr = hdev->cur_adv_instance;
554 	hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL);
555 
556 unlock:
557 	hci_dev_unlock(hdev);
558 }
559 
560 void hci_cmd_sync_init(struct hci_dev *hdev)
561 {
562 	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
563 	INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
564 	mutex_init(&hdev->cmd_sync_work_lock);
565 	mutex_init(&hdev->unregister_lock);
566 
567 	INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
568 	INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
569 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable);
570 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
571 }
572 
573 static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
574 				       struct hci_cmd_sync_work_entry *entry,
575 				       int err)
576 {
577 	if (entry->destroy)
578 		entry->destroy(hdev, entry->data, err);
579 
580 	list_del(&entry->list);
581 	kfree(entry);
582 }
583 
584 void hci_cmd_sync_clear(struct hci_dev *hdev)
585 {
586 	struct hci_cmd_sync_work_entry *entry, *tmp;
587 
588 	cancel_work_sync(&hdev->cmd_sync_work);
589 	cancel_work_sync(&hdev->reenable_adv_work);
590 
591 	mutex_lock(&hdev->cmd_sync_work_lock);
592 	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
593 		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
594 	mutex_unlock(&hdev->cmd_sync_work_lock);
595 }
596 
597 void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
598 {
599 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
600 
601 	if (hdev->req_status == HCI_REQ_PEND) {
602 		hdev->req_result = err;
603 		hdev->req_status = HCI_REQ_CANCELED;
604 
605 		queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
606 	}
607 }
608 EXPORT_SYMBOL(hci_cmd_sync_cancel);
609 
610 /* Cancel ongoing command request synchronously:
611  *
612  * - Set result and mark status to HCI_REQ_CANCELED
613  * - Wake up the command sync thread
614  */
615 void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
616 {
617 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
618 
619 	if (hdev->req_status == HCI_REQ_PEND) {
620 		/* req_result is __u32 so error must be positive to be properly
621 		 * propagated.
622 		 */
623 		hdev->req_result = err < 0 ? -err : err;
624 		hdev->req_status = HCI_REQ_CANCELED;
625 
626 		wake_up_interruptible(&hdev->req_wait_q);
627 	}
628 }
629 EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);
630 
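/* Worked example (editor's note): hci_cmd_sync_cancel_sync(hdev, -ECANCELED)
 * stores req_result = ECANCELED (positive, because req_result is __u32). The
 * waiter in __hci_cmd_sync_sk() then takes the HCI_REQ_CANCELED branch and
 * computes err = -hdev->req_result = -ECANCELED, so the original negative
 * errno is what callers ultimately see.
 */
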
631 /* Submit HCI command to be run as cmd_sync_work:
632  *
633  * - hdev must _not_ be unregistered
634  */
635 int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
636 			void *data, hci_cmd_sync_work_destroy_t destroy)
637 {
638 	struct hci_cmd_sync_work_entry *entry;
639 	int err = 0;
640 
641 	mutex_lock(&hdev->unregister_lock);
642 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
643 		err = -ENODEV;
644 		goto unlock;
645 	}
646 
647 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
648 	if (!entry) {
649 		err = -ENOMEM;
650 		goto unlock;
651 	}
652 	entry->func = func;
653 	entry->data = data;
654 	entry->destroy = destroy;
655 
656 	mutex_lock(&hdev->cmd_sync_work_lock);
657 	list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
658 	mutex_unlock(&hdev->cmd_sync_work_lock);
659 
660 	queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);
661 
662 unlock:
663 	mutex_unlock(&hdev->unregister_lock);
664 	return err;
665 }
666 EXPORT_SYMBOL(hci_cmd_sync_submit);
667 
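/* Example (editor's sketch; the example_* names are hypothetical): a typical
 * user allocates a context, queues a callback and lets the destroy hook free
 * it. hci_cmd_sync_work() runs the callback with hdev->req_lock held and then
 * calls destroy() with the callback's return value, or with -ECANCELED if the
 * entry is cancelled before it runs. On submit failure the entry was never
 * queued, so the caller frees the context itself.
 */
static int example_set_val_sync(struct hci_dev *hdev, void *data)
{
	u8 *val = data;

	bt_dev_dbg(hdev, "val %u", *val);
	return 0;
}

static void example_set_val_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);
}

static int example_submit(struct hci_dev *hdev, u8 value)
{
	u8 *val = kmalloc(sizeof(*val), GFP_KERNEL);
	int err;

	if (!val)
		return -ENOMEM;

	*val = value;

	err = hci_cmd_sync_submit(hdev, example_set_val_sync, val,
				  example_set_val_destroy);
	if (err)
		kfree(val);

	return err;
}
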
668 /* Queue HCI command:
669  *
670  * - hdev must be running
671  */
672 int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
673 		       void *data, hci_cmd_sync_work_destroy_t destroy)
674 {
675 	/* Only queue the command if hdev is running, which means it has been
676 	 * opened and is either in the init phase or already up.
677 	 */
678 	if (!test_bit(HCI_RUNNING, &hdev->flags))
679 		return -ENETDOWN;
680 
681 	return hci_cmd_sync_submit(hdev, func, data, destroy);
682 }
683 EXPORT_SYMBOL(hci_cmd_sync_queue);
684 
685 static struct hci_cmd_sync_work_entry *
686 _hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
687 			   void *data, hci_cmd_sync_work_destroy_t destroy)
688 {
689 	struct hci_cmd_sync_work_entry *entry, *tmp;
690 
691 	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
692 		if (func && entry->func != func)
693 			continue;
694 
695 		if (data && entry->data != data)
696 			continue;
697 
698 		if (destroy && entry->destroy != destroy)
699 			continue;
700 
701 		return entry;
702 	}
703 
704 	return NULL;
705 }
706 
707 /* Queue HCI command entry once:
708  *
709  * - Look up whether an entry already exists and, only if it doesn't, create
710  *   a new entry and queue it.
711  */
712 int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
713 			    void *data, hci_cmd_sync_work_destroy_t destroy)
714 {
715 	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
716 		return 0;
717 
718 	return hci_cmd_sync_queue(hdev, func, data, destroy);
719 }
720 EXPORT_SYMBOL(hci_cmd_sync_queue_once);
721 
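/* Example (editor's note): hci_cmd_sync_queue_once() makes it safe to request
 * the same deferred operation from several code paths; e.g. the
 * scan_disable_sync callback defined earlier in this file could be queued
 * idempotently with
 *
 *	hci_cmd_sync_queue_once(hdev, scan_disable_sync, NULL, NULL);
 *
 * A second call before the work runs finds the pending entry via
 * hci_cmd_sync_lookup_entry() and returns 0 without queuing a duplicate.
 */
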
722 /* Look up HCI command entry:
723  *
724  * - Return the first entry that matches by function callback, data, or
725  *   destroy callback.
726  */
727 struct hci_cmd_sync_work_entry *
728 hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
729 			  void *data, hci_cmd_sync_work_destroy_t destroy)
730 {
731 	struct hci_cmd_sync_work_entry *entry;
732 
733 	mutex_lock(&hdev->cmd_sync_work_lock);
734 	entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
735 	mutex_unlock(&hdev->cmd_sync_work_lock);
736 
737 	return entry;
738 }
739 EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);
740 
741 /* Cancel HCI command entry */
742 void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
743 			       struct hci_cmd_sync_work_entry *entry)
744 {
745 	mutex_lock(&hdev->cmd_sync_work_lock);
746 	_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
747 	mutex_unlock(&hdev->cmd_sync_work_lock);
748 }
749 EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);
750 
751 /* Dequeue one HCI command entry:
752  *
753  * - Lookup and cancel first entry that matches.
754  * - Look up and cancel the first entry that matches.
755 bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
756 			       hci_cmd_sync_work_func_t func,
757 			       void *data, hci_cmd_sync_work_destroy_t destroy)
758 {
759 	struct hci_cmd_sync_work_entry *entry;
760 
761 	entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
762 	if (!entry)
763 		return false;
764 
765 	hci_cmd_sync_cancel_entry(hdev, entry);
766 
767 	return true;
768 }
769 EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);
770 
771 /* Dequeue HCI command entry:
772  *
773  * - Look up and cancel any entry that matches by function callback, data,
774  *   or destroy callback.
775  */
776 bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
777 			  void *data, hci_cmd_sync_work_destroy_t destroy)
778 {
779 	struct hci_cmd_sync_work_entry *entry;
780 	bool ret = false;
781 
782 	mutex_lock(&hdev->cmd_sync_work_lock);
783 	while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
784 						   destroy))) {
785 		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
786 		ret = true;
787 	}
788 	mutex_unlock(&hdev->cmd_sync_work_lock);
789 
790 	return ret;
791 }
792 EXPORT_SYMBOL(hci_cmd_sync_dequeue);
793 
794 int hci_update_eir_sync(struct hci_dev *hdev)
795 {
796 	struct hci_cp_write_eir cp;
797 
798 	bt_dev_dbg(hdev, "");
799 
800 	if (!hdev_is_powered(hdev))
801 		return 0;
802 
803 	if (!lmp_ext_inq_capable(hdev))
804 		return 0;
805 
806 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
807 		return 0;
808 
809 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
810 		return 0;
811 
812 	memset(&cp, 0, sizeof(cp));
813 
814 	eir_create(hdev, cp.data);
815 
816 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
817 		return 0;
818 
819 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
820 
821 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
822 				     HCI_CMD_TIMEOUT);
823 }
824 
825 static u8 get_service_classes(struct hci_dev *hdev)
826 {
827 	struct bt_uuid *uuid;
828 	u8 val = 0;
829 
830 	list_for_each_entry(uuid, &hdev->uuids, list)
831 		val |= uuid->svc_hint;
832 
833 	return val;
834 }
835 
836 int hci_update_class_sync(struct hci_dev *hdev)
837 {
838 	u8 cod[3];
839 
840 	bt_dev_dbg(hdev, "");
841 
842 	if (!hdev_is_powered(hdev))
843 		return 0;
844 
845 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
846 		return 0;
847 
848 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
849 		return 0;
850 
851 	cod[0] = hdev->minor_class;
852 	cod[1] = hdev->major_class;
853 	cod[2] = get_service_classes(hdev);
854 
855 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
856 		cod[1] |= 0x20;
857 
858 	if (memcmp(cod, hdev->dev_class, 3) == 0)
859 		return 0;
860 
861 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
862 				     sizeof(cod), cod, HCI_CMD_TIMEOUT);
863 }
864 
865 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
866 {
867 	/* If there is no connection we are OK to advertise. */
868 	if (hci_conn_num(hdev, LE_LINK) == 0)
869 		return true;
870 
871 	/* Check le_states if there is any connection in peripheral role. */
872 	if (hdev->conn_hash.le_num_peripheral > 0) {
873 		/* Peripheral connection state and non connectable mode
874 		 * bit 20.
875 		 */
876 		if (!connectable && !(hdev->le_states[2] & 0x10))
877 			return false;
878 
879 		/* Peripheral connection state and connectable mode bit 38
880 		 * and scannable bit 21.
881 		 */
882 		if (connectable && (!(hdev->le_states[4] & 0x40) ||
883 				    !(hdev->le_states[2] & 0x20)))
884 			return false;
885 	}
886 
887 	/* Check le_states if there is any connection in central role. */
888 	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
889 		/* Central connection state and non connectable mode bit 18. */
890 		if (!connectable && !(hdev->le_states[2] & 0x02))
891 			return false;
892 
893 		/* Central connection state and connectable mode bit 35 and
894 		 * scannable 19.
895 		 */
896 		if (connectable && (!(hdev->le_states[4] & 0x08) ||
897 				    !(hdev->le_states[2] & 0x08)))
898 			return false;
899 	}
900 
901 	return true;
902 }
903 
904 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
905 {
906 	/* If privacy is not enabled don't use RPA */
907 	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
908 		return false;
909 
910 	/* If basic privacy mode is enabled use RPA */
911 	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
912 		return true;
913 
914 	/* If limited privacy mode is enabled don't use RPA if we're
915 	 * both discoverable and bondable.
916 	 */
917 	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
918 	    hci_dev_test_flag(hdev, HCI_BONDABLE))
919 		return false;
920 
921 	/* We're neither bondable nor discoverable in the limited
922 	 * privacy mode, therefore use RPA.
923 	 */
924 	return true;
925 }
926 
927 static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
928 {
929 	/* If we're advertising or initiating an LE connection we can't
930 	 * go ahead and change the random address at this time. This is
931 	 * because the eventual initiator address used for the
932 	 * subsequently created connection will be undefined (some
933 	 * controllers use the new address and others the one we had
934 	 * when the operation started).
935 	 *
936 	 * In this kind of scenario skip the update and let the random
937 	 * address be updated at the next cycle.
938 	 */
939 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
940 	    hci_lookup_le_connect(hdev)) {
941 		bt_dev_dbg(hdev, "Deferring random address update");
942 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
943 		return 0;
944 	}
945 
946 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
947 				     6, rpa, HCI_CMD_TIMEOUT);
948 }
949 
950 int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
951 				   bool rpa, u8 *own_addr_type)
952 {
953 	int err;
954 
955 	/* If privacy is enabled use a resolvable private address. If
956 	 * the current RPA has expired or there is something other than
957 	 * the current RPA in use, then generate a new one.
958 	 */
959 	if (rpa) {
960 		/* If the controller supports LL Privacy, use own address
961 		 * type 0x03
962 		 */
963 		if (use_ll_privacy(hdev))
964 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
965 		else
966 			*own_addr_type = ADDR_LE_DEV_RANDOM;
967 
968 		/* Check if RPA is valid */
969 		if (rpa_valid(hdev))
970 			return 0;
971 
972 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
973 		if (err < 0) {
974 			bt_dev_err(hdev, "failed to generate new RPA");
975 			return err;
976 		}
977 
978 		err = hci_set_random_addr_sync(hdev, &hdev->rpa);
979 		if (err)
980 			return err;
981 
982 		return 0;
983 	}
984 
985 	/* In case of required privacy without resolvable private address,
986 	 * use a non-resolvable private address. This is useful for active
987 	 * scanning and non-connectable advertising.
988 	 */
989 	if (require_privacy) {
990 		bdaddr_t nrpa;
991 
992 		while (true) {
993 			/* The non-resolvable private address is generated
994 			 * from random six bytes with the two most significant
995 			 * bits cleared.
996 			 */
997 			get_random_bytes(&nrpa, 6);
998 			nrpa.b[5] &= 0x3f;
999 
1000 			/* The non-resolvable private address shall not be
1001 			 * equal to the public address.
1002 			 */
1003 			if (bacmp(&hdev->bdaddr, &nrpa))
1004 				break;
1005 		}
1006 
1007 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1008 
1009 		return hci_set_random_addr_sync(hdev, &nrpa);
1010 	}
1011 
1012 	/* If forcing static address is in use or there is no public
1013 	 * address, use the static address as the random address (but skip
1014 	 * the HCI command if the current random address is already the
1015 	 * static one).
1016 	 *
1017 	 * In case BR/EDR has been disabled on a dual-mode controller
1018 	 * and a static address has been configured, then use that
1019 	 * address instead of the public BR/EDR address.
1020 	 */
1021 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1022 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1023 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1024 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
1025 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1026 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
1027 			return hci_set_random_addr_sync(hdev,
1028 							&hdev->static_addr);
1029 		return 0;
1030 	}
1031 
1032 	/* Neither privacy nor static address is being used so use a
1033 	 * public address.
1034 	 */
1035 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1036 
1037 	return 0;
1038 }
1039 
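/* Worked example (editor's note): bdaddr_t stores the address little-endian,
 * so nrpa.b[5] above is the most significant byte. If get_random_bytes()
 * yields { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xf3 }, masking b[5] &= 0x3f turns
 * 0xf3 into 0x33 and the resulting non-resolvable private address is
 * 33:9A:78:56:34:12, with its two top bits cleared as required.
 */
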
1040 static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
1041 {
1042 	struct hci_cp_le_set_ext_adv_enable *cp;
1043 	struct hci_cp_ext_adv_set *set;
1044 	u8 data[sizeof(*cp) + sizeof(*set) * 1];
1045 	u8 size;
1046 	struct adv_info *adv = NULL;
1047 
1048 	/* If request specifies an instance that doesn't exist, fail */
1049 	if (instance > 0) {
1050 		adv = hci_find_adv_instance(hdev, instance);
1051 		if (!adv)
1052 			return -EINVAL;
1053 
1054 		/* If not enabled there is nothing to do */
1055 		if (!adv->enabled)
1056 			return 0;
1057 	}
1058 
1059 	memset(data, 0, sizeof(data));
1060 
1061 	cp = (void *)data;
1062 	set = (void *)cp->data;
1063 
1064 	/* Instance 0x00 indicates all advertising instances will be disabled */
1065 	cp->num_of_sets = !!instance;
1066 	cp->enable = 0x00;
1067 
1068 	set->handle = adv ? adv->handle : instance;
1069 
1070 	size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;
1071 
1072 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1073 				     size, data, HCI_CMD_TIMEOUT);
1074 }
1075 
1076 static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
1077 					    bdaddr_t *random_addr)
1078 {
1079 	struct hci_cp_le_set_adv_set_rand_addr cp;
1080 	int err;
1081 
1082 	if (!instance) {
1083 		/* Instance 0x00 doesn't have an adv_info; instead it uses
1084 		 * hdev->random_addr to track its address, so whenever it needs
1085 		 * to be updated this also sets the random address, since
1086 		 * hdev->random_addr is shared with the scan state machine.
1087 		 */
1088 		err = hci_set_random_addr_sync(hdev, random_addr);
1089 		if (err)
1090 			return err;
1091 	}
1092 
1093 	memset(&cp, 0, sizeof(cp));
1094 
1095 	cp.handle = instance;
1096 	bacpy(&cp.bdaddr, random_addr);
1097 
1098 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1099 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1100 }
1101 
1102 int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
1103 {
1104 	struct hci_cp_le_set_ext_adv_params cp;
1105 	bool connectable;
1106 	u32 flags;
1107 	bdaddr_t random_addr;
1108 	u8 own_addr_type;
1109 	int err;
1110 	struct adv_info *adv;
1111 	bool secondary_adv;
1112 
1113 	if (instance > 0) {
1114 		adv = hci_find_adv_instance(hdev, instance);
1115 		if (!adv)
1116 			return -EINVAL;
1117 	} else {
1118 		adv = NULL;
1119 	}
1120 
1121 	/* Updating parameters of an active instance will return a
1122 	 * Command Disallowed error, so we must first disable the
1123 	 * instance if it is active.
1124 	 */
1125 	if (adv && !adv->pending) {
1126 		err = hci_disable_ext_adv_instance_sync(hdev, instance);
1127 		if (err)
1128 			return err;
1129 	}
1130 
1131 	flags = hci_adv_instance_flags(hdev, instance);
1132 
1133 	/* If the "connectable" instance flag was not set, then choose between
1134 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1135 	 */
1136 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1137 		      mgmt_get_connectable(hdev);
1138 
1139 	if (!is_advertising_allowed(hdev, connectable))
1140 		return -EPERM;
1141 
1142 	/* Set require_privacy to true only when non-connectable
1143 	 * advertising is used. In that case it is fine to use a
1144 	 * non-resolvable private address.
1145 	 */
1146 	err = hci_get_random_address(hdev, !connectable,
1147 				     adv_use_rpa(hdev, flags), adv,
1148 				     &own_addr_type, &random_addr);
1149 	if (err < 0)
1150 		return err;
1151 
1152 	memset(&cp, 0, sizeof(cp));
1153 
1154 	if (adv) {
1155 		hci_cpu_to_le24(adv->min_interval, cp.min_interval);
1156 		hci_cpu_to_le24(adv->max_interval, cp.max_interval);
1157 		cp.tx_power = adv->tx_power;
1158 	} else {
1159 		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1160 		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1161 		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
1162 	}
1163 
1164 	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1165 
1166 	if (connectable) {
1167 		if (secondary_adv)
1168 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1169 		else
1170 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1171 	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
1172 		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
1173 		if (secondary_adv)
1174 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1175 		else
1176 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1177 	} else {
1178 		if (secondary_adv)
1179 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1180 		else
1181 			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1182 	}
1183 
1184 	/* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
1185 	 * contains the peer’s Identity Address and the Peer_Address_Type
1186 	 * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01).
1187 	 * These parameters are used to locate the corresponding local IRK in
1188 	 * the resolving list; this IRK is used to generate their own address
1189 	 * used in the advertisement.
1190 	 */
1191 	if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
1192 		hci_copy_identity_address(hdev, &cp.peer_addr,
1193 					  &cp.peer_addr_type);
1194 
1195 	cp.own_addr_type = own_addr_type;
1196 	cp.channel_map = hdev->le_adv_channel_map;
1197 	cp.handle = adv ? adv->handle : instance;
1198 
1199 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
1200 		cp.primary_phy = HCI_ADV_PHY_1M;
1201 		cp.secondary_phy = HCI_ADV_PHY_2M;
1202 	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1203 		cp.primary_phy = HCI_ADV_PHY_CODED;
1204 		cp.secondary_phy = HCI_ADV_PHY_CODED;
1205 	} else {
1206 		/* In all other cases use 1M */
1207 		cp.primary_phy = HCI_ADV_PHY_1M;
1208 		cp.secondary_phy = HCI_ADV_PHY_1M;
1209 	}
1210 
1211 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
1212 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1213 	if (err)
1214 		return err;
1215 
1216 	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
1217 	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
1218 	    bacmp(&random_addr, BDADDR_ANY)) {
1219 		/* Check if the random address needs to be updated */
1220 		if (adv) {
1221 			if (!bacmp(&random_addr, &adv->random_addr))
1222 				return 0;
1223 		} else {
1224 			if (!bacmp(&random_addr, &hdev->random_addr))
1225 				return 0;
1226 		}
1227 
1228 		return hci_set_adv_set_random_addr_sync(hdev, instance,
1229 							&random_addr);
1230 	}
1231 
1232 	return 0;
1233 }
1234 
1235 static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1236 {
1237 	DEFINE_FLEX(struct hci_cp_le_set_ext_scan_rsp_data, pdu, data, length,
1238 		    HCI_MAX_EXT_AD_LENGTH);
1239 	u8 len;
1240 	struct adv_info *adv = NULL;
1241 	int err;
1242 
1243 	if (instance) {
1244 		adv = hci_find_adv_instance(hdev, instance);
1245 		if (!adv || !adv->scan_rsp_changed)
1246 			return 0;
1247 	}
1248 
1249 	len = eir_create_scan_rsp(hdev, instance, pdu->data);
1250 
1251 	pdu->handle = adv ? adv->handle : instance;
1252 	pdu->length = len;
1253 	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
1254 	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1255 
1256 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1257 				    struct_size(pdu, data, len), pdu,
1258 				    HCI_CMD_TIMEOUT);
1259 	if (err)
1260 		return err;
1261 
1262 	if (adv) {
1263 		adv->scan_rsp_changed = false;
1264 	} else {
1265 		memcpy(hdev->scan_rsp_data, pdu->data, len);
1266 		hdev->scan_rsp_data_len = len;
1267 	}
1268 
1269 	return 0;
1270 }
1271 
1272 static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1273 {
1274 	struct hci_cp_le_set_scan_rsp_data cp;
1275 	u8 len;
1276 
1277 	memset(&cp, 0, sizeof(cp));
1278 
1279 	len = eir_create_scan_rsp(hdev, instance, cp.data);
1280 
1281 	if (hdev->scan_rsp_data_len == len &&
1282 	    !memcmp(cp.data, hdev->scan_rsp_data, len))
1283 		return 0;
1284 
1285 	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1286 	hdev->scan_rsp_data_len = len;
1287 
1288 	cp.length = len;
1289 
1290 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA,
1291 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1292 }
1293 
1294 int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1295 {
1296 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1297 		return 0;
1298 
1299 	if (ext_adv_capable(hdev))
1300 		return hci_set_ext_scan_rsp_data_sync(hdev, instance);
1301 
1302 	return __hci_set_scan_rsp_data_sync(hdev, instance);
1303 }
1304 
1305 int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
1306 {
1307 	struct hci_cp_le_set_ext_adv_enable *cp;
1308 	struct hci_cp_ext_adv_set *set;
1309 	u8 data[sizeof(*cp) + sizeof(*set) * 1];
1310 	struct adv_info *adv;
1311 
1312 	if (instance > 0) {
1313 		adv = hci_find_adv_instance(hdev, instance);
1314 		if (!adv)
1315 			return -EINVAL;
1316 		/* If already enabled there is nothing to do */
1317 		if (adv->enabled)
1318 			return 0;
1319 	} else {
1320 		adv = NULL;
1321 	}
1322 
1323 	cp = (void *)data;
1324 	set = (void *)cp->data;
1325 
1326 	memset(cp, 0, sizeof(*cp));
1327 
1328 	cp->enable = 0x01;
1329 	cp->num_of_sets = 0x01;
1330 
1331 	memset(set, 0, sizeof(*set));
1332 
1333 	set->handle = adv ? adv->handle : instance;
1334 
1335 	/* Set duration per instance since controller is responsible for
1336 	 * scheduling it.
1337 	 */
1338 	if (adv && adv->timeout) {
1339 		u16 duration = adv->timeout * MSEC_PER_SEC;
1340 
1341 		/* Time = N * 10 ms */
1342 		set->duration = cpu_to_le16(duration / 10);
1343 	}
1344 
1345 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1346 				     sizeof(*cp) +
1347 				     sizeof(*set) * cp->num_of_sets,
1348 				     data, HCI_CMD_TIMEOUT);
1349 }
1350 
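/* Worked example (editor's note): the Duration field above is expressed in
 * units of 10 ms while adv->timeout is in seconds, hence the two-step
 * conversion. For adv->timeout = 5:
 *
 *	duration      = 5 * MSEC_PER_SEC = 5000	(milliseconds)
 *	set->duration = 5000 / 10        = 500	(N, where N * 10 ms = 5 s)
 */
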
1351 int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
1352 {
1353 	int err;
1354 
1355 	err = hci_setup_ext_adv_instance_sync(hdev, instance);
1356 	if (err)
1357 		return err;
1358 
1359 	err = hci_set_ext_scan_rsp_data_sync(hdev, instance);
1360 	if (err)
1361 		return err;
1362 
1363 	return hci_enable_ext_advertising_sync(hdev, instance);
1364 }
1365 
1366 int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
1367 {
1368 	struct hci_cp_le_set_per_adv_enable cp;
1369 	struct adv_info *adv = NULL;
1370 
1371 	/* If periodic advertising already disabled there is nothing to do. */
1372 	adv = hci_find_adv_instance(hdev, instance);
1373 	if (!adv || !adv->periodic || !adv->enabled)
1374 		return 0;
1375 
1376 	memset(&cp, 0, sizeof(cp));
1377 
1378 	cp.enable = 0x00;
1379 	cp.handle = instance;
1380 
1381 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
1382 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1383 }
1384 
1385 static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
1386 				       u16 min_interval, u16 max_interval)
1387 {
1388 	struct hci_cp_le_set_per_adv_params cp;
1389 
1390 	memset(&cp, 0, sizeof(cp));
1391 
1392 	if (!min_interval)
1393 		min_interval = DISCOV_LE_PER_ADV_INT_MIN;
1394 
1395 	if (!max_interval)
1396 		max_interval = DISCOV_LE_PER_ADV_INT_MAX;
1397 
1398 	cp.handle = instance;
1399 	cp.min_interval = cpu_to_le16(min_interval);
1400 	cp.max_interval = cpu_to_le16(max_interval);
1401 	cp.periodic_properties = 0x0000;
1402 
1403 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS,
1404 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1405 }
1406 
1407 static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
1408 {
1409 	DEFINE_FLEX(struct hci_cp_le_set_per_adv_data, pdu, data, length,
1410 		    HCI_MAX_PER_AD_LENGTH);
1411 	u8 len;
1412 	struct adv_info *adv = NULL;
1413 
1414 	if (instance) {
1415 		adv = hci_find_adv_instance(hdev, instance);
1416 		if (!adv || !adv->periodic)
1417 			return 0;
1418 	}
1419 
1420 	len = eir_create_per_adv_data(hdev, instance, pdu->data);
1421 
1422 	pdu->length = len;
1423 	pdu->handle = adv ? adv->handle : instance;
1424 	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
1425 
1426 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
1427 				     struct_size(pdu, data, len), pdu,
1428 				     HCI_CMD_TIMEOUT);
1429 }
1430 
1431 static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
1432 {
1433 	struct hci_cp_le_set_per_adv_enable cp;
1434 	struct adv_info *adv = NULL;
1435 
1436 	/* If periodic advertising already enabled there is nothing to do. */
1437 	adv = hci_find_adv_instance(hdev, instance);
1438 	if (adv && adv->periodic && adv->enabled)
1439 		return 0;
1440 
1441 	memset(&cp, 0, sizeof(cp));
1442 
1443 	cp.enable = 0x01;
1444 	cp.handle = instance;
1445 
1446 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
1447 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1448 }
1449 
1450 /* Checks if the periodic advertising data contains a Basic Announcement and,
1451  * if it does, generates a Broadcast ID and adds a Broadcast Announcement.
1452  */
1453 static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv)
1454 {
1455 	u8 bid[3];
1456 	u8 ad[4 + 3];
1457 
1458 	/* Skip if adv is NULL, as instance 0x00 is used for general purpose
1459 	 * advertising and so cannot be used for the likes of a Broadcast
1460 	 * Announcement, since it can be overwritten at any point.
1461 	 */
1462 	if (!adv)
1463 		return 0;
1464 
1465 	/* If the PA data doesn't contain a Basic Audio Announcement there
1466 	 * is nothing to do.
1467 	 */
1468 	if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len,
1469 				  0x1851, NULL))
1470 		return 0;
1471 
1472 	/* Check if advertising data already has a Broadcast Announcement since
1473 	 * the process may want to control the Broadcast ID directly and in that
1474 	 * case the kernel shall not interfere.
1475 	 */
1476 	if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852,
1477 				 NULL))
1478 		return 0;
1479 
1480 	/* Generate Broadcast ID */
1481 	get_random_bytes(bid, sizeof(bid));
1482 	eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid));
1483 	hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL);
1484 
1485 	return hci_update_adv_data_sync(hdev, adv->instance);
1486 }
1487 
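/* Worked example (editor's sketch): the ad[4 + 3] buffer built above holds a
 * single 7-byte Service Data AD structure. Assuming eir_append_service_data()
 * writes the usual length/type/UUID prefix, the layout would be:
 *
 *	ad[0]    = 0x06		length (type + 16-bit UUID + 3-byte Broadcast ID)
 *	ad[1]    = 0x16		Service Data - 16-bit UUID
 *	ad[2..3] = 0x52 0x18	Broadcast Audio Announcement UUID 0x1852 (LE)
 *	ad[4..6] = bid[0..2]	the random Broadcast ID
 */
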
1488 int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
1489 			   u8 *data, u32 flags, u16 min_interval,
1490 			   u16 max_interval, u16 sync_interval)
1491 {
1492 	struct adv_info *adv = NULL;
1493 	int err;
1494 	bool added = false;
1495 
1496 	hci_disable_per_advertising_sync(hdev, instance);
1497 
1498 	if (instance) {
1499 		adv = hci_find_adv_instance(hdev, instance);
1500 		/* Create an instance if one could not be found */
1501 		if (!adv) {
1502 			adv = hci_add_per_instance(hdev, instance, flags,
1503 						   data_len, data,
1504 						   sync_interval,
1505 						   sync_interval);
1506 			if (IS_ERR(adv))
1507 				return PTR_ERR(adv);
1508 			adv->pending = false;
1509 			added = true;
1510 		}
1511 	}
1512 
1513 	/* Start advertising */
1514 	err = hci_start_ext_adv_sync(hdev, instance);
1515 	if (err < 0)
1516 		goto fail;
1517 
1518 	err = hci_adv_bcast_annoucement(hdev, adv);
1519 	if (err < 0)
1520 		goto fail;
1521 
1522 	err = hci_set_per_adv_params_sync(hdev, instance, min_interval,
1523 					  max_interval);
1524 	if (err < 0)
1525 		goto fail;
1526 
1527 	err = hci_set_per_adv_data_sync(hdev, instance);
1528 	if (err < 0)
1529 		goto fail;
1530 
1531 	err = hci_enable_per_advertising_sync(hdev, instance);
1532 	if (err < 0)
1533 		goto fail;
1534 
1535 	return 0;
1536 
1537 fail:
1538 	if (added)
1539 		hci_remove_adv_instance(hdev, instance);
1540 
1541 	return err;
1542 }
1543 
1544 static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance)
1545 {
1546 	int err;
1547 
1548 	if (ext_adv_capable(hdev))
1549 		return hci_start_ext_adv_sync(hdev, instance);
1550 
1551 	err = hci_update_adv_data_sync(hdev, instance);
1552 	if (err)
1553 		return err;
1554 
1555 	err = hci_update_scan_rsp_data_sync(hdev, instance);
1556 	if (err)
1557 		return err;
1558 
1559 	return hci_enable_advertising_sync(hdev);
1560 }
1561 
1562 int hci_enable_advertising_sync(struct hci_dev *hdev)
1563 {
1564 	struct adv_info *adv_instance;
1565 	struct hci_cp_le_set_adv_param cp;
1566 	u8 own_addr_type, enable = 0x01;
1567 	bool connectable;
1568 	u16 adv_min_interval, adv_max_interval;
1569 	u32 flags;
1570 	u8 status;
1571 
1572 	if (ext_adv_capable(hdev))
1573 		return hci_enable_ext_advertising_sync(hdev,
1574 						       hdev->cur_adv_instance);
1575 
1576 	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
1577 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1578 
1579 	/* If the "connectable" instance flag was not set, then choose between
1580 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1581 	 */
1582 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1583 		      mgmt_get_connectable(hdev);
1584 
1585 	if (!is_advertising_allowed(hdev, connectable))
1586 		return -EINVAL;
1587 
1588 	status = hci_disable_advertising_sync(hdev);
1589 	if (status)
1590 		return status;
1591 
1592 	/* Clear the HCI_LE_ADV bit temporarily so that the
1593 	 * hci_update_random_address knows that it's safe to go ahead
1594 	 * and write a new random address. The flag will be set back on
1595 	 * as soon as the SET_ADV_ENABLE HCI command completes.
1596 	 */
1597 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1598 
1599 	/* Set require_privacy to true only when non-connectable
1600 	 * advertising is used. In that case it is fine to use a
1601 	 * non-resolvable private address.
1602 	 */
1603 	status = hci_update_random_address_sync(hdev, !connectable,
1604 						adv_use_rpa(hdev, flags),
1605 						&own_addr_type);
1606 	if (status)
1607 		return status;
1608 
1609 	memset(&cp, 0, sizeof(cp));
1610 
1611 	if (adv_instance) {
1612 		adv_min_interval = adv_instance->min_interval;
1613 		adv_max_interval = adv_instance->max_interval;
1614 	} else {
1615 		adv_min_interval = hdev->le_adv_min_interval;
1616 		adv_max_interval = hdev->le_adv_max_interval;
1617 	}
1618 
1619 	if (connectable) {
1620 		cp.type = LE_ADV_IND;
1621 	} else {
1622 		if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance))
1623 			cp.type = LE_ADV_SCAN_IND;
1624 		else
1625 			cp.type = LE_ADV_NONCONN_IND;
1626 
1627 		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1628 		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1629 			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1630 			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1631 		}
1632 	}
1633 
1634 	cp.min_interval = cpu_to_le16(adv_min_interval);
1635 	cp.max_interval = cpu_to_le16(adv_max_interval);
1636 	cp.own_address_type = own_addr_type;
1637 	cp.channel_map = hdev->le_adv_channel_map;
1638 
1639 	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
1640 				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1641 	if (status)
1642 		return status;
1643 
1644 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
1645 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
1646 }
1647 
1648 static int enable_advertising_sync(struct hci_dev *hdev, void *data)
1649 {
1650 	return hci_enable_advertising_sync(hdev);
1651 }
1652 
1653 int hci_enable_advertising(struct hci_dev *hdev)
1654 {
1655 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1656 	    list_empty(&hdev->adv_instances))
1657 		return 0;
1658 
1659 	return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL);
1660 }
1661 
1662 int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1663 				     struct sock *sk)
1664 {
1665 	int err;
1666 
1667 	if (!ext_adv_capable(hdev))
1668 		return 0;
1669 
1670 	err = hci_disable_ext_adv_instance_sync(hdev, instance);
1671 	if (err)
1672 		return err;
1673 
1674 	/* If request specifies an instance that doesn't exist, fail */
1675 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1676 		return -EINVAL;
1677 
1678 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET,
1679 					sizeof(instance), &instance, 0,
1680 					HCI_CMD_TIMEOUT, sk);
1681 }
1682 
1683 static int remove_ext_adv_sync(struct hci_dev *hdev, void *data)
1684 {
1685 	struct adv_info *adv = data;
1686 	u8 instance = 0;
1687 
1688 	if (adv)
1689 		instance = adv->instance;
1690 
1691 	return hci_remove_ext_adv_instance_sync(hdev, instance, NULL);
1692 }
1693 
1694 int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance)
1695 {
1696 	struct adv_info *adv = NULL;
1697 
1698 	if (instance) {
1699 		adv = hci_find_adv_instance(hdev, instance);
1700 		if (!adv)
1701 			return -EINVAL;
1702 	}
1703 
1704 	return hci_cmd_sync_queue(hdev, remove_ext_adv_sync, adv, NULL);
1705 }
1706 
1707 int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
1708 {
1709 	struct hci_cp_le_term_big cp;
1710 
1711 	memset(&cp, 0, sizeof(cp));
1712 	cp.handle = handle;
1713 	cp.reason = reason;
1714 
1715 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG,
1716 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1717 }
1718 
1719 static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
1720 {
1721 	DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
1722 		    HCI_MAX_EXT_AD_LENGTH);
1723 	u8 len;
1724 	struct adv_info *adv = NULL;
1725 	int err;
1726 
1727 	if (instance) {
1728 		adv = hci_find_adv_instance(hdev, instance);
1729 		if (!adv || !adv->adv_data_changed)
1730 			return 0;
1731 	}
1732 
1733 	len = eir_create_adv_data(hdev, instance, pdu->data);
1734 
1735 	pdu->length = len;
1736 	pdu->handle = adv ? adv->handle : instance;
1737 	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
1738 	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1739 
1740 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
1741 				    struct_size(pdu, data, len), pdu,
1742 				    HCI_CMD_TIMEOUT);
1743 	if (err)
1744 		return err;
1745 
1746 	/* Update data if the command succeeds */
1747 	if (adv) {
1748 		adv->adv_data_changed = false;
1749 	} else {
1750 		memcpy(hdev->adv_data, pdu->data, len);
1751 		hdev->adv_data_len = len;
1752 	}
1753 
1754 	return 0;
1755 }
1756 
hci_set_adv_data_sync(struct hci_dev * hdev,u8 instance)1757 static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
1758 {
1759 	struct hci_cp_le_set_adv_data cp;
1760 	u8 len;
1761 
1762 	memset(&cp, 0, sizeof(cp));
1763 
1764 	len = eir_create_adv_data(hdev, instance, cp.data);
1765 
1766 	/* There's nothing to do if the data hasn't changed */
1767 	if (hdev->adv_data_len == len &&
1768 	    memcmp(cp.data, hdev->adv_data, len) == 0)
1769 		return 0;
1770 
1771 	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1772 	hdev->adv_data_len = len;
1773 
1774 	cp.length = len;
1775 
1776 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
1777 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1778 }
1779 
hci_update_adv_data_sync(struct hci_dev * hdev,u8 instance)1780 int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
1781 {
1782 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1783 		return 0;
1784 
1785 	if (ext_adv_capable(hdev))
1786 		return hci_set_ext_adv_data_sync(hdev, instance);
1787 
1788 	return hci_set_adv_data_sync(hdev, instance);
1789 }
1790 
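/* Illustrative: hci_update_adv_data_sync() picks the extended or legacy
 * data command based on ext_adv_capable(), so callers stay agnostic. A
 * hypothetical cmd_sync callback refreshing instance 0x01 would be:
 */
#if 0 /* example only */
static int example_refresh_adv_data_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_adv_data_sync(hdev, 0x01);
}
#endif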
hci_schedule_adv_instance_sync(struct hci_dev * hdev,u8 instance,bool force)1791 int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1792 				   bool force)
1793 {
1794 	struct adv_info *adv = NULL;
1795 	u16 timeout;
1796 
1797 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev))
1798 		return -EPERM;
1799 
1800 	if (hdev->adv_instance_timeout)
1801 		return -EBUSY;
1802 
1803 	adv = hci_find_adv_instance(hdev, instance);
1804 	if (!adv)
1805 		return -ENOENT;
1806 
1807 	/* A zero timeout means unlimited advertising. As long as there is
1808 	 * only one instance, duration should be ignored. We still set a timeout
1809 	 * in case further instances are being added later on.
1810 	 *
1811 	 * If the remaining lifetime of the instance is more than the duration
1812 	 * then the timeout corresponds to the duration, otherwise it will be
1813 	 * reduced to the remaining instance lifetime.
1814 	 */
1815 	if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
1816 		timeout = adv->duration;
1817 	else
1818 		timeout = adv->remaining_time;
1819 
1820 	/* The remaining time is being reduced unless the instance is being
1821 	 * advertised without time limit.
1822 	 */
1823 	if (adv->timeout)
1824 		adv->remaining_time = adv->remaining_time - timeout;
1825 
1826 	/* Only use work for scheduling instances with legacy advertising */
1827 	if (!ext_adv_capable(hdev)) {
1828 		hdev->adv_instance_timeout = timeout;
1829 		queue_delayed_work(hdev->req_workqueue,
1830 				   &hdev->adv_instance_expire,
1831 				   msecs_to_jiffies(timeout * 1000));
1832 	}
1833 
1834 	/* If we're just re-scheduling the same instance again then do not
1835 	 * execute any HCI commands. This happens when a single instance is
1836 	 * being advertised.
1837 	 */
1838 	if (!force && hdev->cur_adv_instance == instance &&
1839 	    hci_dev_test_flag(hdev, HCI_LE_ADV))
1840 		return 0;
1841 
1842 	hdev->cur_adv_instance = instance;
1843 
1844 	return hci_start_adv_sync(hdev, instance);
1845 }
1846 
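/* Worked example for the timeout selection above (hypothetical values):
 * with timeout = 5, duration = 2 and remaining_time = 5, the first pass
 * schedules timeout = 2 (duration <= remaining_time) and remaining_time
 * drops to 3; after another pass it is 1, so the third pass schedules
 * timeout = 1 (the remaining lifetime) and remaining_time reaches 0.
 */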
hci_clear_adv_sets_sync(struct hci_dev * hdev,struct sock * sk)1847 static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
1848 {
1849 	int err;
1850 
1851 	if (!ext_adv_capable(hdev))
1852 		return 0;
1853 
1854 	/* Disable instance 0x00 to disable all instances */
1855 	err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
1856 	if (err)
1857 		return err;
1858 
1859 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS,
1860 					0, NULL, 0, HCI_CMD_TIMEOUT, sk);
1861 }
1862 
hci_clear_adv_sync(struct hci_dev * hdev,struct sock * sk,bool force)1863 static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
1864 {
1865 	struct adv_info *adv, *n;
1866 	int err = 0;
1867 
1868 	if (ext_adv_capable(hdev))
1869 		/* Remove all existing sets */
1870 		err = hci_clear_adv_sets_sync(hdev, sk);
1871 	if (ext_adv_capable(hdev))
1872 		return err;
1873 
1874 	/* This is safe as long as no command is sent while the lock is
1875 	 * held.
1876 	 */
1877 	hci_dev_lock(hdev);
1878 
1879 	/* Cleanup non-ext instances */
1880 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1881 		u8 instance = adv->instance;
1882 		int err;
1883 
1884 		if (!(force || adv->timeout))
1885 			continue;
1886 
1887 		err = hci_remove_adv_instance(hdev, instance);
1888 		if (!err)
1889 			mgmt_advertising_removed(sk, hdev, instance);
1890 	}
1891 
1892 	hci_dev_unlock(hdev);
1893 
1894 	return 0;
1895 }
1896 
hci_remove_adv_sync(struct hci_dev * hdev,u8 instance,struct sock * sk)1897 static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
1898 			       struct sock *sk)
1899 {
1900 	int err = 0;
1901 
1902 	/* If we use extended advertising, instance has to be removed first. */
1903 	if (ext_adv_capable(hdev))
1904 		err = hci_remove_ext_adv_instance_sync(hdev, instance, sk);
1905 	if (ext_adv_capable(hdev))
1906 		return err;
1907 
1908 	/* This is safe as long as no command is sent while the lock is
1909 	 * held.
1910 	 */
1911 	hci_dev_lock(hdev);
1912 
1913 	err = hci_remove_adv_instance(hdev, instance);
1914 	if (!err)
1915 		mgmt_advertising_removed(sk, hdev, instance);
1916 
1917 	hci_dev_unlock(hdev);
1918 
1919 	return err;
1920 }
1921 
1922 /* For a single instance:
1923  * - force == true: The instance will be removed even when its remaining
1924  *   lifetime is not zero.
1925  * - force == false: the instance will be deactivated but kept stored unless
1926  *   the remaining lifetime is zero.
1927  *
1928  * For instance == 0x00:
1929  * - force == true: All instances will be removed regardless of their timeout
1930  *   setting.
1931  * - force == false: Only instances that have a timeout will be removed.
1932  */
hci_remove_advertising_sync(struct hci_dev * hdev,struct sock * sk,u8 instance,bool force)1933 int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
1934 				u8 instance, bool force)
1935 {
1936 	struct adv_info *next = NULL;
1937 	int err;
1938 
1939 	/* Cancel any timeout concerning the removed instance(s). */
1940 	if (!instance || hdev->cur_adv_instance == instance)
1941 		cancel_adv_timeout(hdev);
1942 
1943 	/* Get the next instance to advertise BEFORE we remove
1944 	 * the current one. This can be the same instance again
1945 	 * if there is only one instance.
1946 	 */
1947 	if (hdev->cur_adv_instance == instance)
1948 		next = hci_get_next_instance(hdev, instance);
1949 
1950 	if (!instance) {
1951 		err = hci_clear_adv_sync(hdev, sk, force);
1952 		if (err)
1953 			return err;
1954 	} else {
1955 		struct adv_info *adv = hci_find_adv_instance(hdev, instance);
1956 
1957 		if (force || (adv && adv->timeout && !adv->remaining_time)) {
1958 			/* Don't advertise a removed instance. */
1959 			if (next && next->instance == instance)
1960 				next = NULL;
1961 
1962 			err = hci_remove_adv_sync(hdev, instance, sk);
1963 			if (err)
1964 				return err;
1965 		}
1966 	}
1967 
1968 	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
1969 		return 0;
1970 
1971 	if (next && !ext_adv_capable(hdev))
1972 		hci_schedule_adv_instance_sync(hdev, next->instance, false);
1973 
1974 	return 0;
1975 }
1976 
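/* Illustrative calls for the semantics documented above (instance values
 * hypothetical): from a cmd_sync context,
 *
 *	hci_remove_advertising_sync(hdev, sk, 0x00, true);
 *
 * removes every instance, while
 *
 *	hci_remove_advertising_sync(hdev, sk, 0x02, false);
 *
 * only removes instance 0x02 once its remaining lifetime has expired.
 */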
hci_read_rssi_sync(struct hci_dev * hdev,__le16 handle)1977 int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle)
1978 {
1979 	struct hci_cp_read_rssi cp;
1980 
1981 	cp.handle = handle;
1982 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI,
1983 					sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1984 }
1985 
hci_read_clock_sync(struct hci_dev * hdev,struct hci_cp_read_clock * cp)1986 int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp)
1987 {
1988 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK,
1989 					sizeof(*cp), cp, HCI_CMD_TIMEOUT);
1990 }
1991 
hci_read_tx_power_sync(struct hci_dev * hdev,__le16 handle,u8 type)1992 int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
1993 {
1994 	struct hci_cp_read_tx_power cp;
1995 
1996 	cp.handle = handle;
1997 	cp.type = type;
1998 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER,
1999 					sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2000 }
2001 
hci_disable_advertising_sync(struct hci_dev * hdev)2002 int hci_disable_advertising_sync(struct hci_dev *hdev)
2003 {
2004 	u8 enable = 0x00;
2005 	int err = 0;
2006 
2007 	/* If controller is not advertising we are done. */
2008 	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
2009 		return 0;
2010 
2011 	if (ext_adv_capable(hdev))
2012 		err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
2013 	if (ext_adv_capable(hdev))
2014 		return err;
2015 
2016 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
2017 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
2018 }
2019 
hci_le_set_ext_scan_enable_sync(struct hci_dev * hdev,u8 val,u8 filter_dup)2020 static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,
2021 					   u8 filter_dup)
2022 {
2023 	struct hci_cp_le_set_ext_scan_enable cp;
2024 
2025 	memset(&cp, 0, sizeof(cp));
2026 	cp.enable = val;
2027 
2028 	if (hci_dev_test_flag(hdev, HCI_MESH))
2029 		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2030 	else
2031 		cp.filter_dup = filter_dup;
2032 
2033 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2034 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2035 }
2036 
hci_le_set_scan_enable_sync(struct hci_dev * hdev,u8 val,u8 filter_dup)2037 static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
2038 				       u8 filter_dup)
2039 {
2040 	struct hci_cp_le_set_scan_enable cp;
2041 
2042 	if (use_ext_scan(hdev))
2043 		return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup);
2044 
2045 	memset(&cp, 0, sizeof(cp));
2046 	cp.enable = val;
2047 
2048 	if (val && hci_dev_test_flag(hdev, HCI_MESH))
2049 		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2050 	else
2051 		cp.filter_dup = filter_dup;
2052 
2053 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
2054 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2055 }
2056 
hci_le_set_addr_resolution_enable_sync(struct hci_dev * hdev,u8 val)2057 static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
2058 {
2059 	if (!use_ll_privacy(hdev))
2060 		return 0;
2061 
2062 	/* If address resolution is already in the requested state we are done. */
2063 	if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
2064 		return 0;
2065 
2066 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
2067 				     sizeof(val), &val, HCI_CMD_TIMEOUT);
2068 }
2069 
hci_scan_disable_sync(struct hci_dev * hdev)2070 static int hci_scan_disable_sync(struct hci_dev *hdev)
2071 {
2072 	int err;
2073 
2074 	/* If controller is not scanning we are done. */
2075 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2076 		return 0;
2077 
2078 	if (hdev->scanning_paused) {
2079 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2080 		return 0;
2081 	}
2082 
2083 	err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
2084 	if (err) {
2085 		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
2086 		return err;
2087 	}
2088 
2089 	return err;
2090 }
2091 
scan_use_rpa(struct hci_dev * hdev)2092 static bool scan_use_rpa(struct hci_dev *hdev)
2093 {
2094 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
2095 }
2096 
hci_start_interleave_scan(struct hci_dev * hdev)2097 static void hci_start_interleave_scan(struct hci_dev *hdev)
2098 {
2099 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
2100 	queue_delayed_work(hdev->req_workqueue,
2101 			   &hdev->interleave_scan, 0);
2102 }
2103 
is_interleave_scanning(struct hci_dev * hdev)2104 static bool is_interleave_scanning(struct hci_dev *hdev)
2105 {
2106 	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
2107 }
2108 
cancel_interleave_scan(struct hci_dev * hdev)2109 static void cancel_interleave_scan(struct hci_dev *hdev)
2110 {
2111 	bt_dev_dbg(hdev, "cancelling interleave scan");
2112 
2113 	cancel_delayed_work_sync(&hdev->interleave_scan);
2114 
2115 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
2116 }
2117 
2118 /* Return true if an interleave scan was started by this function;
2119  * otherwise return false.
2120  */
hci_update_interleaved_scan_sync(struct hci_dev * hdev)2121 static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
2122 {
2123 	/* Do interleaved scan only if all of the following are true:
2124 	 * - There is at least one ADV monitor
2125 	 * - At least one pending LE connection or one device to be scanned for
2126 	 * - Monitor offloading is not supported
2127 	 * If so, we should alternate between allowlist scan and one without
2128 	 * any filters to save power.
2129 	 */
2130 	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
2131 				!(list_empty(&hdev->pend_le_conns) &&
2132 				  list_empty(&hdev->pend_le_reports)) &&
2133 				hci_get_adv_monitor_offload_ext(hdev) ==
2134 				    HCI_ADV_MONITOR_EXT_NONE;
2135 	bool is_interleaving = is_interleave_scanning(hdev);
2136 
2137 	if (use_interleaving && !is_interleaving) {
2138 		hci_start_interleave_scan(hdev);
2139 		bt_dev_dbg(hdev, "starting interleave scan");
2140 		return true;
2141 	}
2142 
2143 	if (!use_interleaving && is_interleaving)
2144 		cancel_interleave_scan(hdev);
2145 
2146 	return false;
2147 }
2148 
2149 /* Removes device from the resolving list if needed. */
hci_le_del_resolve_list_sync(struct hci_dev * hdev,bdaddr_t * bdaddr,u8 bdaddr_type)2150 static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
2151 					bdaddr_t *bdaddr, u8 bdaddr_type)
2152 {
2153 	struct hci_cp_le_del_from_resolv_list cp;
2154 	struct bdaddr_list_with_irk *entry;
2155 
2156 	if (!use_ll_privacy(hdev))
2157 		return 0;
2158 
2159 	/* Check if the IRK has been programmed */
2160 	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
2161 						bdaddr_type);
2162 	if (!entry)
2163 		return 0;
2164 
2165 	cp.bdaddr_type = bdaddr_type;
2166 	bacpy(&cp.bdaddr, bdaddr);
2167 
2168 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
2169 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2170 }
2171 
hci_le_del_accept_list_sync(struct hci_dev * hdev,bdaddr_t * bdaddr,u8 bdaddr_type)2172 static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
2173 				       bdaddr_t *bdaddr, u8 bdaddr_type)
2174 {
2175 	struct hci_cp_le_del_from_accept_list cp;
2176 	int err;
2177 
2178 	/* Check if device is on accept list before removing it */
2179 	if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
2180 		return 0;
2181 
2182 	cp.bdaddr_type = bdaddr_type;
2183 	bacpy(&cp.bdaddr, bdaddr);
2184 
2185 	/* Ignore errors when removing from resolving list as it is likely
2186 	 * that the device was never added.
2187 	 */
2188 	hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2189 
2190 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
2191 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2192 	if (err) {
2193 		bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
2194 		return err;
2195 	}
2196 
2197 	bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
2198 		   cp.bdaddr_type);
2199 
2200 	return 0;
2201 }
2202 
2203 struct conn_params {
2204 	bdaddr_t addr;
2205 	u8 addr_type;
2206 	hci_conn_flags_t flags;
2207 	u8 privacy_mode;
2208 };
2209 
2210 /* Adds device to the resolving list if needed.
2211  * Setting params to NULL programs the local hdev->irk.
2212  */
hci_le_add_resolve_list_sync(struct hci_dev * hdev,struct conn_params * params)2213 static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
2214 					struct conn_params *params)
2215 {
2216 	struct hci_cp_le_add_to_resolv_list cp;
2217 	struct smp_irk *irk;
2218 	struct bdaddr_list_with_irk *entry;
2219 	struct hci_conn_params *p;
2220 
2221 	if (!use_ll_privacy(hdev))
2222 		return 0;
2223 
2224 	/* Attempt to program local identity address, type and irk if params is
2225 	 * NULL.
2226 	 */
2227 	if (!params) {
2228 		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
2229 			return 0;
2230 
2231 		hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
2232 		memcpy(cp.peer_irk, hdev->irk, 16);
2233 		goto done;
2234 	}
2235 
2236 	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2237 	if (!irk)
2238 		return 0;
2239 
2240 	/* Check if the IRK has _not_ been programmed yet. */
2241 	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
2242 						&params->addr,
2243 						params->addr_type);
2244 	if (entry)
2245 		return 0;
2246 
2247 	cp.bdaddr_type = params->addr_type;
2248 	bacpy(&cp.bdaddr, &params->addr);
2249 	memcpy(cp.peer_irk, irk->val, 16);
2250 
2251 	/* Default privacy mode is always Network */
2252 	params->privacy_mode = HCI_NETWORK_PRIVACY;
2253 
2254 	rcu_read_lock();
2255 	p = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2256 				      &params->addr, params->addr_type);
2257 	if (!p)
2258 		p = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2259 					      &params->addr, params->addr_type);
2260 	if (p)
2261 		WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY);
2262 	rcu_read_unlock();
2263 
2264 done:
2265 	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
2266 		memcpy(cp.local_irk, hdev->irk, 16);
2267 	else
2268 		memset(cp.local_irk, 0, 16);
2269 
2270 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
2271 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2272 }
2273 
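/* Illustrative: passing a NULL params seeds the resolving list with the
 * local identity, which is exactly what the powered-up path does (see
 * hci_powered_update_adv_sync() below):
 *
 *	hci_le_add_resolve_list_sync(hdev, NULL);
 */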
2274 /* Set Device Privacy Mode. */
hci_le_set_privacy_mode_sync(struct hci_dev * hdev,struct conn_params * params)2275 static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
2276 					struct conn_params *params)
2277 {
2278 	struct hci_cp_le_set_privacy_mode cp;
2279 	struct smp_irk *irk;
2280 
2281 	/* If device privacy mode has already been set there is nothing to do */
2282 	if (params->privacy_mode == HCI_DEVICE_PRIVACY)
2283 		return 0;
2284 
2285 	/* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also
2286 	 * indicates that LL Privacy has been enabled and
2287 	 * HCI_OP_LE_SET_PRIVACY_MODE is supported.
2288 	 */
2289 	if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY))
2290 		return 0;
2291 
2292 	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2293 	if (!irk)
2294 		return 0;
2295 
2296 	memset(&cp, 0, sizeof(cp));
2297 	cp.bdaddr_type = irk->addr_type;
2298 	bacpy(&cp.bdaddr, &irk->bdaddr);
2299 	cp.mode = HCI_DEVICE_PRIVACY;
2300 
2301 	/* Note: params->privacy_mode is not updated since it is a copy */
2302 
2303 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
2304 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2305 }
2306 
2307 /* Adds device to the allow list if needed. If the device uses RPA (has IRK)
2308  * this attempts to program the device in the resolving list as well and
2309  * properly set the privacy mode.
2310  */
hci_le_add_accept_list_sync(struct hci_dev * hdev,struct conn_params * params,u8 * num_entries)2311 static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
2312 				       struct conn_params *params,
2313 				       u8 *num_entries)
2314 {
2315 	struct hci_cp_le_add_to_accept_list cp;
2316 	int err;
2317 
2318 	/* During suspend, only wakeable devices can be in acceptlist */
2319 	if (hdev->suspended &&
2320 	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) {
2321 		hci_le_del_accept_list_sync(hdev, &params->addr,
2322 					    params->addr_type);
2323 		return 0;
2324 	}
2325 
2326 	/* List full: caller must select a filter policy accepting all advertising */
2327 	if (*num_entries >= hdev->le_accept_list_size)
2328 		return -ENOSPC;
2329 
2330 	/* Accept list cannot be used with RPAs */
2331 	if (!use_ll_privacy(hdev) &&
2332 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
2333 		return -EINVAL;
2334 
2335 	/* Attempt to program the device in the resolving list first to avoid
2336 	 * having to roll back in case it fails, since the resolving list is
2337 	 * dynamic and can probably be smaller than the accept list.
2338 	 */
2339 	err = hci_le_add_resolve_list_sync(hdev, params);
2340 	if (err) {
2341 		bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
2342 		return err;
2343 	}
2344 
2345 	/* Set Privacy Mode */
2346 	err = hci_le_set_privacy_mode_sync(hdev, params);
2347 	if (err) {
2348 		bt_dev_err(hdev, "Unable to set privacy mode: %d", err);
2349 		return err;
2350 	}
2351 
2352 	/* Check if already in accept list */
2353 	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
2354 				   params->addr_type))
2355 		return 0;
2356 
2357 	*num_entries += 1;
2358 	cp.bdaddr_type = params->addr_type;
2359 	bacpy(&cp.bdaddr, &params->addr);
2360 
2361 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
2362 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2363 	if (err) {
2364 		bt_dev_err(hdev, "Unable to add to allow list: %d", err);
2365 		/* Rollback the device from the resolving list */
2366 		hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2367 		return err;
2368 	}
2369 
2370 	bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
2371 		   cp.bdaddr_type);
2372 
2373 	return 0;
2374 }
2375 
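/* Illustrative fragment (variables hypothetical): the caller owns the
 * num_entries counter, so one loop can keep adding devices until controller
 * capacity is hit (see hci_update_accept_list_sync() below for the real loop):
 */
#if 0 /* example only */
	u8 num_entries = 0;

	err = hci_le_add_accept_list_sync(hdev, &params[i], &num_entries);
	if (err == -ENOSPC)
		; /* list full: fall back to filter_policy 0x00 */
#endif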
2376 /* This function disables/pauses all advertising instances */
hci_pause_advertising_sync(struct hci_dev * hdev)2377 static int hci_pause_advertising_sync(struct hci_dev *hdev)
2378 {
2379 	int err;
2380 	int old_state;
2381 
2382 	/* If advertising has already been paused there is nothing to do. */
2383 	if (hdev->advertising_paused)
2384 		return 0;
2385 
2386 	bt_dev_dbg(hdev, "Pausing directed advertising");
2387 
2388 	/* Stop directed advertising */
2389 	old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
2390 	if (old_state) {
2391 		/* When discoverable timeout triggers, then just make sure
2392 		 * the limited discoverable flag is cleared. Even in the case
2393 		 * of a timeout triggered from general discoverable, it is
2394 		 * safe to unconditionally clear the flag.
2395 		 */
2396 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2397 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2398 		hdev->discov_timeout = 0;
2399 	}
2400 
2401 	bt_dev_dbg(hdev, "Pausing advertising instances");
2402 
2403 	/* Call to disable any advertisements active on the controller.
2404 	 * This will succeed even if no advertisements are configured.
2405 	 */
2406 	err = hci_disable_advertising_sync(hdev);
2407 	if (err)
2408 		return err;
2409 
2410 	/* If we are using software rotation, pause the loop */
2411 	if (!ext_adv_capable(hdev))
2412 		cancel_adv_timeout(hdev);
2413 
2414 	hdev->advertising_paused = true;
2415 	hdev->advertising_old_state = old_state;
2416 
2417 	return 0;
2418 }
2419 
2420 /* This function enables all user advertising instances */
hci_resume_advertising_sync(struct hci_dev * hdev)2421 static int hci_resume_advertising_sync(struct hci_dev *hdev)
2422 {
2423 	struct adv_info *adv, *tmp;
2424 	int err = 0;
2425 
2426 	/* If advertising has not been paused there is nothing to do. */
2427 	if (!hdev->advertising_paused)
2428 		return 0;
2429 
2430 	/* Resume directed advertising */
2431 	hdev->advertising_paused = false;
2432 	if (hdev->advertising_old_state) {
2433 		hci_dev_set_flag(hdev, HCI_ADVERTISING);
2434 		hdev->advertising_old_state = 0;
2435 	}
2436 
2437 	bt_dev_dbg(hdev, "Resuming advertising instances");
2438 
2439 	if (ext_adv_capable(hdev)) {
2440 		/* Call for each tracked instance to be re-enabled */
2441 		list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
2442 			err = hci_enable_ext_advertising_sync(hdev,
2443 							      adv->instance);
2444 			if (!err)
2445 				continue;
2446 
2447 			/* If the instance cannot be resumed remove it */
2448 			hci_remove_ext_adv_instance_sync(hdev, adv->instance,
2449 							 NULL);
2450 		}
2451 	} else {
2452 		/* Schedule for most recent instance to be restarted and begin
2453 		 * the software rotation loop
2454 		 */
2455 		err = hci_schedule_adv_instance_sync(hdev,
2456 						     hdev->cur_adv_instance,
2457 						     true);
2458 	}
2459 
2460 	hdev->advertising_paused = false;
2461 
2462 	return err;
2463 }
2464 
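/* Illustrative: the pause/resume pair above brackets operations the
 * controller rejects while advertising, e.g. resolving list updates in
 * hci_update_accept_list_sync() below:
 *
 *	err = hci_pause_advertising_sync(hdev);
 *	if (err)
 *		return err;
 *	... reprogram resolving list ...
 *	hci_resume_advertising_sync(hdev);
 */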
hci_pause_addr_resolution(struct hci_dev * hdev)2465 static int hci_pause_addr_resolution(struct hci_dev *hdev)
2466 {
2467 	int err;
2468 
2469 	if (!use_ll_privacy(hdev))
2470 		return 0;
2471 
2472 	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
2473 		return 0;
2474 
2475 	/* Cannot disable addr resolution if scanning is enabled or
2476 	 * when initiating an LE connection.
2477 	 */
2478 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2479 	    hci_lookup_le_connect(hdev)) {
2480 		bt_dev_err(hdev, "Command not allowed when scan/LE connect");
2481 		return -EPERM;
2482 	}
2483 
2484 	/* Cannot disable addr resolution if advertising is enabled. */
2485 	err = hci_pause_advertising_sync(hdev);
2486 	if (err) {
2487 		bt_dev_err(hdev, "Pause advertising failed: %d", err);
2488 		return err;
2489 	}
2490 
2491 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2492 	if (err)
2493 		bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
2494 			   err);
2495 
2496 	/* Return if address resolution was disabled and an RPA is in use. */
2497 	if (!err && scan_use_rpa(hdev))
2498 		return 0;
2499 
2500 	hci_resume_advertising_sync(hdev);
2501 	return err;
2502 }
2503 
hci_read_local_oob_data_sync(struct hci_dev * hdev,bool extended,struct sock * sk)2504 struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
2505 					     bool extended, struct sock *sk)
2506 {
2507 	u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
2508 					HCI_OP_READ_LOCAL_OOB_DATA;
2509 
2510 	return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
2511 }
2512 
conn_params_copy(struct list_head * list,size_t * n)2513 static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
2514 {
2515 	struct hci_conn_params *params;
2516 	struct conn_params *p;
2517 	size_t i;
2518 
2519 	rcu_read_lock();
2520 
2521 	i = 0;
2522 	list_for_each_entry_rcu(params, list, action)
2523 		++i;
2524 	*n = i;
2525 
2526 	rcu_read_unlock();
2527 
2528 	p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL);
2529 	if (!p)
2530 		return NULL;
2531 
2532 	rcu_read_lock();
2533 
2534 	i = 0;
2535 	list_for_each_entry_rcu(params, list, action) {
2536 		/* Racing adds are handled in next scan update */
2537 		if (i >= *n)
2538 			break;
2539 
2540 		/* No hdev->lock, but: addr, addr_type are immutable.
2541 		 * privacy_mode is only written by us or in
2542 		 * hci_cc_le_set_privacy_mode that we wait for.
2543 		 * We should be idempotent so MGMT updating flags
2544 		 * while we are processing is OK.
2545 		 */
2546 		bacpy(&p[i].addr, &params->addr);
2547 		p[i].addr_type = params->addr_type;
2548 		p[i].flags = READ_ONCE(params->flags);
2549 		p[i].privacy_mode = READ_ONCE(params->privacy_mode);
2550 		++i;
2551 	}
2552 
2553 	rcu_read_unlock();
2554 
2555 	*n = i;
2556 	return p;
2557 }
2558 
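/* Illustrative: the snapshot above lets callers iterate without holding
 * RCU across blocking HCI round trips; the canonical use (mirroring
 * hci_update_accept_list_sync() below) is:
 */
#if 0 /* example only */
	params = conn_params_copy(&hdev->pend_le_conns, &n);
	if (!params)
		return -ENOMEM;

	for (i = 0; i < n; ++i)
		hci_le_add_accept_list_sync(hdev, &params[i], &num_entries);

	kvfree(params);
#endif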
2559 /* Clear LE Accept List */
hci_le_clear_accept_list_sync(struct hci_dev * hdev)2560 static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
2561 {
2562 	if (!(hdev->commands[26] & 0x80))
2563 		return 0;
2564 
2565 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
2566 				     HCI_CMD_TIMEOUT);
2567 }
2568 
2569 /* Device must not be scanning when updating the accept list.
2570  *
2571  * Update is done using the following sequence:
2572  *
2573  * use_ll_privacy((Disable Advertising) -> Disable Resolving List) ->
2574  * Remove Devices From Accept List ->
2575  * (has IRK && use_ll_privacy(Remove Devices From Resolving List))->
2576  * Add Devices to Accept List ->
2577  * (has IRK && use_ll_privacy(Add Devices to Resolving List)) ->
2578  * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) ->
2579  * Enable Scanning
2580  *
2581  * In case of failure advertising shall be restored to its original state and
2582  * the return shall disable use of the accept list, since either the accept
2583  * list or the resolving list could not be programmed.
2584  *
2585  */
hci_update_accept_list_sync(struct hci_dev * hdev)2586 static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
2587 {
2588 	struct conn_params *params;
2589 	struct bdaddr_list *b, *t;
2590 	u8 num_entries = 0;
2591 	bool pend_conn, pend_report;
2592 	u8 filter_policy;
2593 	size_t i, n;
2594 	int err;
2595 
2596 	/* Pause advertising if resolving list can be used as controllers
2597 	 * cannot accept resolving list modifications while advertising.
2598 	 */
2599 	if (use_ll_privacy(hdev)) {
2600 		err = hci_pause_advertising_sync(hdev);
2601 		if (err) {
2602 			bt_dev_err(hdev, "pause advertising failed: %d", err);
2603 			return 0x00;
2604 		}
2605 	}
2606 
2607 	/* Disable address resolution while reprogramming accept list since
2608 	 * devices that do have an IRK will be programmed in the resolving list
2609 	 * when LL Privacy is enabled.
2610 	 */
2611 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2612 	if (err) {
2613 		bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
2614 		goto done;
2615 	}
2616 
2617 	/* Force address filtering if PA Sync is in progress */
2618 	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
2619 		struct hci_cp_le_pa_create_sync *sent;
2620 
2621 		sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC);
2622 		if (sent) {
2623 			struct conn_params pa;
2624 
2625 			memset(&pa, 0, sizeof(pa));
2626 
2627 			bacpy(&pa.addr, &sent->addr);
2628 			pa.addr_type = sent->addr_type;
2629 
2630 			/* Clear first since there could be addresses left
2631 			 * behind.
2632 			 */
2633 			hci_le_clear_accept_list_sync(hdev);
2634 
2635 			num_entries = 1;
2636 			err = hci_le_add_accept_list_sync(hdev, &pa,
2637 							  &num_entries);
2638 			goto done;
2639 		}
2640 	}
2641 
2642 	/* Go through the current accept list programmed into the
2643 	 * controller one by one and check if that address is connected or is
2644 	 * still in the list of pending connections or list of devices to
2645 	 * report. If not present in either list, then remove it from
2646 	 * the controller.
2647 	 */
2648 	list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
2649 		if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type))
2650 			continue;
2651 
2652 		/* Pointers not dereferenced, no locks needed */
2653 		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2654 						      &b->bdaddr,
2655 						      b->bdaddr_type);
2656 		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2657 							&b->bdaddr,
2658 							b->bdaddr_type);
2659 
2660 		/* If the device is not likely to connect or report,
2661 		 * remove it from the acceptlist.
2662 		 */
2663 		if (!pend_conn && !pend_report) {
2664 			hci_le_del_accept_list_sync(hdev, &b->bdaddr,
2665 						    b->bdaddr_type);
2666 			continue;
2667 		}
2668 
2669 		num_entries++;
2670 	}
2671 
2672 	/* Since all no longer valid accept list entries have been
2673 	 * removed, walk through the list of pending connections
2674 	 * and ensure that any new device gets programmed into
2675 	 * the controller.
2676 	 *
2677 	 * If the list of the devices is larger than the list of
2678 	 * available accept list entries in the controller, then
2679 	 * just abort and return a filter policy value to not use the
2680 	 * accept list.
2681 	 *
2682 	 * The list and params may be mutated while we wait for events,
2683 	 * so make a copy and iterate it.
2684 	 */
2685 
2686 	params = conn_params_copy(&hdev->pend_le_conns, &n);
2687 	if (!params) {
2688 		err = -ENOMEM;
2689 		goto done;
2690 	}
2691 
2692 	for (i = 0; i < n; ++i) {
2693 		err = hci_le_add_accept_list_sync(hdev, &params[i],
2694 						  &num_entries);
2695 		if (err) {
2696 			kvfree(params);
2697 			goto done;
2698 		}
2699 	}
2700 
2701 	kvfree(params);
2702 
2703 	/* After adding all new pending connections, walk through
2704 	 * the list of pending reports and also add these to the
2705 	 * accept list if there is still space. Abort if space runs out.
2706 	 */
2707 
2708 	params = conn_params_copy(&hdev->pend_le_reports, &n);
2709 	if (!params) {
2710 		err = -ENOMEM;
2711 		goto done;
2712 	}
2713 
2714 	for (i = 0; i < n; ++i) {
2715 		err = hci_le_add_accept_list_sync(hdev, &params[i],
2716 						  &num_entries);
2717 		if (err) {
2718 			kvfree(params);
2719 			goto done;
2720 		}
2721 	}
2722 
2723 	kvfree(params);
2724 
2725 	/* Use the allowlist unless the following conditions are all true:
2726 	 * - We are not currently suspending
2727 	 * - There are 1 or more ADV monitors registered and it's not offloaded
2728 	 * - Interleaved scanning is not currently using the allowlist
2729 	 */
2730 	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
2731 	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
2732 	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
2733 		err = -EINVAL;
2734 
2735 done:
2736 	filter_policy = err ? 0x00 : 0x01;
2737 
2738 	/* Enable address resolution when LL Privacy is enabled. */
2739 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
2740 	if (err)
2741 		bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);
2742 
2743 	/* Resume advertising if it was paused */
2744 	if (use_ll_privacy(hdev))
2745 		hci_resume_advertising_sync(hdev);
2746 
2747 	/* Select filter policy to use accept list */
2748 	return filter_policy;
2749 }
2750 
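/* Illustrative: the value returned above feeds straight into scan setup -
 * 0x01 makes hci_passive_scan_sync() below scan with the accept list,
 * 0x00 accepts all advertisers, and 0x02 may later be OR'ed in when
 * Extended Scanner Filter Policies are in use.
 */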
hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params * cp,u8 type,u16 interval,u16 window)2751 static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp,
2752 				   u8 type, u16 interval, u16 window)
2753 {
2754 	cp->type = type;
2755 	cp->interval = cpu_to_le16(interval);
2756 	cp->window = cpu_to_le16(window);
2757 }
2758 
hci_le_set_ext_scan_param_sync(struct hci_dev * hdev,u8 type,u16 interval,u16 window,u8 own_addr_type,u8 filter_policy)2759 static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
2760 					  u16 interval, u16 window,
2761 					  u8 own_addr_type, u8 filter_policy)
2762 {
2763 	struct hci_cp_le_set_ext_scan_params *cp;
2764 	struct hci_cp_le_scan_phy_params *phy;
2765 	u8 data[sizeof(*cp) + sizeof(*phy) * 2];
2766 	u8 num_phy = 0x00;
2767 
2768 	cp = (void *)data;
2769 	phy = (void *)cp->data;
2770 
2771 	memset(data, 0, sizeof(data));
2772 
2773 	cp->own_addr_type = own_addr_type;
2774 	cp->filter_policy = filter_policy;
2775 
2776 	/* If PA Sync is in progress then select the PHY based on the
2777 	 * hci_conn.iso_qos.
2778 	 */
2779 	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
2780 		struct hci_cp_le_add_to_accept_list *sent;
2781 
2782 		sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
2783 		if (sent) {
2784 			struct hci_conn *conn;
2785 
2786 			conn = hci_conn_hash_lookup_ba(hdev, ISO_LINK,
2787 						       &sent->bdaddr);
2788 			if (conn) {
2789 				struct bt_iso_qos *qos = &conn->iso_qos;
2790 
2791 				if (qos->bcast.in.phy & BT_ISO_PHY_1M ||
2792 				    qos->bcast.in.phy & BT_ISO_PHY_2M) {
2793 					cp->scanning_phys |= LE_SCAN_PHY_1M;
2794 					hci_le_scan_phy_params(phy, type,
2795 							       interval,
2796 							       window);
2797 					num_phy++;
2798 					phy++;
2799 				}
2800 
2801 				if (qos->bcast.in.phy & BT_ISO_PHY_CODED) {
2802 					cp->scanning_phys |= LE_SCAN_PHY_CODED;
2803 					hci_le_scan_phy_params(phy, type,
2804 							       interval * 3,
2805 							       window * 3);
2806 					num_phy++;
2807 					phy++;
2808 				}
2809 
2810 				if (num_phy)
2811 					goto done;
2812 			}
2813 		}
2814 	}
2815 
2816 	if (scan_1m(hdev) || scan_2m(hdev)) {
2817 		cp->scanning_phys |= LE_SCAN_PHY_1M;
2818 		hci_le_scan_phy_params(phy, type, interval, window);
2819 		num_phy++;
2820 		phy++;
2821 	}
2822 
2823 	if (scan_coded(hdev)) {
2824 		cp->scanning_phys |= LE_SCAN_PHY_CODED;
2825 		hci_le_scan_phy_params(phy, type, interval * 3, window * 3);
2826 		num_phy++;
2827 		phy++;
2828 	}
2829 
2830 done:
2831 	if (!num_phy)
2832 		return -EINVAL;
2833 
2834 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
2835 				     sizeof(*cp) + sizeof(*phy) * num_phy,
2836 				     data, HCI_CMD_TIMEOUT);
2837 }
2838 
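/* Worked example for the interval * 3 factor above (hypothetical values):
 * a 1M interval of 0x0060 (96 * 0.625 ms = 60 ms) becomes 0x0120 (180 ms)
 * on the coded PHY, compensating for its longer packet air time.
 */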
hci_le_set_scan_param_sync(struct hci_dev * hdev,u8 type,u16 interval,u16 window,u8 own_addr_type,u8 filter_policy)2839 static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type,
2840 				      u16 interval, u16 window,
2841 				      u8 own_addr_type, u8 filter_policy)
2842 {
2843 	struct hci_cp_le_set_scan_param cp;
2844 
2845 	if (use_ext_scan(hdev))
2846 		return hci_le_set_ext_scan_param_sync(hdev, type, interval,
2847 						      window, own_addr_type,
2848 						      filter_policy);
2849 
2850 	memset(&cp, 0, sizeof(cp));
2851 	cp.type = type;
2852 	cp.interval = cpu_to_le16(interval);
2853 	cp.window = cpu_to_le16(window);
2854 	cp.own_address_type = own_addr_type;
2855 	cp.filter_policy = filter_policy;
2856 
2857 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM,
2858 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2859 }
2860 
hci_start_scan_sync(struct hci_dev * hdev,u8 type,u16 interval,u16 window,u8 own_addr_type,u8 filter_policy,u8 filter_dup)2861 static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval,
2862 			       u16 window, u8 own_addr_type, u8 filter_policy,
2863 			       u8 filter_dup)
2864 {
2865 	int err;
2866 
2867 	if (hdev->scanning_paused) {
2868 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2869 		return 0;
2870 	}
2871 
2872 	err = hci_le_set_scan_param_sync(hdev, type, interval, window,
2873 					 own_addr_type, filter_policy);
2874 	if (err)
2875 		return err;
2876 
2877 	return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup);
2878 }
2879 
hci_passive_scan_sync(struct hci_dev * hdev)2880 static int hci_passive_scan_sync(struct hci_dev *hdev)
2881 {
2882 	u8 own_addr_type;
2883 	u8 filter_policy;
2884 	u16 window, interval;
2885 	u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE;
2886 	int err;
2887 
2888 	if (hdev->scanning_paused) {
2889 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2890 		return 0;
2891 	}
2892 
2893 	err = hci_scan_disable_sync(hdev);
2894 	if (err) {
2895 		bt_dev_err(hdev, "disable scanning failed: %d", err);
2896 		return err;
2897 	}
2898 
2899 	/* Set require_privacy to false since no SCAN_REQ are sent
2900 	 * during passive scanning. Not using a non-resolvable address
2901 	 * here is important so that peer devices using direct
2902 	 * advertising with our address will be correctly reported
2903 	 * by the controller.
2904 	 */
2905 	if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
2906 					   &own_addr_type))
2907 		return 0;
2908 
2909 	if (hdev->enable_advmon_interleave_scan &&
2910 	    hci_update_interleaved_scan_sync(hdev))
2911 		return 0;
2912 
2913 	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
2914 
2915 	/* Adding or removing entries from the accept list must
2916 	 * happen before enabling scanning. The controller does
2917 	 * not allow accept list modification while scanning.
2918 	 */
2919 	filter_policy = hci_update_accept_list_sync(hdev);
2920 
2921 	/* When the controller is using random resolvable addresses and
2922 	 * LE privacy is thereby enabled, controllers with Extended
2923 	 * Scanner Filter Policies support can now enable support
2924 	 * for handling directed advertising.
2925 	 *
2926 	 * So instead of using filter polices 0x00 (no acceptlist)
2927 	 * and 0x01 (acceptlist enabled) use the new filter policies
2928 	 * 0x02 (no acceptlist) and 0x03 (acceptlist enabled).
2929 	 */
2930 	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
2931 	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
2932 		filter_policy |= 0x02;
2933 
2934 	if (hdev->suspended) {
2935 		window = hdev->le_scan_window_suspend;
2936 		interval = hdev->le_scan_int_suspend;
2937 	} else if (hci_is_le_conn_scanning(hdev)) {
2938 		window = hdev->le_scan_window_connect;
2939 		interval = hdev->le_scan_int_connect;
2940 	} else if (hci_is_adv_monitoring(hdev)) {
2941 		window = hdev->le_scan_window_adv_monitor;
2942 		interval = hdev->le_scan_int_adv_monitor;
2943 	} else {
2944 		window = hdev->le_scan_window;
2945 		interval = hdev->le_scan_interval;
2946 	}
2947 
2948 	/* Disable all filtering for Mesh */
2949 	if (hci_dev_test_flag(hdev, HCI_MESH)) {
2950 		filter_policy = 0;
2951 		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
2952 	}
2953 
2954 	bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);
2955 
2956 	return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
2957 				   own_addr_type, filter_policy, filter_dups);
2958 }
2959 
2960 /* This function controls the passive scanning based on hdev->pend_le_conns
2961  * list. If there are pending LE connections we start the background scanning,
2962  * otherwise we stop it in the following sequence:
2963  *
2964  * If there are devices to scan:
2965  *
2966  * Disable Scanning -> Update Accept List ->
2967  * use_ll_privacy((Disable Advertising) -> Disable Resolving List ->
2968  * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
2969  * Enable Scanning
2970  *
2971  * Otherwise:
2972  *
2973  * Disable Scanning
2974  */
hci_update_passive_scan_sync(struct hci_dev * hdev)2975 int hci_update_passive_scan_sync(struct hci_dev *hdev)
2976 {
2977 	int err;
2978 
2979 	if (!test_bit(HCI_UP, &hdev->flags) ||
2980 	    test_bit(HCI_INIT, &hdev->flags) ||
2981 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
2982 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
2983 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
2984 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2985 		return 0;
2986 
2987 	/* No point in doing scanning if LE support hasn't been enabled */
2988 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2989 		return 0;
2990 
2991 	/* If discovery is active don't interfere with it */
2992 	if (hdev->discovery.state != DISCOVERY_STOPPED)
2993 		return 0;
2994 
2995 	/* Reset RSSI and UUID filters when starting background scanning
2996 	 * since these filters are meant for service discovery only.
2997 	 *
2998 	 * The Start Discovery and Start Service Discovery operations
2999 	 * ensure to set proper values for RSSI threshold and UUID
3000 	 * filter list. So it is safe to just reset them here.
3001 	 */
3002 	hci_discovery_filter_clear(hdev);
3003 
3004 	bt_dev_dbg(hdev, "ADV monitoring is %s",
3005 		   hci_is_adv_monitoring(hdev) ? "on" : "off");
3006 
3007 	if (!hci_dev_test_flag(hdev, HCI_MESH) &&
3008 	    list_empty(&hdev->pend_le_conns) &&
3009 	    list_empty(&hdev->pend_le_reports) &&
3010 	    !hci_is_adv_monitoring(hdev) &&
3011 	    !hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
3012 		/* If there are no pending LE connections, no devices
3013 		 * to be scanned for and no ADV monitors, we should stop the
3014 		 * background scanning.
3015 		 */
3016 
3017 		bt_dev_dbg(hdev, "stopping background scanning");
3018 
3019 		err = hci_scan_disable_sync(hdev);
3020 		if (err)
3021 			bt_dev_err(hdev, "stop background scanning failed: %d",
3022 				   err);
3023 	} else {
3024 		/* If there is at least one pending LE connection, we should
3025 		 * keep the background scan running.
3026 		 */
3027 
3028 		/* If controller is connecting, we should not start scanning
3029 		 * since some controllers are not able to scan and connect at
3030 		 * the same time.
3031 		 */
3032 		if (hci_lookup_le_connect(hdev))
3033 			return 0;
3034 
3035 		bt_dev_dbg(hdev, "start background scanning");
3036 
3037 		err = hci_passive_scan_sync(hdev);
3038 		if (err)
3039 			bt_dev_err(hdev, "start background scanning failed: %d",
3040 				   err);
3041 	}
3042 
3043 	return err;
3044 }
3045 
update_scan_sync(struct hci_dev * hdev,void * data)3046 static int update_scan_sync(struct hci_dev *hdev, void *data)
3047 {
3048 	return hci_update_scan_sync(hdev);
3049 }
3050 
hci_update_scan(struct hci_dev * hdev)3051 int hci_update_scan(struct hci_dev *hdev)
3052 {
3053 	return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL);
3054 }
3055 
update_passive_scan_sync(struct hci_dev * hdev,void * data)3056 static int update_passive_scan_sync(struct hci_dev *hdev, void *data)
3057 {
3058 	return hci_update_passive_scan_sync(hdev);
3059 }
3060 
hci_update_passive_scan(struct hci_dev * hdev)3061 int hci_update_passive_scan(struct hci_dev *hdev)
3062 {
3063 	/* Only queue if it would have any effect */
3064 	if (!test_bit(HCI_UP, &hdev->flags) ||
3065 	    test_bit(HCI_INIT, &hdev->flags) ||
3066 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
3067 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
3068 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
3069 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
3070 		return 0;
3071 
3072 	return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
3073 				       NULL);
3074 }
3075 
hci_write_sc_support_sync(struct hci_dev * hdev,u8 val)3076 int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
3077 {
3078 	int err;
3079 
3080 	if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev))
3081 		return 0;
3082 
3083 	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
3084 				    sizeof(val), &val, HCI_CMD_TIMEOUT);
3085 
3086 	if (!err) {
3087 		if (val) {
3088 			hdev->features[1][0] |= LMP_HOST_SC;
3089 			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
3090 		} else {
3091 			hdev->features[1][0] &= ~LMP_HOST_SC;
3092 			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
3093 		}
3094 	}
3095 
3096 	return err;
3097 }
3098 
hci_write_ssp_mode_sync(struct hci_dev * hdev,u8 mode)3099 int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode)
3100 {
3101 	int err;
3102 
3103 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
3104 	    lmp_host_ssp_capable(hdev))
3105 		return 0;
3106 
3107 	if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
3108 		__hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
3109 				      sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3110 	}
3111 
3112 	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
3113 				    sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3114 	if (err)
3115 		return err;
3116 
3117 	return hci_write_sc_support_sync(hdev, 0x01);
3118 }
3119 
hci_write_le_host_supported_sync(struct hci_dev * hdev,u8 le,u8 simul)3120 int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul)
3121 {
3122 	struct hci_cp_write_le_host_supported cp;
3123 
3124 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
3125 	    !lmp_bredr_capable(hdev))
3126 		return 0;
3127 
3128 	/* Check first if we already have the right host state
3129 	 * (host features set)
3130 	 */
3131 	if (le == lmp_host_le_capable(hdev) &&
3132 	    simul == lmp_host_le_br_capable(hdev))
3133 		return 0;
3134 
3135 	memset(&cp, 0, sizeof(cp));
3136 
3137 	cp.le = le;
3138 	cp.simul = simul;
3139 
3140 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3141 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3142 }
3143 
hci_powered_update_adv_sync(struct hci_dev * hdev)3144 static int hci_powered_update_adv_sync(struct hci_dev *hdev)
3145 {
3146 	struct adv_info *adv, *tmp;
3147 	int err;
3148 
3149 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
3150 		return 0;
3151 
3152 	/* If RPA Resolution has not been enabled yet it means the
3153 	 * resolving list is empty and we should attempt to program the
3154 	 * local IRK in order to support using own_addr_type
3155 	 * ADDR_LE_DEV_RANDOM_RESOLVED (0x03).
3156 	 */
3157 	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
3158 		hci_le_add_resolve_list_sync(hdev, NULL);
3159 		hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
3160 	}
3161 
3162 	/* Make sure the controller has a good default for
3163 	 * advertising data. This also applies to the case
3164 	 * where BR/EDR was toggled during the AUTO_OFF phase.
3165 	 */
3166 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3167 	    list_empty(&hdev->adv_instances)) {
3168 		if (ext_adv_capable(hdev)) {
3169 			err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
3170 			if (!err)
3171 				hci_update_scan_rsp_data_sync(hdev, 0x00);
3172 		} else {
3173 			err = hci_update_adv_data_sync(hdev, 0x00);
3174 			if (!err)
3175 				hci_update_scan_rsp_data_sync(hdev, 0x00);
3176 		}
3177 
3178 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
3179 			hci_enable_advertising_sync(hdev);
3180 	}
3181 
3182 	/* Call for each tracked instance to be scheduled */
3183 	list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list)
3184 		hci_schedule_adv_instance_sync(hdev, adv->instance, true);
3185 
3186 	return 0;
3187 }
3188 
hci_write_auth_enable_sync(struct hci_dev * hdev)3189 static int hci_write_auth_enable_sync(struct hci_dev *hdev)
3190 {
3191 	u8 link_sec;
3192 
3193 	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3194 	if (link_sec == test_bit(HCI_AUTH, &hdev->flags))
3195 		return 0;
3196 
3197 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
3198 				     sizeof(link_sec), &link_sec,
3199 				     HCI_CMD_TIMEOUT);
3200 }
3201 
hci_write_fast_connectable_sync(struct hci_dev * hdev,bool enable)3202 int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable)
3203 {
3204 	struct hci_cp_write_page_scan_activity cp;
3205 	u8 type;
3206 	int err = 0;
3207 
3208 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3209 		return 0;
3210 
3211 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
3212 		return 0;
3213 
3214 	memset(&cp, 0, sizeof(cp));
3215 
3216 	if (enable) {
3217 		type = PAGE_SCAN_TYPE_INTERLACED;
3218 
3219 		/* 160 msec page scan interval */
3220 		cp.interval = cpu_to_le16(0x0100);
3221 	} else {
3222 		type = hdev->def_page_scan_type;
3223 		cp.interval = cpu_to_le16(hdev->def_page_scan_int);
3224 	}
3225 
3226 	cp.window = cpu_to_le16(hdev->def_page_scan_window);
3227 
3228 	if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval ||
3229 	    __cpu_to_le16(hdev->page_scan_window) != cp.window) {
3230 		err = __hci_cmd_sync_status(hdev,
3231 					    HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
3232 					    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3233 		if (err)
3234 			return err;
3235 	}
3236 
3237 	if (hdev->page_scan_type != type)
3238 		err = __hci_cmd_sync_status(hdev,
3239 					    HCI_OP_WRITE_PAGE_SCAN_TYPE,
3240 					    sizeof(type), &type,
3241 					    HCI_CMD_TIMEOUT);
3242 
3243 	return err;
3244 }
3245 
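/* Worked example for the fast connectable interval above: 0x0100 is 256
 * slots of 0.625 ms each, i.e. 256 * 0.625 ms = 160 ms, matching the
 * "160 msec page scan interval" comment.
 */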
disconnected_accept_list_entries(struct hci_dev * hdev)3246 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
3247 {
3248 	struct bdaddr_list *b;
3249 
3250 	list_for_each_entry(b, &hdev->accept_list, list) {
3251 		struct hci_conn *conn;
3252 
3253 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
3254 		if (!conn)
3255 			return true;
3256 
3257 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3258 			return true;
3259 	}
3260 
3261 	return false;
3262 }
3263 
hci_write_scan_enable_sync(struct hci_dev * hdev,u8 val)3264 static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val)
3265 {
3266 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
3267 					    sizeof(val), &val,
3268 					    HCI_CMD_TIMEOUT);
3269 }
3270 
hci_update_scan_sync(struct hci_dev * hdev)3271 int hci_update_scan_sync(struct hci_dev *hdev)
3272 {
3273 	u8 scan;
3274 
3275 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3276 		return 0;
3277 
3278 	if (!hdev_is_powered(hdev))
3279 		return 0;
3280 
3281 	if (mgmt_powering_down(hdev))
3282 		return 0;
3283 
3284 	if (hdev->scanning_paused)
3285 		return 0;
3286 
3287 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
3288 	    disconnected_accept_list_entries(hdev))
3289 		scan = SCAN_PAGE;
3290 	else
3291 		scan = SCAN_DISABLED;
3292 
3293 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
3294 		scan |= SCAN_INQUIRY;
3295 
3296 	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
3297 	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
3298 		return 0;
3299 
3300 	return hci_write_scan_enable_sync(hdev, scan);
3301 }
3302 
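/* Illustrative: a connectable and discoverable device ends up writing
 * scan = SCAN_PAGE | SCAN_INQUIRY, a connectable-only one just SCAN_PAGE,
 * and the final check above skips the command when the controller is
 * already in the requested state.
 */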
hci_update_name_sync(struct hci_dev * hdev)3303 int hci_update_name_sync(struct hci_dev *hdev)
3304 {
3305 	struct hci_cp_write_local_name cp;
3306 
3307 	memset(&cp, 0, sizeof(cp));
3308 
3309 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3310 
3311 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
3312 					    sizeof(cp), &cp,
3313 					    HCI_CMD_TIMEOUT);
3314 }
3315 
3316 /* This function performs the powered update HCI command sequence after the
3317  * HCI init sequence, which ends up resetting all states; the sequence is:
3318  *
3319  * HCI_SSP_ENABLED(Enable SSP)
3320  * HCI_LE_ENABLED(Enable LE)
3321  * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) ->
3322  * Update adv data)
3323  * Enable Authentication
3324  * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
3325  * Set Name -> Set EIR)
3326  * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address)
3327  */
hci_powered_update_sync(struct hci_dev * hdev)3328 int hci_powered_update_sync(struct hci_dev *hdev)
3329 {
3330 	int err;
3331 
3332 	/* Register the available SMP channels (BR/EDR and LE) only when
3333 	 * successfully powering on the controller. This late
3334 	 * registration is required so that LE SMP can clearly decide if
3335 	 * the public address or static address is used.
3336 	 */
3337 	smp_register(hdev);
3338 
3339 	err = hci_write_ssp_mode_sync(hdev, 0x01);
3340 	if (err)
3341 		return err;
3342 
3343 	err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00);
3344 	if (err)
3345 		return err;
3346 
3347 	err = hci_powered_update_adv_sync(hdev);
3348 	if (err)
3349 		return err;
3350 
3351 	err = hci_write_auth_enable_sync(hdev);
3352 	if (err)
3353 		return err;
3354 
3355 	if (lmp_bredr_capable(hdev)) {
3356 		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3357 			hci_write_fast_connectable_sync(hdev, true);
3358 		else
3359 			hci_write_fast_connectable_sync(hdev, false);
3360 		hci_update_scan_sync(hdev);
3361 		hci_update_class_sync(hdev);
3362 		hci_update_name_sync(hdev);
3363 		hci_update_eir_sync(hdev);
3364 	}
3365 
3366 	/* If forcing static address is in use or there is no public
3367 	 * address use the static address as random address (but skip
3368 	 * the HCI command if the current random address is already the
3369 	 * static one).
3370 	 *
3371 	 * In case BR/EDR has been disabled on a dual-mode controller
3372 	 * and a static address has been configured, then use that
3373 	 * address instead of the public BR/EDR address.
3374 	 */
3375 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3376 	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3377 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) {
3378 		if (bacmp(&hdev->static_addr, BDADDR_ANY))
3379 			return hci_set_random_addr_sync(hdev,
3380 							&hdev->static_addr);
3381 	}
3382 
3383 	return 0;
3384 }
3385 
3386 /**
3387  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
3388  *				       (BD_ADDR) for a HCI device from
3389  *				       a firmware node property.
3390  * @hdev:	The HCI device
3391  *
3392  * Search the firmware node for 'local-bd-address'.
3393  *
3394  * All-zero BD addresses are rejected, because those could be properties
3395  * that exist in the firmware tables, but were not updated by the firmware. For
3396  * example, the DTS could define 'local-bd-address', with zero BD addresses.
3397  */
hci_dev_get_bd_addr_from_property(struct hci_dev * hdev)3398 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
3399 {
3400 	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
3401 	bdaddr_t ba;
3402 	int ret;
3403 
3404 	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
3405 					    (u8 *)&ba, sizeof(ba));
3406 	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
3407 		return;
3408 
3409 	if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks))
3410 		baswap(&hdev->public_addr, &ba);
3411 	else
3412 		bacpy(&hdev->public_addr, &ba);
3413 }
3414 
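/* Illustrative devicetree snippet (hypothetical node) for the property read
 * above; the bytes are consumed as-is unless the controller sets
 * HCI_QUIRK_BDADDR_PROPERTY_BROKEN, in which case they are byte-swapped:
 *
 *	bluetooth {
 *		local-bd-address = [ 55 44 33 22 11 00 ];
 *	};
 */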
3415 struct hci_init_stage {
3416 	int (*func)(struct hci_dev *hdev);
3417 };
3418 
3419 /* Run init stage NULL terminated function table */
hci_init_stage_sync(struct hci_dev * hdev,const struct hci_init_stage * stage)3420 static int hci_init_stage_sync(struct hci_dev *hdev,
3421 			       const struct hci_init_stage *stage)
3422 {
3423 	size_t i;
3424 
3425 	for (i = 0; stage[i].func; i++) {
3426 		int err;
3427 
3428 		err = stage[i].func(hdev);
3429 		if (err)
3430 			return err;
3431 	}
3432 
3433 	return 0;
3434 }
3435 
3436 /* Read Local Version */
3437 static int hci_read_local_version_sync(struct hci_dev *hdev)
3438 {
3439 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION,
3440 				     0, NULL, HCI_CMD_TIMEOUT);
3441 }
3442 
3443 /* Read BD Address */
3444 static int hci_read_bd_addr_sync(struct hci_dev *hdev)
3445 {
3446 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR,
3447 				     0, NULL, HCI_CMD_TIMEOUT);
3448 }
3449 
3450 #define HCI_INIT(_func) \
3451 { \
3452 	.func = _func, \
3453 }
3454 
3455 static const struct hci_init_stage hci_init0[] = {
3456 	/* HCI_OP_READ_LOCAL_VERSION */
3457 	HCI_INIT(hci_read_local_version_sync),
3458 	/* HCI_OP_READ_BD_ADDR */
3459 	HCI_INIT(hci_read_bd_addr_sync),
3460 	{}
3461 };
3462 
3463 int hci_reset_sync(struct hci_dev *hdev)
3464 {
3465 	int err;
3466 
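	/* Flag the in-flight reset; the flag is presumably cleared again
	 * by the Reset command complete handler.
	 */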
3467 	set_bit(HCI_RESET, &hdev->flags);
3468 
3469 	err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
3470 				    HCI_CMD_TIMEOUT);
3471 	if (err)
3472 		return err;
3473 
3474 	return 0;
3475 }
3476 
3477 static int hci_init0_sync(struct hci_dev *hdev)
3478 {
3479 	int err;
3480 
3481 	bt_dev_dbg(hdev, "");
3482 
3483 	/* Reset */
3484 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
3485 		err = hci_reset_sync(hdev);
3486 		if (err)
3487 			return err;
3488 	}
3489 
3490 	return hci_init_stage_sync(hdev, hci_init0);
3491 }
3492 
3493 static int hci_unconf_init_sync(struct hci_dev *hdev)
3494 {
3495 	int err;
3496 
3497 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3498 		return 0;
3499 
3500 	err = hci_init0_sync(hdev);
3501 	if (err < 0)
3502 		return err;
3503 
3504 	if (hci_dev_test_flag(hdev, HCI_SETUP))
3505 		hci_debugfs_create_basic(hdev);
3506 
3507 	return 0;
3508 }
3509 
3510 /* Read Local Supported Features. */
3511 static int hci_read_local_features_sync(struct hci_dev *hdev)
3512 {
3513 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
3514 				     0, NULL, HCI_CMD_TIMEOUT);
3515 }
3516 
3517 /* BR Controller init stage 1 command sequence */
3518 static const struct hci_init_stage br_init1[] = {
3519 	/* HCI_OP_READ_LOCAL_FEATURES */
3520 	HCI_INIT(hci_read_local_features_sync),
3521 	/* HCI_OP_READ_LOCAL_VERSION */
3522 	HCI_INIT(hci_read_local_version_sync),
3523 	/* HCI_OP_READ_BD_ADDR */
3524 	HCI_INIT(hci_read_bd_addr_sync),
3525 	{}
3526 };
3527 
3528 /* Read Local Commands */
3529 static int hci_read_local_cmds_sync(struct hci_dev *hdev)
3530 {
3531 	/* All Bluetooth 1.2 and later controllers should support the
3532 	 * HCI command for reading the local supported commands.
3533 	 *
3534 	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
3535 	 * but do not have support for this command. If that is the case,
3536 	 * the driver can quirk the behavior and skip reading the local
3537 	 * supported commands.
3538 	 */
3539 	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
3540 	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
3541 		return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS,
3542 					     0, NULL, HCI_CMD_TIMEOUT);
3543 
3544 	return 0;
3545 }
3546 
3547 static int hci_init1_sync(struct hci_dev *hdev)
3548 {
3549 	int err;
3550 
3551 	bt_dev_dbg(hdev, "");
3552 
3553 	/* Reset */
3554 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
3555 		err = hci_reset_sync(hdev);
3556 		if (err)
3557 			return err;
3558 	}
3559 
3560 	return hci_init_stage_sync(hdev, br_init1);
3561 }
3562 
3563 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
3564 static int hci_read_buffer_size_sync(struct hci_dev *hdev)
3565 {
3566 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE,
3567 				     0, NULL, HCI_CMD_TIMEOUT);
3568 }
3569 
3570 /* Read Class of Device */
3571 static int hci_read_dev_class_sync(struct hci_dev *hdev)
3572 {
3573 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV,
3574 				     0, NULL, HCI_CMD_TIMEOUT);
3575 }
3576 
3577 /* Read Local Name */
3578 static int hci_read_local_name_sync(struct hci_dev *hdev)
3579 {
3580 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME,
3581 				     0, NULL, HCI_CMD_TIMEOUT);
3582 }
3583 
3584 /* Read Voice Setting */
3585 static int hci_read_voice_setting_sync(struct hci_dev *hdev)
3586 {
3587 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING,
3588 				     0, NULL, HCI_CMD_TIMEOUT);
3589 }
3590 
3591 /* Read Number of Supported IAC */
3592 static int hci_read_num_supported_iac_sync(struct hci_dev *hdev)
3593 {
3594 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC,
3595 				     0, NULL, HCI_CMD_TIMEOUT);
3596 }
3597 
3598 /* Read Current IAC LAP */
3599 static int hci_read_current_iac_lap_sync(struct hci_dev *hdev)
3600 {
3601 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP,
3602 				     0, NULL, HCI_CMD_TIMEOUT);
3603 }
3604 
3605 static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type,
3606 				     u8 cond_type, bdaddr_t *bdaddr,
3607 				     u8 auto_accept)
3608 {
3609 	struct hci_cp_set_event_filter cp;
3610 
3611 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3612 		return 0;
3613 
3614 	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
3615 		return 0;
3616 
3617 	memset(&cp, 0, sizeof(cp));
3618 	cp.flt_type = flt_type;
3619 
3620 	if (flt_type != HCI_FLT_CLEAR_ALL) {
3621 		cp.cond_type = cond_type;
3622 		bacpy(&cp.addr_conn_flt.bdaddr, bdaddr);
3623 		cp.addr_conn_flt.auto_accept = auto_accept;
3624 	}
3625 
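	/* Clear All Filters carries no condition, so only the one-byte
	 * filter type is sent in that case.
	 */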
3626 	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT,
3627 				     flt_type == HCI_FLT_CLEAR_ALL ?
3628 				     sizeof(cp.flt_type) : sizeof(cp), &cp,
3629 				     HCI_CMD_TIMEOUT);
3630 }
3631 
3632 static int hci_clear_event_filter_sync(struct hci_dev *hdev)
3633 {
3634 	if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED))
3635 		return 0;
3636 
3637 	/* In theory the state machine should not reach here unless
3638 	 * a hci_set_event_filter_sync() call succeeds, but we do
3639 	 * the check both for parity and as a future reminder.
3640 	 */
3641 	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
3642 		return 0;
3643 
3644 	return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
3645 					 BDADDR_ANY, 0x00);
3646 }
3647 
3648 /* Connection accept timeout ~20 secs */
3649 static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
3650 {
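	/* 0x7d00 slots * 0.625 ms per baseband slot = 20000 ms, hence the
	 * ~20 second accept timeout noted above.
	 */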
3651 	__le16 param = cpu_to_le16(0x7d00);
3652 
3653 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT,
3654 				     sizeof(param), &param, HCI_CMD_TIMEOUT);
3655 }
3656 
3657 /* BR Controller init stage 2 command sequence */
3658 static const struct hci_init_stage br_init2[] = {
3659 	/* HCI_OP_READ_BUFFER_SIZE */
3660 	HCI_INIT(hci_read_buffer_size_sync),
3661 	/* HCI_OP_READ_CLASS_OF_DEV */
3662 	HCI_INIT(hci_read_dev_class_sync),
3663 	/* HCI_OP_READ_LOCAL_NAME */
3664 	HCI_INIT(hci_read_local_name_sync),
3665 	/* HCI_OP_READ_VOICE_SETTING */
3666 	HCI_INIT(hci_read_voice_setting_sync),
3667 	/* HCI_OP_READ_NUM_SUPPORTED_IAC */
3668 	HCI_INIT(hci_read_num_supported_iac_sync),
3669 	/* HCI_OP_READ_CURRENT_IAC_LAP */
3670 	HCI_INIT(hci_read_current_iac_lap_sync),
3671 	/* HCI_OP_SET_EVENT_FLT */
3672 	HCI_INIT(hci_clear_event_filter_sync),
3673 	/* HCI_OP_WRITE_CA_TIMEOUT */
3674 	HCI_INIT(hci_write_ca_timeout_sync),
3675 	{}
3676 };
3677 
3678 static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev)
3679 {
3680 	u8 mode = 0x01;
3681 
3682 	if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
3683 		return 0;
3684 
3685 	/* When SSP is available, the host features page
3686 	 * should be available as well. However, some
3687 	 * controllers report max_page as 0 as long as SSP
3688 	 * has not been enabled. To get proper debugging
3689 	 * output, force max_page to at least 1.
3690 	 */
3691 	hdev->max_page = 0x01;
3692 
3693 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
3694 				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3695 }
3696 
3697 static int hci_write_eir_sync(struct hci_dev *hdev)
3698 {
3699 	struct hci_cp_write_eir cp;
3700 
3701 	if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
3702 		return 0;
3703 
3704 	memset(hdev->eir, 0, sizeof(hdev->eir));
3705 	memset(&cp, 0, sizeof(cp));
3706 
3707 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
3708 				     HCI_CMD_TIMEOUT);
3709 }
3710 
3711 static int hci_write_inquiry_mode_sync(struct hci_dev *hdev)
3712 {
3713 	u8 mode;
3714 
3715 	if (!lmp_inq_rssi_capable(hdev) &&
3716 	    !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
3717 		return 0;
3718 
3719 	/* If Extended Inquiry Result events are supported, then
3720 	 * they are clearly preferred over Inquiry Result with RSSI
3721 	 * events.
3722 	 */
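	/* Inquiry mode 0x01 = Inquiry Result with RSSI format,
	 * 0x02 = Inquiry Result with RSSI or Extended Inquiry Result.
	 */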
3723 	mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
3724 
3725 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE,
3726 				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3727 }
3728 
3729 static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev)
3730 {
3731 	if (!lmp_inq_tx_pwr_capable(hdev))
3732 		return 0;
3733 
3734 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER,
3735 				     0, NULL, HCI_CMD_TIMEOUT);
3736 }
3737 
3738 static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page)
3739 {
3740 	struct hci_cp_read_local_ext_features cp;
3741 
3742 	if (!lmp_ext_feat_capable(hdev))
3743 		return 0;
3744 
3745 	memset(&cp, 0, sizeof(cp));
3746 	cp.page = page;
3747 
3748 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
3749 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3750 }
3751 
3752 static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev)
3753 {
3754 	return hci_read_local_ext_features_sync(hdev, 0x01);
3755 }
3756 
3757 /* HCI Controller init stage 2 command sequence */
3758 static const struct hci_init_stage hci_init2[] = {
3759 	/* HCI_OP_READ_LOCAL_COMMANDS */
3760 	HCI_INIT(hci_read_local_cmds_sync),
3761 	/* HCI_OP_WRITE_SSP_MODE */
3762 	HCI_INIT(hci_write_ssp_mode_1_sync),
3763 	/* HCI_OP_WRITE_EIR */
3764 	HCI_INIT(hci_write_eir_sync),
3765 	/* HCI_OP_WRITE_INQUIRY_MODE */
3766 	HCI_INIT(hci_write_inquiry_mode_sync),
3767 	/* HCI_OP_READ_INQ_RSP_TX_POWER */
3768 	HCI_INIT(hci_read_inq_rsp_tx_power_sync),
3769 	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
3770 	HCI_INIT(hci_read_local_ext_features_1_sync),
3771 	/* HCI_OP_WRITE_AUTH_ENABLE */
3772 	HCI_INIT(hci_write_auth_enable_sync),
3773 	{}
3774 };
3775 
3776 /* Read LE Buffer Size */
3777 static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
3778 {
3779 	/* Use Read LE Buffer Size V2 if supported */
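	/* commands[41] & 0x20 tests bit 5 of octet 41 in the supported
	 * commands mask, which should correspond to LE Read Buffer Size v2.
	 */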
3780 	if (iso_capable(hdev) && hdev->commands[41] & 0x20)
3781 		return __hci_cmd_sync_status(hdev,
3782 					     HCI_OP_LE_READ_BUFFER_SIZE_V2,
3783 					     0, NULL, HCI_CMD_TIMEOUT);
3784 
3785 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE,
3786 				     0, NULL, HCI_CMD_TIMEOUT);
3787 }
3788 
3789 /* Read LE Local Supported Features */
3790 static int hci_le_read_local_features_sync(struct hci_dev *hdev)
3791 {
3792 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES,
3793 				     0, NULL, HCI_CMD_TIMEOUT);
3794 }
3795 
3796 /* Read LE Supported States */
3797 static int hci_le_read_supported_states_sync(struct hci_dev *hdev)
3798 {
3799 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES,
3800 				     0, NULL, HCI_CMD_TIMEOUT);
3801 }
3802 
3803 /* LE Controller init stage 2 command sequence */
3804 static const struct hci_init_stage le_init2[] = {
3805 	/* HCI_OP_LE_READ_LOCAL_FEATURES */
3806 	HCI_INIT(hci_le_read_local_features_sync),
3807 	/* HCI_OP_LE_READ_BUFFER_SIZE */
3808 	HCI_INIT(hci_le_read_buffer_size_sync),
3809 	/* HCI_OP_LE_READ_SUPPORTED_STATES */
3810 	HCI_INIT(hci_le_read_supported_states_sync),
3811 	{}
3812 };
3813 
3814 static int hci_init2_sync(struct hci_dev *hdev)
3815 {
3816 	int err;
3817 
3818 	bt_dev_dbg(hdev, "");
3819 
3820 	err = hci_init_stage_sync(hdev, hci_init2);
3821 	if (err)
3822 		return err;
3823 
3824 	if (lmp_bredr_capable(hdev)) {
3825 		err = hci_init_stage_sync(hdev, br_init2);
3826 		if (err)
3827 			return err;
3828 	} else {
3829 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
3830 	}
3831 
3832 	if (lmp_le_capable(hdev)) {
3833 		err = hci_init_stage_sync(hdev, le_init2);
3834 		if (err)
3835 			return err;
3836 		/* LE-only controllers have LE implicitly enabled */
3837 		if (!lmp_bredr_capable(hdev))
3838 			hci_dev_set_flag(hdev, HCI_LE_ENABLED);
3839 	}
3840 
3841 	return 0;
3842 }
3843 
3844 static int hci_set_event_mask_sync(struct hci_dev *hdev)
3845 {
3846 	/* The second byte is 0xff instead of 0x9f (two reserved bits
3847 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
3848 	 * command otherwise.
3849 	 */
3850 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
3851 
3852 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
3853 	 * any event mask for pre-1.2 devices.
3854 	 */
3855 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
3856 		return 0;
3857 
3858 	if (lmp_bredr_capable(hdev)) {
3859 		events[4] |= 0x01; /* Flow Specification Complete */
3860 
3861 		/* Don't set Disconnection Complete and Mode Change when
3862 		 * suspended, as that would wake up the host when
3863 		 * disconnecting due to suspend.
3864 		 */
3865 		if (hdev->suspended) {
3866 			events[0] &= 0xef; /* Clear Disconnection Complete */
3867 			events[2] &= 0xf7; /* Clear Mode Change */
3868 		}
3869 	} else {
3870 		/* Use a different default for LE-only devices */
3871 		memset(events, 0, sizeof(events));
3872 		events[1] |= 0x20; /* Command Complete */
3873 		events[1] |= 0x40; /* Command Status */
3874 		events[1] |= 0x80; /* Hardware Error */
3875 
3876 		/* If the controller supports the Disconnect command, enable
3877 		 * the corresponding event. In addition enable packet flow
3878 		 * control related events.
3879 		 */
3880 		if (hdev->commands[0] & 0x20) {
3881 			/* Don't set Disconnection Complete when suspended, as
3882 			 * that would wake up the host when disconnecting due
3883 			 * to suspend.
3884 			 */
3885 			if (!hdev->suspended)
3886 				events[0] |= 0x10; /* Disconnection Complete */
3887 			events[2] |= 0x04; /* Number of Completed Packets */
3888 			events[3] |= 0x02; /* Data Buffer Overflow */
3889 		}
3890 
3891 		/* If the controller supports the Read Remote Version
3892 		 * Information command, enable the corresponding event.
3893 		 */
3894 		if (hdev->commands[2] & 0x80)
3895 			events[1] |= 0x08; /* Read Remote Version Information
3896 					    * Complete
3897 					    */
3898 
3899 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
3900 			events[0] |= 0x80; /* Encryption Change */
3901 			events[5] |= 0x80; /* Encryption Key Refresh Complete */
3902 		}
3903 	}
3904 
3905 	if (lmp_inq_rssi_capable(hdev) ||
3906 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
3907 		events[4] |= 0x02; /* Inquiry Result with RSSI */
3908 
3909 	if (lmp_ext_feat_capable(hdev))
3910 		events[4] |= 0x04; /* Read Remote Extended Features Complete */
3911 
3912 	if (lmp_esco_capable(hdev)) {
3913 		events[5] |= 0x08; /* Synchronous Connection Complete */
3914 		events[5] |= 0x10; /* Synchronous Connection Changed */
3915 	}
3916 
3917 	if (lmp_sniffsubr_capable(hdev))
3918 		events[5] |= 0x20; /* Sniff Subrating */
3919 
3920 	if (lmp_pause_enc_capable(hdev))
3921 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
3922 
3923 	if (lmp_ext_inq_capable(hdev))
3924 		events[5] |= 0x40; /* Extended Inquiry Result */
3925 
3926 	if (lmp_no_flush_capable(hdev))
3927 		events[7] |= 0x01; /* Enhanced Flush Complete */
3928 
3929 	if (lmp_lsto_capable(hdev))
3930 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
3931 
3932 	if (lmp_ssp_capable(hdev)) {
3933 		events[6] |= 0x01;	/* IO Capability Request */
3934 		events[6] |= 0x02;	/* IO Capability Response */
3935 		events[6] |= 0x04;	/* User Confirmation Request */
3936 		events[6] |= 0x08;	/* User Passkey Request */
3937 		events[6] |= 0x10;	/* Remote OOB Data Request */
3938 		events[6] |= 0x20;	/* Simple Pairing Complete */
3939 		events[7] |= 0x04;	/* User Passkey Notification */
3940 		events[7] |= 0x08;	/* Keypress Notification */
3941 		events[7] |= 0x10;	/* Remote Host Supported
3942 					 * Features Notification
3943 					 */
3944 	}
3945 
3946 	if (lmp_le_capable(hdev))
3947 		events[7] |= 0x20;	/* LE Meta-Event */
3948 
3949 	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK,
3950 				     sizeof(events), events, HCI_CMD_TIMEOUT);
3951 }
3952 
3953 static int hci_read_stored_link_key_sync(struct hci_dev *hdev)
3954 {
3955 	struct hci_cp_read_stored_link_key cp;
3956 
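	/* commands[6] & 0x20 tests bit 5 of octet 6 in the supported
	 * commands mask, which should correspond to Read Stored Link Key.
	 */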
3957 	if (!(hdev->commands[6] & 0x20) ||
3958 	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
3959 		return 0;
3960 
3961 	memset(&cp, 0, sizeof(cp));
3962 	bacpy(&cp.bdaddr, BDADDR_ANY);
3963 	cp.read_all = 0x01;
3964 
3965 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY,
3966 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3967 }
3968 
3969 static int hci_setup_link_policy_sync(struct hci_dev *hdev)
3970 {
3971 	struct hci_cp_write_def_link_policy cp;
3972 	u16 link_policy = 0;
3973 
3974 	if (!(hdev->commands[5] & 0x10))
3975 		return 0;
3976 
3977 	memset(&cp, 0, sizeof(cp));
3978 
3979 	if (lmp_rswitch_capable(hdev))
3980 		link_policy |= HCI_LP_RSWITCH;
3981 	if (lmp_hold_capable(hdev))
3982 		link_policy |= HCI_LP_HOLD;
3983 	if (lmp_sniff_capable(hdev))
3984 		link_policy |= HCI_LP_SNIFF;
3985 	if (lmp_park_capable(hdev))
3986 		link_policy |= HCI_LP_PARK;
3987 
3988 	cp.policy = cpu_to_le16(link_policy);
3989 
3990 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
3991 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3992 }
3993 
3994 static int hci_read_page_scan_activity_sync(struct hci_dev *hdev)
3995 {
3996 	if (!(hdev->commands[8] & 0x01))
3997 		return 0;
3998 
3999 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY,
4000 				     0, NULL, HCI_CMD_TIMEOUT);
4001 }
4002 
4003 static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev)
4004 {
4005 	if (!(hdev->commands[18] & 0x04) ||
4006 	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
4007 	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
4008 		return 0;
4009 
4010 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4011 				     0, NULL, HCI_CMD_TIMEOUT);
4012 }
4013 
4014 static int hci_read_page_scan_type_sync(struct hci_dev *hdev)
4015 {
4016 	/* Some older Broadcom based Bluetooth 1.2 controllers do not
4017 	 * support the Read Page Scan Type command. Check support for
4018 	 * this command in the bit mask of supported commands.
4019 	 */
4020 	if (!(hdev->commands[13] & 0x01))
4021 		return 0;
4022 
4023 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE,
4024 				     0, NULL, HCI_CMD_TIMEOUT);
4025 }
4026 
4027 /* Read features beyond page 1 if available */
4028 static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev)
4029 {
4030 	u8 page;
4031 	int err;
4032 
4033 	if (!lmp_ext_feat_capable(hdev))
4034 		return 0;
4035 
4036 	for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page;
4037 	     page++) {
4038 		err = hci_read_local_ext_features_sync(hdev, page);
4039 		if (err)
4040 			return err;
4041 	}
4042 
4043 	return 0;
4044 }
4045 
4046 /* HCI Controller init stage 3 command sequence */
4047 static const struct hci_init_stage hci_init3[] = {
4048 	/* HCI_OP_SET_EVENT_MASK */
4049 	HCI_INIT(hci_set_event_mask_sync),
4050 	/* HCI_OP_READ_STORED_LINK_KEY */
4051 	HCI_INIT(hci_read_stored_link_key_sync),
4052 	/* HCI_OP_WRITE_DEF_LINK_POLICY */
4053 	HCI_INIT(hci_setup_link_policy_sync),
4054 	/* HCI_OP_READ_PAGE_SCAN_ACTIVITY */
4055 	HCI_INIT(hci_read_page_scan_activity_sync),
4056 	/* HCI_OP_READ_DEF_ERR_DATA_REPORTING */
4057 	HCI_INIT(hci_read_def_err_data_reporting_sync),
4058 	/* HCI_OP_READ_PAGE_SCAN_TYPE */
4059 	HCI_INIT(hci_read_page_scan_type_sync),
4060 	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
4061 	HCI_INIT(hci_read_local_ext_features_all_sync),
4062 	{}
4063 };
4064 
4065 static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
4066 {
4067 	u8 events[8];
4068 
4069 	if (!lmp_le_capable(hdev))
4070 		return 0;
4071 
4072 	memset(events, 0, sizeof(events));
4073 
4074 	if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
4075 		events[0] |= 0x10;	/* LE Long Term Key Request */
4076 
4077 	/* If controller supports the Connection Parameters Request
4078 	 * Link Layer Procedure, enable the corresponding event.
4079 	 */
4080 	if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
4081 		/* LE Remote Connection Parameter Request */
4082 		events[0] |= 0x20;
4083 
4084 	/* If the controller supports the Data Length Extension
4085 	 * feature, enable the corresponding event.
4086 	 */
4087 	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
4088 		events[0] |= 0x40;	/* LE Data Length Change */
4089 
4090 	/* If the controller supports LL Privacy feature or LE Extended Adv,
4091 	 * enable the corresponding event.
4092 	 */
4093 	if (use_enhanced_conn_complete(hdev))
4094 		events[1] |= 0x02;	/* LE Enhanced Connection Complete */
4095 
4096 	/* If the controller supports Extended Scanner Filter
4097 	 * Policies, enable the corresponding event.
4098 	 */
4099 	if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
4100 		events[1] |= 0x04;	/* LE Direct Advertising Report */
4101 
4102 	/* If the controller supports Channel Selection Algorithm #2
4103 	 * feature, enable the corresponding event.
4104 	 */
4105 	if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
4106 		events[2] |= 0x08;	/* LE Channel Selection Algorithm */
4107 
4108 	/* If the controller supports the LE Set Scan Enable command,
4109 	 * enable the corresponding advertising report event.
4110 	 */
4111 	if (hdev->commands[26] & 0x08)
4112 		events[0] |= 0x02;	/* LE Advertising Report */
4113 
4114 	/* If the controller supports the LE Create Connection
4115 	 * command, enable the corresponding event.
4116 	 */
4117 	if (hdev->commands[26] & 0x10)
4118 		events[0] |= 0x01;	/* LE Connection Complete */
4119 
4120 	/* If the controller supports the LE Connection Update
4121 	 * command, enable the corresponding event.
4122 	 */
4123 	if (hdev->commands[27] & 0x04)
4124 		events[0] |= 0x04;	/* LE Connection Update Complete */
4125 
4126 	/* If the controller supports the LE Read Remote Used Features
4127 	 * command, enable the corresponding event.
4128 	 */
4129 	if (hdev->commands[27] & 0x20)
4130 		/* LE Read Remote Used Features Complete */
4131 		events[0] |= 0x08;
4132 
4133 	/* If the controller supports the LE Read Local P-256
4134 	 * Public Key command, enable the corresponding event.
4135 	 */
4136 	if (hdev->commands[34] & 0x02)
4137 		/* LE Read Local P-256 Public Key Complete */
4138 		events[0] |= 0x80;
4139 
4140 	/* If the controller supports the LE Generate DHKey
4141 	 * command, enable the corresponding event.
4142 	 */
4143 	if (hdev->commands[34] & 0x04)
4144 		events[1] |= 0x01;	/* LE Generate DHKey Complete */
4145 
4146 	/* If the controller supports the LE Set Default PHY or
4147 	 * LE Set PHY commands, enable the corresponding event.
4148 	 */
4149 	if (hdev->commands[35] & (0x20 | 0x40))
4150 		events[1] |= 0x08;        /* LE PHY Update Complete */
4151 
4152 	/* If the controller supports LE Set Extended Scan Parameters
4153 	 * and LE Set Extended Scan Enable commands, enable the
4154 	 * corresponding event.
4155 	 */
4156 	if (use_ext_scan(hdev))
4157 		events[1] |= 0x10;	/* LE Extended Advertising Report */
4158 
4159 	/* If the controller supports the LE Extended Advertising
4160 	 * command, enable the corresponding event.
4161 	 */
4162 	if (ext_adv_capable(hdev))
4163 		events[2] |= 0x02;	/* LE Advertising Set Terminated */
4164 
4165 	if (cis_capable(hdev)) {
4166 		events[3] |= 0x01;	/* LE CIS Established */
4167 		if (cis_peripheral_capable(hdev))
4168 			events[3] |= 0x02; /* LE CIS Request */
4169 	}
4170 
4171 	if (bis_capable(hdev)) {
4172 		events[1] |= 0x20;	/* LE PA Report */
4173 		events[1] |= 0x40;	/* LE PA Sync Established */
4174 		events[3] |= 0x04;	/* LE Create BIG Complete */
4175 		events[3] |= 0x08;	/* LE Terminate BIG Complete */
4176 		events[3] |= 0x10;	/* LE BIG Sync Established */
4177 		events[3] |= 0x20;	/* LE BIG Sync Loss */
4178 		events[4] |= 0x02;	/* LE BIG Info Advertising Report */
4179 	}
4180 
4181 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK,
4182 				     sizeof(events), events, HCI_CMD_TIMEOUT);
4183 }
4184 
4185 /* Read LE Advertising Channel TX Power */
4186 static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev)
4187 {
4188 	if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
4189 		/* The HCI spec forbids mixing legacy and extended
4190 		 * advertising commands, and READ_ADV_TX_POWER belongs to
4191 		 * the legacy set. So do not call it if extended advertising
4192 		 * is supported; otherwise the controller would return
4193 		 * COMMAND_DISALLOWED for the extended commands.
4194 		 */
4195 		return __hci_cmd_sync_status(hdev,
4196 					       HCI_OP_LE_READ_ADV_TX_POWER,
4197 					       0, NULL, HCI_CMD_TIMEOUT);
4198 	}
4199 
4200 	return 0;
4201 }
4202 
4203 /* Read LE Min/Max Tx Power */
4204 static int hci_le_read_tx_power_sync(struct hci_dev *hdev)
4205 {
4206 	if (!(hdev->commands[38] & 0x80) ||
4207 	    test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks))
4208 		return 0;
4209 
4210 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER,
4211 				     0, NULL, HCI_CMD_TIMEOUT);
4212 }
4213 
4214 /* Read LE Accept List Size */
4215 static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
4216 {
4217 	if (!(hdev->commands[26] & 0x40))
4218 		return 0;
4219 
4220 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4221 				     0, NULL, HCI_CMD_TIMEOUT);
4222 }
4223 
4224 /* Read LE Resolving List Size */
4225 static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
4226 {
4227 	if (!(hdev->commands[34] & 0x40))
4228 		return 0;
4229 
4230 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
4231 				     0, NULL, HCI_CMD_TIMEOUT);
4232 }
4233 
4234 /* Clear LE Resolving List */
4235 static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev)
4236 {
4237 	if (!(hdev->commands[34] & 0x20))
4238 		return 0;
4239 
4240 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL,
4241 				     HCI_CMD_TIMEOUT);
4242 }
4243 
4244 /* Set RPA timeout */
4245 static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev)
4246 {
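	/* The RPA timeout is expressed in seconds; the spec default is
	 * 0x0384, i.e. 900 seconds (15 minutes).
	 */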
4247 	__le16 timeout = cpu_to_le16(hdev->rpa_timeout);
4248 
4249 	if (!(hdev->commands[35] & 0x04) ||
4250 	    test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks))
4251 		return 0;
4252 
4253 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT,
4254 				     sizeof(timeout), &timeout,
4255 				     HCI_CMD_TIMEOUT);
4256 }
4257 
4258 /* Read LE Maximum Data Length */
4259 static int hci_le_read_max_data_len_sync(struct hci_dev *hdev)
4260 {
4261 	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4262 		return 0;
4263 
4264 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL,
4265 				     HCI_CMD_TIMEOUT);
4266 }
4267 
4268 /* Read LE Suggested Default Data Length */
4269 static int hci_le_read_def_data_len_sync(struct hci_dev *hdev)
4270 {
4271 	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4272 		return 0;
4273 
4274 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL,
4275 				     HCI_CMD_TIMEOUT);
4276 }
4277 
4278 /* Read LE Number of Supported Advertising Sets */
4279 static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev)
4280 {
4281 	if (!ext_adv_capable(hdev))
4282 		return 0;
4283 
4284 	return __hci_cmd_sync_status(hdev,
4285 				     HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4286 				     0, NULL, HCI_CMD_TIMEOUT);
4287 }
4288 
4289 /* Write LE Host Supported */
4290 static int hci_set_le_support_sync(struct hci_dev *hdev)
4291 {
4292 	struct hci_cp_write_le_host_supported cp;
4293 
4294 	/* LE-only devices do not support explicit enablement */
4295 	if (!lmp_bredr_capable(hdev))
4296 		return 0;
4297 
4298 	memset(&cp, 0, sizeof(cp));
4299 
4300 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
4301 		cp.le = 0x01;
4302 		cp.simul = 0x00;
4303 	}
4304 
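	/* Skip the command if the controller already reports the desired
	 * Host LE support setting.
	 */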
4305 	if (cp.le == lmp_host_le_capable(hdev))
4306 		return 0;
4307 
4308 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4309 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4310 }
4311 
4312 /* LE Set Host Feature */
4313 static int hci_le_set_host_feature_sync(struct hci_dev *hdev)
4314 {
4315 	struct hci_cp_le_set_host_feature cp;
4316 
4317 	if (!cis_capable(hdev))
4318 		return 0;
4319 
4320 	memset(&cp, 0, sizeof(cp));
4321 
4322 	/* Connected Isochronous Channels (Host Support) */
4323 	cp.bit_number = 32;
4324 	cp.bit_value = 1;
4325 
4326 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE,
4327 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4328 }
4329 
4330 /* LE Controller init stage 3 command sequence */
4331 static const struct hci_init_stage le_init3[] = {
4332 	/* HCI_OP_LE_SET_EVENT_MASK */
4333 	HCI_INIT(hci_le_set_event_mask_sync),
4334 	/* HCI_OP_LE_READ_ADV_TX_POWER */
4335 	HCI_INIT(hci_le_read_adv_tx_power_sync),
4336 	/* HCI_OP_LE_READ_TRANSMIT_POWER */
4337 	HCI_INIT(hci_le_read_tx_power_sync),
4338 	/* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */
4339 	HCI_INIT(hci_le_read_accept_list_size_sync),
4340 	/* HCI_OP_LE_CLEAR_ACCEPT_LIST */
4341 	HCI_INIT(hci_le_clear_accept_list_sync),
4342 	/* HCI_OP_LE_READ_RESOLV_LIST_SIZE */
4343 	HCI_INIT(hci_le_read_resolv_list_size_sync),
4344 	/* HCI_OP_LE_CLEAR_RESOLV_LIST */
4345 	HCI_INIT(hci_le_clear_resolv_list_sync),
4346 	/* HCI_OP_LE_SET_RPA_TIMEOUT */
4347 	HCI_INIT(hci_le_set_rpa_timeout_sync),
4348 	/* HCI_OP_LE_READ_MAX_DATA_LEN */
4349 	HCI_INIT(hci_le_read_max_data_len_sync),
4350 	/* HCI_OP_LE_READ_DEF_DATA_LEN */
4351 	HCI_INIT(hci_le_read_def_data_len_sync),
4352 	/* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */
4353 	HCI_INIT(hci_le_read_num_support_adv_sets_sync),
4354 	/* HCI_OP_WRITE_LE_HOST_SUPPORTED */
4355 	HCI_INIT(hci_set_le_support_sync),
4356 	/* HCI_OP_LE_SET_HOST_FEATURE */
4357 	HCI_INIT(hci_le_set_host_feature_sync),
4358 	{}
4359 };
4360 
4361 static int hci_init3_sync(struct hci_dev *hdev)
4362 {
4363 	int err;
4364 
4365 	bt_dev_dbg(hdev, "");
4366 
4367 	err = hci_init_stage_sync(hdev, hci_init3);
4368 	if (err)
4369 		return err;
4370 
4371 	if (lmp_le_capable(hdev))
4372 		return hci_init_stage_sync(hdev, le_init3);
4373 
4374 	return 0;
4375 }
4376 
4377 static int hci_delete_stored_link_key_sync(struct hci_dev *hdev)
4378 {
4379 	struct hci_cp_delete_stored_link_key cp;
4380 
4381 	/* Some Broadcom based Bluetooth controllers do not support the
4382 	 * Delete Stored Link Key command. They clearly indicate its
4383 	 * absence in the bit mask of supported commands.
4384 	 *
4385 	 * Check the supported commands and send it only if the command is
4386 	 * marked as supported. If not, assume that the controller does not
4387 	 * have actual support for stored link keys, which makes this
4388 	 * command redundant anyway.
4389 	 *
4390 	 * Some controllers indicate support for deleting stored link
4391 	 * keys, but they don't. The quirk lets a driver just disable
4392 	 * this command.
4393 	 */
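	/* commands[6] & 0x80 tests bit 7 of octet 6 in the supported
	 * commands mask, which should correspond to Delete Stored Link Key.
	 */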
4394 	if (!(hdev->commands[6] & 0x80) ||
4395 	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
4396 		return 0;
4397 
4398 	memset(&cp, 0, sizeof(cp));
4399 	bacpy(&cp.bdaddr, BDADDR_ANY);
4400 	cp.delete_all = 0x01;
4401 
4402 	return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
4403 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4404 }
4405 
4406 static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
4407 {
4408 	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
4409 	bool changed = false;
4410 
4411 	/* Set event mask page 2 if the HCI command for it is supported */
4412 	if (!(hdev->commands[22] & 0x04))
4413 		return 0;
4414 
4415 	/* If the Connectionless Peripheral Broadcast central role is supported,
4416 	 * enable all necessary events for it.
4417 	 */
4418 	if (lmp_cpb_central_capable(hdev)) {
4419 		events[1] |= 0x40;	/* Triggered Clock Capture */
4420 		events[1] |= 0x80;	/* Synchronization Train Complete */
4421 		events[2] |= 0x08;	/* Truncated Page Complete */
4422 		events[2] |= 0x20;	/* CPB Channel Map Change */
4423 		changed = true;
4424 	}
4425 
4426 	/* If the Connectionless Peripheral Broadcast peripheral role is supported,
4427 	 * enable all necessary events for it.
4428 	 */
4429 	if (lmp_cpb_peripheral_capable(hdev)) {
4430 		events[2] |= 0x01;	/* Synchronization Train Received */
4431 		events[2] |= 0x02;	/* CPB Receive */
4432 		events[2] |= 0x04;	/* CPB Timeout */
4433 		events[2] |= 0x10;	/* Peripheral Page Response Timeout */
4434 		changed = true;
4435 	}
4436 
4437 	/* Enable Authenticated Payload Timeout Expired event if supported */
4438 	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
4439 		events[2] |= 0x80;
4440 		changed = true;
4441 	}
4442 
4443 	/* Some Broadcom based controllers indicate support for Set Event
4444 	 * Mask Page 2 command, but then actually do not support it. Since
4445 	 * the default value is all bits set to zero, the command is only
4446 	 * required if the event mask has to be changed. In case no change
4447 	 * to the event mask is needed, skip this command.
4448 	 */
4449 	if (!changed)
4450 		return 0;
4451 
4452 	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2,
4453 				     sizeof(events), events, HCI_CMD_TIMEOUT);
4454 }
4455 
4456 /* Read local codec list if the HCI command is supported */
4457 static int hci_read_local_codecs_sync(struct hci_dev *hdev)
4458 {
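	/* Prefer the v2 variant when advertised; the two bits checked here
	 * are assumed to map to Read Local Supported Codecs v2 and v1 in
	 * the supported commands mask.
	 */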
4459 	if (hdev->commands[45] & 0x04)
4460 		hci_read_supported_codecs_v2(hdev);
4461 	else if (hdev->commands[29] & 0x20)
4462 		hci_read_supported_codecs(hdev);
4463 
4464 	return 0;
4465 }
4466 
4467 /* Read local pairing options if the HCI command is supported */
4468 static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
4469 {
4470 	if (!(hdev->commands[41] & 0x08))
4471 		return 0;
4472 
4473 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS,
4474 				     0, NULL, HCI_CMD_TIMEOUT);
4475 }
4476 
4477 /* Get MWS transport configuration if the HCI command is supported */
4478 static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
4479 {
4480 	if (!mws_transport_config_capable(hdev))
4481 		return 0;
4482 
4483 	return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
4484 				     0, NULL, HCI_CMD_TIMEOUT);
4485 }
4486 
4487 /* Check for Synchronization Train support */
4488 static int hci_read_sync_train_params_sync(struct hci_dev *hdev)
4489 {
4490 	if (!lmp_sync_train_capable(hdev))
4491 		return 0;
4492 
4493 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS,
4494 				     0, NULL, HCI_CMD_TIMEOUT);
4495 }
4496 
4497 /* Enable Secure Connections if supported and configured */
4498 static int hci_write_sc_support_1_sync(struct hci_dev *hdev)
4499 {
4500 	u8 support = 0x01;
4501 
4502 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
4503 	    !bredr_sc_enabled(hdev))
4504 		return 0;
4505 
4506 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
4507 				     sizeof(support), &support,
4508 				     HCI_CMD_TIMEOUT);
4509 }
4510 
4511 /* If supported, set erroneous data reporting to match the wideband
4512  * speech setting value.
4513  */
4514 static int hci_set_err_data_report_sync(struct hci_dev *hdev)
4515 {
4516 	struct hci_cp_write_def_err_data_reporting cp;
4517 	bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);
4518 
4519 	if (!(hdev->commands[18] & 0x08) ||
4520 	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
4521 	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
4522 		return 0;
4523 
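	/* Skip the command if the controller setting already matches the
	 * desired value.
	 */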
4524 	if (enabled == hdev->err_data_reporting)
4525 		return 0;
4526 
4527 	memset(&cp, 0, sizeof(cp));
4528 	cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED :
4529 				ERR_DATA_REPORTING_DISABLED;
4530 
4531 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4532 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4533 }
4534 
4535 static const struct hci_init_stage hci_init4[] = {
4536 	/* HCI_OP_DELETE_STORED_LINK_KEY */
4537 	HCI_INIT(hci_delete_stored_link_key_sync),
4538 	/* HCI_OP_SET_EVENT_MASK_PAGE_2 */
4539 	HCI_INIT(hci_set_event_mask_page_2_sync),
4540 	/* HCI_OP_READ_LOCAL_CODECS */
4541 	HCI_INIT(hci_read_local_codecs_sync),
4542 	/* HCI_OP_READ_LOCAL_PAIRING_OPTS */
4543 	HCI_INIT(hci_read_local_pairing_opts_sync),
4544 	/* HCI_OP_GET_MWS_TRANSPORT_CONFIG */
4545 	HCI_INIT(hci_get_mws_transport_config_sync),
4546 	/* HCI_OP_READ_SYNC_TRAIN_PARAMS */
4547 	HCI_INIT(hci_read_sync_train_params_sync),
4548 	/* HCI_OP_WRITE_SC_SUPPORT */
4549 	HCI_INIT(hci_write_sc_support_1_sync),
4550 	/* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */
4551 	HCI_INIT(hci_set_err_data_report_sync),
4552 	{}
4553 };
4554 
4555 /* Set Suggested Default Data Length to maximum if supported */
4556 static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev)
4557 {
4558 	struct hci_cp_le_write_def_data_len cp;
4559 
4560 	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4561 		return 0;
4562 
4563 	memset(&cp, 0, sizeof(cp));
4564 	cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
4565 	cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
4566 
4567 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
4568 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4569 }
4570 
4571 /* Set Default PHY parameters if the command is supported; enable all
4572  * supported PHYs according to the LE Features bits.
4573  */
4574 static int hci_le_set_default_phy_sync(struct hci_dev *hdev)
4575 {
4576 	struct hci_cp_le_set_default_phy cp;
4577 
4578 	if (!(hdev->commands[35] & 0x20)) {
4579 		/* If the command is not supported it means only 1M PHY is
4580 		 * supported.
4581 		 */
4582 		hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
4583 		hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
4584 		return 0;
4585 	}
4586 
4587 	memset(&cp, 0, sizeof(cp));
4588 	cp.all_phys = 0x00; /* Host states a preference for both TX and RX PHY */
4589 	cp.tx_phys = HCI_LE_SET_PHY_1M;
4590 	cp.rx_phys = HCI_LE_SET_PHY_1M;
4591 
4592 	/* Enables 2M PHY if supported */
4593 	if (le_2m_capable(hdev)) {
4594 		cp.tx_phys |= HCI_LE_SET_PHY_2M;
4595 		cp.rx_phys |= HCI_LE_SET_PHY_2M;
4596 	}
4597 
4598 	/* Enables Coded PHY if supported */
4599 	if (le_coded_capable(hdev)) {
4600 		cp.tx_phys |= HCI_LE_SET_PHY_CODED;
4601 		cp.rx_phys |= HCI_LE_SET_PHY_CODED;
4602 	}
4603 
4604 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4605 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4606 }
4607 
4608 static const struct hci_init_stage le_init4[] = {
4609 	/* HCI_OP_LE_WRITE_DEF_DATA_LEN */
4610 	HCI_INIT(hci_le_set_write_def_data_len_sync),
4611 	/* HCI_OP_LE_SET_DEFAULT_PHY */
4612 	HCI_INIT(hci_le_set_default_phy_sync),
4613 	{}
4614 };
4615 
4616 static int hci_init4_sync(struct hci_dev *hdev)
4617 {
4618 	int err;
4619 
4620 	bt_dev_dbg(hdev, "");
4621 
4622 	err = hci_init_stage_sync(hdev, hci_init4);
4623 	if (err)
4624 		return err;
4625 
4626 	if (lmp_le_capable(hdev))
4627 		return hci_init_stage_sync(hdev, le_init4);
4628 
4629 	return 0;
4630 }
4631 
4632 static int hci_init_sync(struct hci_dev *hdev)
4633 {
4634 	int err;
4635 
4636 	err = hci_init1_sync(hdev);
4637 	if (err < 0)
4638 		return err;
4639 
4640 	if (hci_dev_test_flag(hdev, HCI_SETUP))
4641 		hci_debugfs_create_basic(hdev);
4642 
4643 	err = hci_init2_sync(hdev);
4644 	if (err < 0)
4645 		return err;
4646 
4647 	err = hci_init3_sync(hdev);
4648 	if (err < 0)
4649 		return err;
4650 
4651 	err = hci_init4_sync(hdev);
4652 	if (err < 0)
4653 		return err;
4654 
4655 	/* This function is only called when the controller is actually in
4656 	 * configured state. When the controller is marked as unconfigured,
4657 	 * this initialization procedure is not run.
4658 	 *
4659 	 * It means that it is possible that a controller runs through its
4660 	 * setup phase and then discovers missing settings. If that is the
4661 	 * case, then this function will not be called. It then will only
4662 	 * be called during the config phase.
4663 	 *
4664 	 * So only when in setup phase or config phase, create the debugfs
4665 	 * entries and register the SMP channels.
4666 	 */
4667 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4668 	    !hci_dev_test_flag(hdev, HCI_CONFIG))
4669 		return 0;
4670 
4671 	if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
4672 		return 0;
4673 
4674 	hci_debugfs_create_common(hdev);
4675 
4676 	if (lmp_bredr_capable(hdev))
4677 		hci_debugfs_create_bredr(hdev);
4678 
4679 	if (lmp_le_capable(hdev))
4680 		hci_debugfs_create_le(hdev);
4681 
4682 	return 0;
4683 }
4684 
4685 #define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc }
4686 
4687 static const struct {
4688 	unsigned long quirk;
4689 	const char *desc;
4690 } hci_broken_table[] = {
4691 	HCI_QUIRK_BROKEN(LOCAL_COMMANDS,
4692 			 "HCI Read Local Supported Commands not supported"),
4693 	HCI_QUIRK_BROKEN(STORED_LINK_KEY,
4694 			 "HCI Delete Stored Link Key command is advertised, "
4695 			 "but not supported."),
4696 	HCI_QUIRK_BROKEN(ERR_DATA_REPORTING,
4697 			 "HCI Read Default Erroneous Data Reporting command is "
4698 			 "advertised, but not supported."),
4699 	HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER,
4700 			 "HCI Read Transmit Power Level command is advertised, "
4701 			 "but not supported."),
4702 	HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL,
4703 			 "HCI Set Event Filter command not supported."),
4704 	HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN,
4705 			 "HCI Enhanced Setup Synchronous Connection command is "
4706 			 "advertised, but not supported."),
4707 	HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
4708 			 "HCI LE Set Random Private Address Timeout command is "
4709 			 "advertised, but not supported."),
4710 	HCI_QUIRK_BROKEN(LE_CODED,
4711 			 "HCI LE Coded PHY feature bit is set, "
4712 			 "but its usage is not supported.")
4713 };
4714 
4715 /* This function handles the hdev setup stage:
4716  *
4717  * Calls hdev->setup
4718  * Sets up the address if HCI_QUIRK_USE_BDADDR_PROPERTY is set.
4719  */
4720 static int hci_dev_setup_sync(struct hci_dev *hdev)
4721 {
4722 	int ret = 0;
4723 	bool invalid_bdaddr;
4724 	size_t i;
4725 
4726 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4727 	    !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks))
4728 		return 0;
4729 
4730 	bt_dev_dbg(hdev, "");
4731 
4732 	hci_sock_dev_event(hdev, HCI_DEV_SETUP);
4733 
4734 	if (hdev->setup)
4735 		ret = hdev->setup(hdev);
4736 
4737 	for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) {
4738 		if (test_bit(hci_broken_table[i].quirk, &hdev->quirks))
4739 			bt_dev_warn(hdev, "%s", hci_broken_table[i].desc);
4740 	}
4741 
4742 	/* The transport driver can set the quirk to mark the
4743 	 * BD_ADDR invalid before creating the HCI device or in
4744 	 * its setup callback.
4745 	 */
4746 	invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
4747 			 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
4748 	if (!ret) {
4749 		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) &&
4750 		    !bacmp(&hdev->public_addr, BDADDR_ANY))
4751 			hci_dev_get_bd_addr_from_property(hdev);
4752 
4753 		if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) &&
4754 		    hdev->set_bdaddr) {
4755 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
4756 			if (!ret)
4757 				invalid_bdaddr = false;
4758 		}
4759 	}
4760 
4761 	/* The transport driver can set these quirks before
4762 	 * creating the HCI device or in its setup callback.
4763 	 *
4764 	 * For the invalid BD_ADDR quirk it is possible that
4765 	 * it becomes a valid address if the bootloader does
4766 	 * provide it (see above).
4767 	 *
4768 	 * In case any of them is set, the controller has to
4769 	 * start up as unconfigured.
4770 	 */
4771 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
4772 	    invalid_bdaddr)
4773 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
4774 
4775 	/* For an unconfigured controller it is required to
4776 	 * read at least the version information provided by
4777 	 * the Read Local Version Information command.
4778 	 *
4779 	 * If the set_bdaddr driver callback is provided, then
4780 	 * also the original Bluetooth public device address
4781 	 * will be read using the Read BD Address command.
4782 	 */
4783 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4784 		return hci_unconf_init_sync(hdev);
4785 
4786 	return ret;
4787 }
4788 
4789 /* This function handles the hdev init stage:
4790  *
4791  * Calls hci_dev_setup_sync to perform the setup stage
4792  * Calls hci_init_sync to perform the HCI command init sequence
4793  */
4794 static int hci_dev_init_sync(struct hci_dev *hdev)
4795 {
4796 	int ret;
4797 
4798 	bt_dev_dbg(hdev, "");
4799 
4800 	atomic_set(&hdev->cmd_cnt, 1);
4801 	set_bit(HCI_INIT, &hdev->flags);
4802 
4803 	ret = hci_dev_setup_sync(hdev);
4804 
4805 	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
4806 		/* If public address change is configured, ensure that
4807 		 * the address gets programmed. If the driver does not
4808 		 * support changing the public address, fail the power
4809 		 * on procedure.
4810 		 */
4811 		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
4812 		    hdev->set_bdaddr)
4813 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
4814 		else
4815 			ret = -EADDRNOTAVAIL;
4816 	}
4817 
4818 	if (!ret) {
4819 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
4820 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4821 			ret = hci_init_sync(hdev);
4822 			if (!ret && hdev->post_init)
4823 				ret = hdev->post_init(hdev);
4824 		}
4825 	}
4826 
4827 	/* If the HCI Reset command is clearing all diagnostic settings,
4828 	 * then they need to be reprogrammed after the init procedure
4829 	 * completed.
4830 	 */
4831 	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
4832 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4833 	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
4834 		ret = hdev->set_diag(hdev, true);
4835 
4836 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4837 		msft_do_open(hdev);
4838 		aosp_do_open(hdev);
4839 	}
4840 
4841 	clear_bit(HCI_INIT, &hdev->flags);
4842 
4843 	return ret;
4844 }
4845 
4846 int hci_dev_open_sync(struct hci_dev *hdev)
4847 {
4848 	int ret;
4849 
4850 	bt_dev_dbg(hdev, "");
4851 
4852 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
4853 		ret = -ENODEV;
4854 		goto done;
4855 	}
4856 
4857 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4858 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
4859 		/* Check for rfkill but allow the HCI setup stage to
4860 		 * proceed (which in itself doesn't cause any RF activity).
4861 		 */
4862 		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
4863 			ret = -ERFKILL;
4864 			goto done;
4865 		}
4866 
4867 		/* Check for valid public address or a configured static
4868 		 * random address, but let the HCI setup proceed to
4869 		 * be able to determine if there is a public address
4870 		 * or not.
4871 		 *
4872 		 * In case of user channel usage, it is not important
4873 		 * if a public address or static random address is
4874 		 * available.
4875 		 */
4876 		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4877 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
4878 		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
4879 			ret = -EADDRNOTAVAIL;
4880 			goto done;
4881 		}
4882 	}
4883 
4884 	if (test_bit(HCI_UP, &hdev->flags)) {
4885 		ret = -EALREADY;
4886 		goto done;
4887 	}
4888 
4889 	if (hdev->open(hdev)) {
4890 		ret = -EIO;
4891 		goto done;
4892 	}
4893 
4894 	hci_devcd_reset(hdev);
4895 
4896 	set_bit(HCI_RUNNING, &hdev->flags);
4897 	hci_sock_dev_event(hdev, HCI_DEV_OPEN);
4898 
4899 	ret = hci_dev_init_sync(hdev);
4900 	if (!ret) {
4901 		hci_dev_hold(hdev);
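		/* Mark the RPA as expired so a fresh one is generated
		 * before it is next used.
		 */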
4902 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
4903 		hci_adv_instances_set_rpa_expired(hdev, true);
4904 		set_bit(HCI_UP, &hdev->flags);
4905 		hci_sock_dev_event(hdev, HCI_DEV_UP);
4906 		hci_leds_update_powered(hdev, true);
4907 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4908 		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
4909 		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
4910 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4911 		    hci_dev_test_flag(hdev, HCI_MGMT)) {
4912 			ret = hci_powered_update_sync(hdev);
4913 			mgmt_power_on(hdev, ret);
4914 		}
4915 	} else {
4916 		/* Init failed, cleanup */
4917 		flush_work(&hdev->tx_work);
4918 
4919 		/* Since hci_rx_work() may queue new cmd_work, it should
4920 		 * be flushed first to avoid an unexpected call of
4921 		 * hci_cmd_work().
4922 		 */
4923 		flush_work(&hdev->rx_work);
4924 		flush_work(&hdev->cmd_work);
4925 
4926 		skb_queue_purge(&hdev->cmd_q);
4927 		skb_queue_purge(&hdev->rx_q);
4928 
4929 		if (hdev->flush)
4930 			hdev->flush(hdev);
4931 
4932 		if (hdev->sent_cmd) {
4933 			cancel_delayed_work_sync(&hdev->cmd_timer);
4934 			kfree_skb(hdev->sent_cmd);
4935 			hdev->sent_cmd = NULL;
4936 		}
4937 
4938 		if (hdev->req_skb) {
4939 			kfree_skb(hdev->req_skb);
4940 			hdev->req_skb = NULL;
4941 		}
4942 
4943 		clear_bit(HCI_RUNNING, &hdev->flags);
4944 		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
4945 
4946 		hdev->close(hdev);
4947 		hdev->flags &= BIT(HCI_RAW);
4948 	}
4949 
4950 done:
4951 	return ret;
4952 }
4953 
4954 /* This function requires the caller holds hdev->lock */
4955 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
4956 {
4957 	struct hci_conn_params *p;
4958 
4959 	list_for_each_entry(p, &hdev->le_conn_params, list) {
4960 		hci_pend_le_list_del_init(p);
4961 		if (p->conn) {
4962 			hci_conn_drop(p->conn);
4963 			hci_conn_put(p->conn);
4964 			p->conn = NULL;
4965 		}
4966 	}
4967 
4968 	BT_DBG("All LE pending actions cleared");
4969 }
4970 
4971 static int hci_dev_shutdown(struct hci_dev *hdev)
4972 {
4973 	int err = 0;
4974 	/* Similar to how we first do setup and then set the exclusive access
4975 	 * bit for userspace, we must first unset userchannel and then clean up.
4976 	 * Otherwise, the kernel can't properly use the hci channel to clean up
4977 	 * the controller (some shutdown routines require sending additional
4978 	 * commands to the controller for example).
4979 	 */
4980 	bool was_userchannel =
4981 		hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL);
4982 
4983 	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
4984 	    test_bit(HCI_UP, &hdev->flags)) {
4985 		/* Execute vendor specific shutdown routine */
4986 		if (hdev->shutdown)
4987 			err = hdev->shutdown(hdev);
4988 	}
4989 
4990 	if (was_userchannel)
4991 		hci_dev_set_flag(hdev, HCI_USER_CHANNEL);
4992 
4993 	return err;
4994 }
4995 
4996 int hci_dev_close_sync(struct hci_dev *hdev)
4997 {
4998 	bool auto_off;
4999 	int err = 0;
5000 
5001 	bt_dev_dbg(hdev, "");
5002 
5003 	cancel_delayed_work(&hdev->power_off);
5004 	cancel_delayed_work(&hdev->ncmd_timer);
5005 	cancel_delayed_work(&hdev->le_scan_disable);
5006 
5007 	hci_request_cancel_all(hdev);
5008 
5009 	if (hdev->adv_instance_timeout) {
5010 		cancel_delayed_work_sync(&hdev->adv_instance_expire);
5011 		hdev->adv_instance_timeout = 0;
5012 	}
5013 
5014 	err = hci_dev_shutdown(hdev);
5015 
5016 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
5017 		cancel_delayed_work_sync(&hdev->cmd_timer);
5018 		return err;
5019 	}
5020 
5021 	hci_leds_update_powered(hdev, false);
5022 
5023 	/* Flush RX and TX works */
5024 	flush_work(&hdev->tx_work);
5025 	flush_work(&hdev->rx_work);
5026 
5027 	if (hdev->discov_timeout > 0) {
5028 		hdev->discov_timeout = 0;
5029 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5030 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
5031 	}
5032 
5033 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
5034 		cancel_delayed_work(&hdev->service_cache);
5035 
5036 	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
5037 		struct adv_info *adv_instance;
5038 
5039 		cancel_delayed_work_sync(&hdev->rpa_expired);
5040 
5041 		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
5042 			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
5043 	}
5044 
5045 	/* Avoid potential lockdep warnings from the *_flush() calls by
5046 	 * ensuring the workqueue is empty up front.
5047 	 */
5048 	drain_workqueue(hdev->workqueue);
5049 
5050 	hci_dev_lock(hdev);
5051 
5052 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5053 
5054 	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
5055 
5056 	if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5057 	    hci_dev_test_flag(hdev, HCI_MGMT))
5058 		__mgmt_power_off(hdev);
5059 
5060 	hci_inquiry_cache_flush(hdev);
5061 	hci_pend_le_actions_clear(hdev);
5062 	hci_conn_hash_flush(hdev);
5063 	/* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */
5064 	smp_unregister(hdev);
5065 	hci_dev_unlock(hdev);
5066 
5067 	hci_sock_dev_event(hdev, HCI_DEV_DOWN);
5068 
5069 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5070 		aosp_do_close(hdev);
5071 		msft_do_close(hdev);
5072 	}
5073 
5074 	if (hdev->flush)
5075 		hdev->flush(hdev);
5076 
5077 	/* Reset device */
5078 	skb_queue_purge(&hdev->cmd_q);
5079 	atomic_set(&hdev->cmd_cnt, 1);
5080 	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
5081 	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
5082 		set_bit(HCI_INIT, &hdev->flags);
5083 		hci_reset_sync(hdev);
5084 		clear_bit(HCI_INIT, &hdev->flags);
5085 	}
5086 
5087 	/* Flush cmd work */
5088 	flush_work(&hdev->cmd_work);
5089 
5090 	/* Drop queues */
5091 	skb_queue_purge(&hdev->rx_q);
5092 	skb_queue_purge(&hdev->cmd_q);
5093 	skb_queue_purge(&hdev->raw_q);
5094 
5095 	/* Drop last sent command */
5096 	if (hdev->sent_cmd) {
5097 		cancel_delayed_work_sync(&hdev->cmd_timer);
5098 		kfree_skb(hdev->sent_cmd);
5099 		hdev->sent_cmd = NULL;
5100 	}
5101 
5102 	/* Drop last request */
5103 	if (hdev->req_skb) {
5104 		kfree_skb(hdev->req_skb);
5105 		hdev->req_skb = NULL;
5106 	}
5107 
5108 	clear_bit(HCI_RUNNING, &hdev->flags);
5109 	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
5110 
5111 	/* After this point our queues are empty and no tasks are scheduled. */
5112 	hdev->close(hdev);
5113 
5114 	/* Clear flags */
5115 	hdev->flags &= BIT(HCI_RAW);
5116 	hci_dev_clear_volatile_flags(hdev);
5117 
5118 	memset(hdev->eir, 0, sizeof(hdev->eir));
5119 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
5120 	bacpy(&hdev->random_addr, BDADDR_ANY);
5121 	hci_codec_list_clear(&hdev->local_codecs);
5122 
5123 	hci_dev_put(hdev);
5124 	return err;
5125 }
5126 
5127 /* This function performs the power-on HCI command sequence as follows:
5128  *
5129  * If the controller is already up (HCI_UP), run the hci_powered_update_sync
5130  * sequence; otherwise run hci_dev_open_sync, which is followed by
5131  * hci_powered_update_sync once the init sequence has completed.
5132  */
5133 static int hci_power_on_sync(struct hci_dev *hdev)
5134 {
5135 	int err;
5136 
5137 	if (test_bit(HCI_UP, &hdev->flags) &&
5138 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
5139 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
5140 		cancel_delayed_work(&hdev->power_off);
5141 		return hci_powered_update_sync(hdev);
5142 	}
5143 
5144 	err = hci_dev_open_sync(hdev);
5145 	if (err < 0)
5146 		return err;
5147 
5148 	/* During the HCI setup phase, a few error conditions are
5149 	 * ignored and they need to be checked now. If they are still
5150 	 * valid, it is important to return the device back off.
5151 	 */
5152 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
5153 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
5154 	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
5155 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
5156 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
5157 		hci_dev_close_sync(hdev);
5158 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
5159 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
5160 				   HCI_AUTO_OFF_TIMEOUT);
5161 	}
5162 
5163 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
5164 		/* For unconfigured devices, set the HCI_RAW flag
5165 		 * so that userspace can easily identify them.
5166 		 */
5167 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5168 			set_bit(HCI_RAW, &hdev->flags);
5169 
5170 		/* For fully configured devices, this will send
5171 		 * the Index Added event. For unconfigured devices,
5172 		 * it will send the Unconfigured Index Added event.
5173 		 *
5174 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
5175 		 * and no event will be sent.
5176 		 */
5177 		mgmt_index_added(hdev);
5178 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
5179 		/* When the controller is now configured, then it
5180 		 * is important to clear the HCI_RAW flag.
5181 		 */
5182 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5183 			clear_bit(HCI_RAW, &hdev->flags);
5184 
5185 		/* Powering on the controller with HCI_CONFIG set only
5186 		 * happens with the transition from unconfigured to
5187 		 * configured. This will send the Index Added event.
5188 		 */
5189 		mgmt_index_added(hdev);
5190 	}
5191 
5192 	return 0;
5193 }
5194 
5195 static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr)
5196 {
5197 	struct hci_cp_remote_name_req_cancel cp;
5198 
5199 	memset(&cp, 0, sizeof(cp));
5200 	bacpy(&cp.bdaddr, addr);
5201 
5202 	return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
5203 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5204 }
5205 
5206 int hci_stop_discovery_sync(struct hci_dev *hdev)
5207 {
5208 	struct discovery_state *d = &hdev->discovery;
5209 	struct inquiry_entry *e;
5210 	int err;
5211 
5212 	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
5213 
5214 	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
5215 		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
5216 			err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL,
5217 						    0, NULL, HCI_CMD_TIMEOUT);
5218 			if (err)
5219 				return err;
5220 		}
5221 
5222 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
5223 			cancel_delayed_work(&hdev->le_scan_disable);
5224 
5225 			err = hci_scan_disable_sync(hdev);
5226 			if (err)
5227 				return err;
5228 		}
5229 
5230 	} else {
5231 		err = hci_scan_disable_sync(hdev);
5232 		if (err)
5233 			return err;
5234 	}
5235 
5236 	/* Resume advertising if it was paused */
5237 	if (use_ll_privacy(hdev))
5238 		hci_resume_advertising_sync(hdev);
5239 
5240 	/* No further actions needed for LE-only discovery */
5241 	if (d->type == DISCOV_TYPE_LE)
5242 		return 0;
5243 
5244 	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
5245 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
5246 						     NAME_PENDING);
5247 		if (!e)
5248 			return 0;
5249 
5250 		return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
5251 	}
5252 
5253 	return 0;
5254 }
5255 
5256 static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
5257 			       u8 reason)
5258 {
5259 	struct hci_cp_disconnect cp;
5260 
5261 	if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) {
5262 		/* This is a BIS connection, hci_conn_del will
5263 		 * do the necessary cleanup.
5264 		 */
5265 		hci_dev_lock(hdev);
5266 		hci_conn_failed(conn, reason);
5267 		hci_dev_unlock(hdev);
5268 
5269 		return 0;
5270 	}
5271 
5272 	memset(&cp, 0, sizeof(cp));
5273 	cp.handle = cpu_to_le16(conn->handle);
5274 	cp.reason = reason;
5275 
5276 	/* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
5277 	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
5278 	 * used when suspending or powering off, where we don't want to wait
5279 	 * for the peer's response.
5280 	 */
5281 	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
5282 		return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT,
5283 						sizeof(cp), &cp,
5284 						HCI_EV_DISCONN_COMPLETE,
5285 						HCI_CMD_TIMEOUT, NULL);
5286 
5287 	return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp,
5288 				     HCI_CMD_TIMEOUT);
5289 }
5290 
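/* Illustrative sketch, not part of the upstream file: a minimal caller of
 * the helper above from the same cmd_sync context. The wrapper name is
 * hypothetical; HCI_ERROR_REMOTE_USER_TERM (0x13) is the common "user
 * terminated connection" reason and, unlike HCI_ERROR_REMOTE_POWER_OFF,
 * makes the helper wait for HCI_EV_DISCONN_COMPLETE.
 */
static int __maybe_unused hci_disconnect_user_term_sync(struct hci_dev *hdev,
							struct hci_conn *conn)
{
	return hci_disconnect_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
}
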
5291 static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
5292 				      struct hci_conn *conn, u8 reason)
5293 {
5294 	/* Return the reason if scanning, since the connection will probably
5295 	 * be cleaned up directly.
5296 	 */
5297 	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
5298 		return reason;
5299 
5300 	if (conn->role == HCI_ROLE_SLAVE ||
5301 	    test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
5302 		return 0;
5303 
5304 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
5305 				     0, NULL, HCI_CMD_TIMEOUT);
5306 }
5307 
5308 static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
5309 				   u8 reason)
5310 {
5311 	if (conn->type == LE_LINK)
5312 		return hci_le_connect_cancel_sync(hdev, conn, reason);
5313 
5314 	if (conn->type == ISO_LINK) {
5315 		/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
5316 		 * page 1857:
5317 		 *
5318 		 * If this command is issued for a CIS on the Central and the
5319 		 * CIS is successfully terminated before being established,
5320 		 * then an HCI_LE_CIS_Established event shall also be sent for
5321 		 * this CIS with the Status Operation Cancelled by Host (0x44).
5322 		 */
5323 		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
5324 			return hci_disconnect_sync(hdev, conn, reason);
5325 
5326 		/* A CIS with no Create CIS sent has nothing to cancel */
5327 		if (bacmp(&conn->dst, BDADDR_ANY))
5328 			return HCI_ERROR_LOCAL_HOST_TERM;
5329 
5330 		/* There is no way to cancel a BIS without terminating the BIG
5331 		 * which is done later on connection cleanup.
5332 		 */
5333 		return 0;
5334 	}
5335 
5336 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
5337 		return 0;
5338 
5339 	/* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
5340 	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
5341 	 * used when suspending or powering off, where we don't want to wait
5342 	 * for the peer's response.
5343 	 */
5344 	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
5345 		return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL,
5346 						6, &conn->dst,
5347 						HCI_EV_CONN_COMPLETE,
5348 						HCI_CMD_TIMEOUT, NULL);
5349 
5350 	return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
5351 				     6, &conn->dst, HCI_CMD_TIMEOUT);
5352 }
5353 
5354 static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
5355 			       u8 reason)
5356 {
5357 	struct hci_cp_reject_sync_conn_req cp;
5358 
5359 	memset(&cp, 0, sizeof(cp));
5360 	bacpy(&cp.bdaddr, &conn->dst);
5361 	cp.reason = reason;
5362 
5363 	/* SCO rejection has its own limited set of
5364 	 * allowed error values (0x0D-0x0F).
5365 	 */
5366 	if (reason < 0x0d || reason > 0x0f)
5367 		cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
5368 
5369 	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ,
5370 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5371 }
5372 
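/* Illustrative sketch, not part of the upstream file: the rejection reasons
 * the spec allows for synchronous connections are 0x0d (Limited Resources),
 * 0x0e (Security Reasons) and 0x0f (Unacceptable BD_ADDR), which is exactly
 * the range the helper above enforces. Hypothetical validity check:
 */
static bool __maybe_unused sco_reject_reason_valid(u8 reason)
{
	return reason >= 0x0d && reason <= 0x0f;
}
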
5373 static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn,
5374 				  u8 reason)
5375 {
5376 	struct hci_cp_le_reject_cis cp;
5377 
5378 	memset(&cp, 0, sizeof(cp));
5379 	cp.handle = cpu_to_le16(conn->handle);
5380 	cp.reason = reason;
5381 
5382 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS,
5383 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5384 }
5385 
5386 static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
5387 				u8 reason)
5388 {
5389 	struct hci_cp_reject_conn_req cp;
5390 
5391 	if (conn->type == ISO_LINK)
5392 		return hci_le_reject_cis_sync(hdev, conn, reason);
5393 
5394 	if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
5395 		return hci_reject_sco_sync(hdev, conn, reason);
5396 
5397 	memset(&cp, 0, sizeof(cp));
5398 	bacpy(&cp.bdaddr, &conn->dst);
5399 	cp.reason = reason;
5400 
5401 	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ,
5402 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5403 }
5404 
5405 int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
5406 {
5407 	int err = 0;
5408 	u16 handle = conn->handle;
5409 	bool disconnect = false;
5410 	struct hci_conn *c;
5411 
5412 	switch (conn->state) {
5413 	case BT_CONNECTED:
5414 	case BT_CONFIG:
5415 		err = hci_disconnect_sync(hdev, conn, reason);
5416 		break;
5417 	case BT_CONNECT:
5418 		err = hci_connect_cancel_sync(hdev, conn, reason);
5419 		break;
5420 	case BT_CONNECT2:
5421 		err = hci_reject_conn_sync(hdev, conn, reason);
5422 		break;
5423 	case BT_OPEN:
5424 	case BT_BOUND:
5425 		break;
5426 	default:
5427 		disconnect = true;
5428 		break;
5429 	}
5430 
5431 	hci_dev_lock(hdev);
5432 
5433 	/* Check if the connection has been cleaned up concurrently */
5434 	c = hci_conn_hash_lookup_handle(hdev, handle);
5435 	if (!c || c != conn) {
5436 		err = 0;
5437 		goto unlock;
5438 	}
5439 
5440 	/* Clean up the hci_conn object if it cannot be cancelled, as that
5441 	 * likely means the controller and host stack are out of sync, or in
5442 	 * the case of LE it was still scanning, so it can be cleaned up
5443 	 * safely.
5444 	 */
5445 	if (disconnect) {
5446 		conn->state = BT_CLOSED;
5447 		hci_disconn_cfm(conn, reason);
5448 		hci_conn_del(conn);
5449 	} else {
5450 		hci_conn_failed(conn, reason);
5451 	}
5452 
5453 unlock:
5454 	hci_dev_unlock(hdev);
5455 	return err;
5456 }
5457 
5458 static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
5459 {
5460 	struct list_head *head = &hdev->conn_hash.list;
5461 	struct hci_conn *conn;
5462 
5463 	rcu_read_lock();
5464 	while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) {
5465 		/* Make sure the connection is not freed while unlocking */
5466 		conn = hci_conn_get(conn);
5467 		rcu_read_unlock();
5468 		/* Disregard possible errors since hci_conn_del will have been
5469 		 * called even if an error occurred, because errors cause
5470 		 * hci_conn_failed to be called, which calls hci_conn_del
5471 		 * internally.
5472 		 */
5473 		hci_abort_conn_sync(hdev, conn, reason);
5474 		hci_conn_put(conn);
5475 		rcu_read_lock();
5476 	}
5477 	rcu_read_unlock();
5478 
5479 	return 0;
5480 }
5481 
5482 /* This function performs the power-off HCI command sequence as follows:
5483  *
5484  * Clear Advertising
5485  * Stop Discovery
5486  * Disconnect all connections
5487  * hci_dev_close_sync
5488  */
5489 static int hci_power_off_sync(struct hci_dev *hdev)
5490 {
5491 	int err;
5492 
5493 	/* If controller is already down there is nothing to do */
5494 	if (!test_bit(HCI_UP, &hdev->flags))
5495 		return 0;
5496 
5497 	hci_dev_set_flag(hdev, HCI_POWERING_DOWN);
5498 
5499 	if (test_bit(HCI_ISCAN, &hdev->flags) ||
5500 	    test_bit(HCI_PSCAN, &hdev->flags)) {
5501 		err = hci_write_scan_enable_sync(hdev, 0x00);
5502 		if (err)
5503 			goto out;
5504 	}
5505 
5506 	err = hci_clear_adv_sync(hdev, NULL, false);
5507 	if (err)
5508 		goto out;
5509 
5510 	err = hci_stop_discovery_sync(hdev);
5511 	if (err)
5512 		goto out;
5513 
5514 	/* Terminated due to Power Off */
5515 	err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
5516 	if (err)
5517 		goto out;
5518 
5519 	err = hci_dev_close_sync(hdev);
5520 
5521 out:
5522 	hci_dev_clear_flag(hdev, HCI_POWERING_DOWN);
5523 	return err;
5524 }
5525 
5526 int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
5527 {
5528 	if (val)
5529 		return hci_power_on_sync(hdev);
5530 
5531 	return hci_power_off_sync(hdev);
5532 }
5533 
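/* Illustrative sketch, not part of the upstream file: power changes are
 * normally driven through the cmd_sync machinery rather than by calling
 * hci_set_powered_sync() directly. The wrapper below is hypothetical and
 * mirrors the PTR_UINT()/UINT_PTR() pattern used elsewhere in this file,
 * e.g.:
 *
 *	hci_cmd_sync_queue(hdev, set_powered_sync_example, UINT_PTR(1), NULL);
 */
static int __maybe_unused set_powered_sync_example(struct hci_dev *hdev,
						   void *data)
{
	u8 val = PTR_UINT(data);

	return hci_set_powered_sync(hdev, val);
}
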
5534 static int hci_write_iac_sync(struct hci_dev *hdev)
5535 {
5536 	struct hci_cp_write_current_iac_lap cp;
5537 
5538 	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
5539 		return 0;
5540 
5541 	memset(&cp, 0, sizeof(cp));
5542 
5543 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
5544 		/* Limited discoverable mode */
5545 		cp.num_iac = min_t(u8, hdev->num_iac, 2);
5546 		cp.iac_lap[0] = 0x00;	/* LIAC */
5547 		cp.iac_lap[1] = 0x8b;
5548 		cp.iac_lap[2] = 0x9e;
5549 		cp.iac_lap[3] = 0x33;	/* GIAC */
5550 		cp.iac_lap[4] = 0x8b;
5551 		cp.iac_lap[5] = 0x9e;
5552 	} else {
5553 		/* General discoverable mode */
5554 		cp.num_iac = 1;
5555 		cp.iac_lap[0] = 0x33;	/* GIAC */
5556 		cp.iac_lap[1] = 0x8b;
5557 		cp.iac_lap[2] = 0x9e;
5558 	}
5559 
5560 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
5561 				     (cp.num_iac * 3) + 1, &cp,
5562 				     HCI_CMD_TIMEOUT);
5563 }
5564 
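/* Illustrative sketch, not part of the upstream file: the IAC byte triplets
 * above are the 24-bit LAPs GIAC (0x9e8b33) and LIAC (0x9e8b00) stored
 * least-significant byte first. A hypothetical helper encoding any LAP the
 * same way:
 */
static void __maybe_unused hci_lap_to_bytes(u32 lap, u8 dst[3])
{
	dst[0] = lap & 0xff;		/* 0x33 for GIAC, 0x00 for LIAC */
	dst[1] = (lap >> 8) & 0xff;	/* 0x8b */
	dst[2] = (lap >> 16) & 0xff;	/* 0x9e */
}
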
5565 int hci_update_discoverable_sync(struct hci_dev *hdev)
5566 {
5567 	int err = 0;
5568 
5569 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5570 		err = hci_write_iac_sync(hdev);
5571 		if (err)
5572 			return err;
5573 
5574 		err = hci_update_scan_sync(hdev);
5575 		if (err)
5576 			return err;
5577 
5578 		err = hci_update_class_sync(hdev);
5579 		if (err)
5580 			return err;
5581 	}
5582 
5583 	/* Advertising instances don't use the global discoverable setting, so
5584 	 * only update AD if advertising was enabled using Set Advertising.
5585 	 */
5586 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
5587 		err = hci_update_adv_data_sync(hdev, 0x00);
5588 		if (err)
5589 			return err;
5590 
5591 		/* Discoverable mode affects the local advertising
5592 		 * address in limited privacy mode.
5593 		 */
5594 		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
5595 			if (ext_adv_capable(hdev))
5596 				err = hci_start_ext_adv_sync(hdev, 0x00);
5597 			else
5598 				err = hci_enable_advertising_sync(hdev);
5599 		}
5600 	}
5601 
5602 	return err;
5603 }
5604 
5605 static int update_discoverable_sync(struct hci_dev *hdev, void *data)
5606 {
5607 	return hci_update_discoverable_sync(hdev);
5608 }
5609 
5610 int hci_update_discoverable(struct hci_dev *hdev)
5611 {
5612 	/* Only queue if it would have any effect */
5613 	if (hdev_is_powered(hdev) &&
5614 	    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5615 	    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
5616 	    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
5617 		return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL,
5618 					  NULL);
5619 
5620 	return 0;
5621 }
5622 
5623 int hci_update_connectable_sync(struct hci_dev *hdev)
5624 {
5625 	int err;
5626 
5627 	err = hci_update_scan_sync(hdev);
5628 	if (err)
5629 		return err;
5630 
5631 	/* If BR/EDR is not enabled and we disable advertising as a
5632 	 * by-product of disabling connectable, we need to update the
5633 	 * advertising flags.
5634 	 */
5635 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
5636 		err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
5637 
5638 	/* Update the advertising parameters if necessary */
5639 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5640 	    !list_empty(&hdev->adv_instances)) {
5641 		if (ext_adv_capable(hdev))
5642 			err = hci_start_ext_adv_sync(hdev,
5643 						     hdev->cur_adv_instance);
5644 		else
5645 			err = hci_enable_advertising_sync(hdev);
5646 
5647 		if (err)
5648 			return err;
5649 	}
5650 
5651 	return hci_update_passive_scan_sync(hdev);
5652 }
5653 
5654 static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
5655 {
5656 	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
5657 	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
5658 	struct hci_cp_inquiry cp;
5659 
5660 	bt_dev_dbg(hdev, "");
5661 
5662 	if (test_bit(HCI_INQUIRY, &hdev->flags))
5663 		return 0;
5664 
5665 	hci_dev_lock(hdev);
5666 	hci_inquiry_cache_flush(hdev);
5667 	hci_dev_unlock(hdev);
5668 
5669 	memset(&cp, 0, sizeof(cp));
5670 
5671 	if (hdev->discovery.limited)
5672 		memcpy(&cp.lap, liac, sizeof(cp.lap));
5673 	else
5674 		memcpy(&cp.lap, giac, sizeof(cp.lap));
5675 
5676 	cp.length = length;
5677 
5678 	return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
5679 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5680 }
5681 
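/* Illustrative sketch, not part of the upstream file: the Inquiry Length
 * parameter used above is expressed in units of 1.28 s (valid range
 * 0x01-0x30), so e.g. a length of 4 yields roughly 5.12 s of inquiry.
 * Hypothetical conversion helper:
 */
static u8 __maybe_unused hci_msecs_to_inquiry_len(unsigned int msecs)
{
	/* Round up to the next 1.28 s unit and clamp to the valid range */
	return (u8)clamp(DIV_ROUND_UP(msecs, 1280), 1U, 0x30U);
}
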
5682 static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
5683 {
5684 	u8 own_addr_type;
5685 	/* Accept list is not used for discovery */
5686 	u8 filter_policy = 0x00;
5687 	/* Default is to enable duplicates filter */
5688 	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5689 	int err;
5690 
5691 	bt_dev_dbg(hdev, "");
5692 
5693 	/* If controller is scanning, it means the passive scanning is
5694 	 * running. Thus, we should temporarily stop it in order to set the
5695 	 * discovery scanning parameters.
5696 	 */
5697 	err = hci_scan_disable_sync(hdev);
5698 	if (err) {
5699 		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
5700 		return err;
5701 	}
5702 
5703 	cancel_interleave_scan(hdev);
5704 
5705 	/* Pause address resolution for active scan and stop advertising if
5706 	 * privacy is enabled.
5707 	 */
5708 	err = hci_pause_addr_resolution(hdev);
5709 	if (err)
5710 		goto failed;
5711 
5712 	/* All active scans will be done with either a resolvable private
5713 	 * address (when privacy feature has been enabled) or non-resolvable
5714 	 * private address.
5715 	 */
5716 	err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev),
5717 					     &own_addr_type);
5718 	if (err < 0)
5719 		own_addr_type = ADDR_LE_DEV_PUBLIC;
5720 
5721 	if (hci_is_adv_monitoring(hdev) ||
5722 	    (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
5723 	    hdev->discovery.result_filtering)) {
5724 		/* Duplicate filter should be disabled when some advertisement
5725 		 * monitor is activated, otherwise AdvMon can only receive one
5726 		 * advertisement for one peer(*) during active scanning, and
5727 		 * might report loss to these peers.
5728 		 *
5729 		 * If the controller does strict duplicate filtering and the
5730 		 * discovery requires result filtering, disable controller-based
5731 		 * filtering, since it can cause reports that would match the
5732 		 * host filter to not be reported.
5733 		 */
5734 		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
5735 	}
5736 
5737 	err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval,
5738 				  hdev->le_scan_window_discovery,
5739 				  own_addr_type, filter_policy, filter_dup);
5740 	if (!err)
5741 		return err;
5742 
5743 failed:
5744 	/* Resume advertising if it was paused */
5745 	if (use_ll_privacy(hdev))
5746 		hci_resume_advertising_sync(hdev);
5747 
5748 	/* Resume passive scanning */
5749 	hci_update_passive_scan_sync(hdev);
5750 	return err;
5751 }
5752 
5753 static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
5754 {
5755 	int err;
5756 
5757 	bt_dev_dbg(hdev, "");
5758 
5759 	err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2);
5760 	if (err)
5761 		return err;
5762 
5763 	return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
5764 }
5765 
5766 int hci_start_discovery_sync(struct hci_dev *hdev)
5767 {
5768 	unsigned long timeout;
5769 	int err;
5770 
5771 	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
5772 
5773 	switch (hdev->discovery.type) {
5774 	case DISCOV_TYPE_BREDR:
5775 		return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
5776 	case DISCOV_TYPE_INTERLEAVED:
5777 		/* When running simultaneous discovery, the LE scanning time
5778 		 * should occupy the whole discovery time since BR/EDR inquiry
5779 		 * and LE scanning are scheduled by the controller.
5780 		 *
5781 		 * For interleaving discovery in comparison, BR/EDR inquiry
5782 		 * and LE scanning are done sequentially with separate
5783 		 * timeouts.
5784 		 */
5785 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
5786 			     &hdev->quirks)) {
5787 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
5788 			/* During simultaneous discovery, we double the LE scan
5789 			 * interval. We must leave some time for the controller
5790 			 * to do BR/EDR inquiry.
5791 			 */
5792 			err = hci_start_interleaved_discovery_sync(hdev);
5793 			break;
5794 		}
5795 
5796 		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
5797 		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
5798 		break;
5799 	case DISCOV_TYPE_LE:
5800 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
5801 		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
5802 		break;
5803 	default:
5804 		return -EINVAL;
5805 	}
5806 
5807 	if (err)
5808 		return err;
5809 
5810 	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
5811 
5812 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
5813 			   timeout);
5814 	return 0;
5815 }
5816 
5817 static void hci_suspend_monitor_sync(struct hci_dev *hdev)
5818 {
5819 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
5820 	case HCI_ADV_MONITOR_EXT_MSFT:
5821 		msft_suspend_sync(hdev);
5822 		break;
5823 	default:
5824 		return;
5825 	}
5826 }
5827 
5828 /* This function disables discovery and marks it as paused */
5829 static int hci_pause_discovery_sync(struct hci_dev *hdev)
5830 {
5831 	int old_state = hdev->discovery.state;
5832 	int err;
5833 
5834 	/* If discovery is already stopped/stopping/paused there is nothing to do */
5835 	if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING ||
5836 	    hdev->discovery_paused)
5837 		return 0;
5838 
5839 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5840 	err = hci_stop_discovery_sync(hdev);
5841 	if (err)
5842 		return err;
5843 
5844 	hdev->discovery_paused = true;
5845 	hdev->discovery_old_state = old_state;
5846 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5847 
5848 	return 0;
5849 }
5850 
5851 static int hci_update_event_filter_sync(struct hci_dev *hdev)
5852 {
5853 	struct bdaddr_list_with_flags *b;
5854 	u8 scan = SCAN_DISABLED;
5855 	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
5856 	int err;
5857 
5858 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
5859 		return 0;
5860 
5861 	/* Some fake CSR controllers lock up after setting this type of
5862 	 * filter, so avoid sending the request altogether.
5863 	 */
5864 	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
5865 		return 0;
5866 
5867 	/* Always clear event filter when starting */
5868 	hci_clear_event_filter_sync(hdev);
5869 
5870 	list_for_each_entry(b, &hdev->accept_list, list) {
5871 		if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
5872 			continue;
5873 
5874 		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
5875 
5876 		err =  hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
5877 						 HCI_CONN_SETUP_ALLOW_BDADDR,
5878 						 &b->bdaddr,
5879 						 HCI_CONN_SETUP_AUTO_ON);
5880 		if (err)
5881 			bt_dev_dbg(hdev, "Failed to set event filter for %pMR",
5882 				   &b->bdaddr);
5883 		else
5884 			scan = SCAN_PAGE;
5885 	}
5886 
5887 	/* Both branches wrote the same value; only issue the command when
5888 	 * the page scan state actually changes.
5889 	 */
5890 	if (!!scan != scanning)
5891 		hci_write_scan_enable_sync(hdev, scan);
5891 
5892 	return 0;
5893 }
5894 
5895 /* This function disables scanning (BR and LE) and marks it as paused */
5896 static int hci_pause_scan_sync(struct hci_dev *hdev)
5897 {
5898 	if (hdev->scanning_paused)
5899 		return 0;
5900 
5901 	/* Disable page scan if enabled */
5902 	if (test_bit(HCI_PSCAN, &hdev->flags))
5903 		hci_write_scan_enable_sync(hdev, SCAN_DISABLED);
5904 
5905 	hci_scan_disable_sync(hdev);
5906 
5907 	hdev->scanning_paused = true;
5908 
5909 	return 0;
5910 }
5911 
5912 /* This function performs the HCI suspend procedures in the following order:
5913  *
5914  * Pause discovery (active scanning/inquiry)
5915  * Pause Directed Advertising/Advertising
5916  * Pause Scanning (passive scanning in case discovery was not active)
5917  * Disconnect all connections
5918  * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wake up,
5919  * otherwise:
5920  * Update event mask (only set events that are allowed to wake up the host)
5921  * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP)
5922  * Update passive scanning (lower duty cycle)
5923  * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE
5924  */
5925 int hci_suspend_sync(struct hci_dev *hdev)
5926 {
5927 	int err;
5928 
5929 	/* If already marked as suspended there is nothing to do */
5930 	if (hdev->suspended)
5931 		return 0;
5932 
5933 	/* Mark device as suspended */
5934 	hdev->suspended = true;
5935 
5936 	/* Pause discovery if not already stopped */
5937 	hci_pause_discovery_sync(hdev);
5938 
5939 	/* Pause other advertisements */
5940 	hci_pause_advertising_sync(hdev);
5941 
5942 	/* Suspend monitor filters */
5943 	hci_suspend_monitor_sync(hdev);
5944 
5945 	/* Prevent disconnects from causing scanning to be re-enabled */
5946 	hci_pause_scan_sync(hdev);
5947 
5948 	if (hci_conn_count(hdev)) {
5949 		/* Soft disconnect everything (power off) */
5950 		err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
5951 		if (err) {
5952 			/* Set state to BT_RUNNING so resume doesn't notify */
5953 			hdev->suspend_state = BT_RUNNING;
5954 			hci_resume_sync(hdev);
5955 			return err;
5956 		}
5957 
5958 		/* Update the event mask so only the allowed events can wake up
5959 		 * the host.
5960 		 */
5961 		hci_set_event_mask_sync(hdev);
5962 	}
5963 
5964 	/* Only configure accept list if disconnect succeeded and wake
5965 	 * isn't being prevented.
5966 	 */
5967 	if (!hdev->wakeup || !hdev->wakeup(hdev)) {
5968 		hdev->suspend_state = BT_SUSPEND_DISCONNECT;
5969 		return 0;
5970 	}
5971 
5972 	/* Unpause to take care of updating scanning params */
5973 	hdev->scanning_paused = false;
5974 
5975 	/* Enable event filter for paired devices */
5976 	hci_update_event_filter_sync(hdev);
5977 
5978 	/* Update LE passive scan if enabled */
5979 	hci_update_passive_scan_sync(hdev);
5980 
5981 	/* Pause scan changes again. */
5982 	hdev->scanning_paused = true;
5983 
5984 	hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE;
5985 
5986 	return 0;
5987 }
5988 
5989 /* This function resumes discovery */
5990 static int hci_resume_discovery_sync(struct hci_dev *hdev)
5991 {
5992 	int err;
5993 
5994 	/* If discovery is not paused there is nothing to do */
5995 	if (!hdev->discovery_paused)
5996 		return 0;
5997 
5998 	hdev->discovery_paused = false;
5999 
6000 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6001 
6002 	err = hci_start_discovery_sync(hdev);
6003 
6004 	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
6005 				DISCOVERY_FINDING);
6006 
6007 	return err;
6008 }
6009 
6010 static void hci_resume_monitor_sync(struct hci_dev *hdev)
6011 {
6012 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
6013 	case HCI_ADV_MONITOR_EXT_MSFT:
6014 		msft_resume_sync(hdev);
6015 		break;
6016 	default:
6017 		return;
6018 	}
6019 }
6020 
6021 /* This function resumes scanning and resets the paused flag */
6022 static int hci_resume_scan_sync(struct hci_dev *hdev)
6023 {
6024 	if (!hdev->scanning_paused)
6025 		return 0;
6026 
6027 	hdev->scanning_paused = false;
6028 
6029 	hci_update_scan_sync(hdev);
6030 
6031 	/* Reset passive scanning to normal */
6032 	hci_update_passive_scan_sync(hdev);
6033 
6034 	return 0;
6035 }
6036 
6037 /* This function performs the HCI resume procedures in the following order:
6038  *
6039  * Restore event mask
6040  * Clear event filter
6041  * Update passive scanning (normal duty cycle)
6042  * Resume Directed Advertising/Advertising
6043  * Resume discovery (active scanning/inquiry)
6044  */
6045 int hci_resume_sync(struct hci_dev *hdev)
6046 {
6047 	/* If not marked as suspended there is nothing to do */
6048 	if (!hdev->suspended)
6049 		return 0;
6050 
6051 	hdev->suspended = false;
6052 
6053 	/* Restore event mask */
6054 	hci_set_event_mask_sync(hdev);
6055 
6056 	/* Clear any event filters and restore scan state */
6057 	hci_clear_event_filter_sync(hdev);
6058 
6059 	/* Resume scanning */
6060 	hci_resume_scan_sync(hdev);
6061 
6062 	/* Resume monitor filters */
6063 	hci_resume_monitor_sync(hdev);
6064 
6065 	/* Resume other advertisements */
6066 	hci_resume_advertising_sync(hdev);
6067 
6068 	/* Resume discovery */
6069 	hci_resume_discovery_sync(hdev);
6070 
6071 	return 0;
6072 }
6073 
6074 static bool conn_use_rpa(struct hci_conn *conn)
6075 {
6076 	struct hci_dev *hdev = conn->hdev;
6077 
6078 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
6079 }
6080 
6081 static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
6082 						struct hci_conn *conn)
6083 {
6084 	struct hci_cp_le_set_ext_adv_params cp;
6085 	int err;
6086 	bdaddr_t random_addr;
6087 	u8 own_addr_type;
6088 
6089 	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6090 					     &own_addr_type);
6091 	if (err)
6092 		return err;
6093 
6094 	/* Set require_privacy to false so that the remote device has a
6095 	 * chance of identifying us.
6096 	 */
6097 	err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
6098 				     &own_addr_type, &random_addr);
6099 	if (err)
6100 		return err;
6101 
6102 	memset(&cp, 0, sizeof(cp));
6103 
6104 	cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
6105 	cp.channel_map = hdev->le_adv_channel_map;
6106 	cp.tx_power = HCI_TX_POWER_INVALID;
6107 	cp.primary_phy = HCI_ADV_PHY_1M;
6108 	cp.secondary_phy = HCI_ADV_PHY_1M;
6109 	cp.handle = 0x00; /* Use instance 0 for directed adv */
6110 	cp.own_addr_type = own_addr_type;
6111 	cp.peer_addr_type = conn->dst_type;
6112 	bacpy(&cp.peer_addr, &conn->dst);
6113 
6114 	/* As per Core Spec 5.2, Vol 4, Part E, Sec 7.8.53, the
6115 	 * advertising_event_property LE_LEGACY_ADV_DIRECT_IND
6116 	 * does not support advertising data; when the advertising set already
6117 	 * contains some, the controller shall return the error code 'Invalid
6118 	 * HCI Command Parameters' (0x12).
6119 	 * So the adv set for handle 0x00 must be removed first, since we use
6120 	 * instance 0 for directed adv.
6121 	 */
6122 	err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL);
6123 	if (err)
6124 		return err;
6125 
6126 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
6127 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6128 	if (err)
6129 		return err;
6130 
6131 	/* Check if the random address needs to be updated */
6132 	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
6133 	    bacmp(&random_addr, BDADDR_ANY) &&
6134 	    bacmp(&random_addr, &hdev->random_addr)) {
6135 		err = hci_set_adv_set_random_addr_sync(hdev, 0x00,
6136 						       &random_addr);
6137 		if (err)
6138 			return err;
6139 	}
6140 
6141 	return hci_enable_ext_advertising_sync(hdev, 0x00);
6142 }
6143 
6144 static int hci_le_directed_advertising_sync(struct hci_dev *hdev,
6145 					    struct hci_conn *conn)
6146 {
6147 	struct hci_cp_le_set_adv_param cp;
6148 	u8 status;
6149 	u8 own_addr_type;
6150 	u8 enable;
6151 
6152 	if (ext_adv_capable(hdev))
6153 		return hci_le_ext_directed_advertising_sync(hdev, conn);
6154 
6155 	/* Clear the HCI_LE_ADV bit temporarily so that
6156 	 * hci_update_random_address knows that it's safe to go ahead
6157 	 * and write a new random address. The flag will be set back on
6158 	 * as soon as the SET_ADV_ENABLE HCI command completes.
6159 	 */
6160 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
6161 
6162 	/* Set require_privacy to false so that the remote device has a
6163 	 * chance of identifying us.
6164 	 */
6165 	status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6166 						&own_addr_type);
6167 	if (status)
6168 		return status;
6169 
6170 	memset(&cp, 0, sizeof(cp));
6171 
6172 	/* Some controllers might reject the command if the intervals are not
6173 	 * within range for undirected advertising.
6174 	 * BCM20702A0 is known to be affected by this.
6175 	 */
6176 	cp.min_interval = cpu_to_le16(0x0020);
6177 	cp.max_interval = cpu_to_le16(0x0020);
6178 
6179 	cp.type = LE_ADV_DIRECT_IND;
6180 	cp.own_address_type = own_addr_type;
6181 	cp.direct_addr_type = conn->dst_type;
6182 	bacpy(&cp.direct_addr, &conn->dst);
6183 	cp.channel_map = hdev->le_adv_channel_map;
6184 
6185 	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
6186 				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6187 	if (status)
6188 		return status;
6189 
6190 	enable = 0x01;
6191 
6192 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
6193 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
6194 }
6195 
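/* Illustrative sketch, not part of the upstream file: legacy advertising
 * interval values are in units of 0.625 ms, so the 0x0020 used above is
 * 32 * 0.625 ms = 20 ms for both the minimum and maximum interval.
 * Hypothetical conversion helper:
 */
static __le16 __maybe_unused hci_adv_interval_from_msecs(unsigned int msecs)
{
	/* msecs / 0.625 == msecs * 8 / 5 */
	return cpu_to_le16(msecs * 8 / 5);
}
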
6196 static void set_ext_conn_params(struct hci_conn *conn,
6197 				struct hci_cp_le_ext_conn_param *p)
6198 {
6199 	struct hci_dev *hdev = conn->hdev;
6200 
6201 	memset(p, 0, sizeof(*p));
6202 
6203 	p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
6204 	p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
6205 	p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
6206 	p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
6207 	p->conn_latency = cpu_to_le16(conn->le_conn_latency);
6208 	p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
6209 	p->min_ce_len = cpu_to_le16(0x0000);
6210 	p->max_ce_len = cpu_to_le16(0x0000);
6211 }
6212 
6213 static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
6214 				       struct hci_conn *conn, u8 own_addr_type)
6215 {
6216 	struct hci_cp_le_ext_create_conn *cp;
6217 	struct hci_cp_le_ext_conn_param *p;
6218 	u8 data[sizeof(*cp) + sizeof(*p) * 3];
6219 	u32 plen;
6220 
6221 	cp = (void *)data;
6222 	p = (void *)cp->data;
6223 
6224 	memset(cp, 0, sizeof(*cp));
6225 
6226 	bacpy(&cp->peer_addr, &conn->dst);
6227 	cp->peer_addr_type = conn->dst_type;
6228 	cp->own_addr_type = own_addr_type;
6229 
6230 	plen = sizeof(*cp);
6231 
6232 	if (scan_1m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_1M ||
6233 			      conn->le_adv_sec_phy == HCI_ADV_PHY_1M)) {
6234 		cp->phys |= LE_SCAN_PHY_1M;
6235 		set_ext_conn_params(conn, p);
6236 
6237 		p++;
6238 		plen += sizeof(*p);
6239 	}
6240 
6241 	if (scan_2m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_2M ||
6242 			      conn->le_adv_sec_phy == HCI_ADV_PHY_2M)) {
6243 		cp->phys |= LE_SCAN_PHY_2M;
6244 		set_ext_conn_params(conn, p);
6245 
6246 		p++;
6247 		plen += sizeof(*p);
6248 	}
6249 
6250 	if (scan_coded(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_CODED ||
6251 				 conn->le_adv_sec_phy == HCI_ADV_PHY_CODED)) {
6252 		cp->phys |= LE_SCAN_PHY_CODED;
6253 		set_ext_conn_params(conn, p);
6254 
6255 		plen += sizeof(*p);
6256 	}
6257 
6258 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
6259 					plen, data,
6260 					HCI_EV_LE_ENHANCED_CONN_COMPLETE,
6261 					conn->conn_timeout, NULL);
6262 }
6263 
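/* Illustrative sketch, not part of the upstream file: the payload built
 * above is the fixed header followed by one hci_cp_le_ext_conn_param block
 * per bit set in cp->phys, in ascending PHY order, so plen can equivalently
 * be computed from the PHY mask. Hypothetical helper:
 */
static u32 __maybe_unused hci_le_ext_create_conn_plen(u8 phys)
{
	return sizeof(struct hci_cp_le_ext_create_conn) +
	       hweight8(phys) * sizeof(struct hci_cp_le_ext_conn_param);
}
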
6264 static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
6265 {
6266 	struct hci_cp_le_create_conn cp;
6267 	struct hci_conn_params *params;
6268 	u8 own_addr_type;
6269 	int err;
6270 	struct hci_conn *conn = data;
6271 
6272 	if (!hci_conn_valid(hdev, conn))
6273 		return -ECANCELED;
6274 
6275 	bt_dev_dbg(hdev, "conn %p", conn);
6276 
6277 	clear_bit(HCI_CONN_SCANNING, &conn->flags);
6278 	conn->state = BT_CONNECT;
6279 
6280 	/* If requested to connect as peripheral use directed advertising */
6281 	if (conn->role == HCI_ROLE_SLAVE) {
6282 		/* If we're actively scanning and simultaneous roles are not
6283 		 * enabled, simply reject the attempt.
6284 		 */
6285 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6286 		    hdev->le_scan_type == LE_SCAN_ACTIVE &&
6287 		    !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) {
6288 			hci_conn_del(conn);
6289 			return -EBUSY;
6290 		}
6291 
6292 		/* Pause advertising while doing directed advertising. */
6293 		hci_pause_advertising_sync(hdev);
6294 
6295 		err = hci_le_directed_advertising_sync(hdev, conn);
6296 		goto done;
6297 	}
6298 
6299 	/* Disable advertising if simultaneous roles are not in use. */
6300 	if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
6301 		hci_pause_advertising_sync(hdev);
6302 
6303 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
6304 	if (params) {
6305 		conn->le_conn_min_interval = params->conn_min_interval;
6306 		conn->le_conn_max_interval = params->conn_max_interval;
6307 		conn->le_conn_latency = params->conn_latency;
6308 		conn->le_supv_timeout = params->supervision_timeout;
6309 	} else {
6310 		conn->le_conn_min_interval = hdev->le_conn_min_interval;
6311 		conn->le_conn_max_interval = hdev->le_conn_max_interval;
6312 		conn->le_conn_latency = hdev->le_conn_latency;
6313 		conn->le_supv_timeout = hdev->le_supv_timeout;
6314 	}
6315 
6316 	/* If controller is scanning, we stop it since some controllers are
6317 	 * not able to scan and connect at the same time. Also set the
6318 	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
6319 	 * handler for scan disabling knows to set the correct discovery
6320 	 * state.
6321 	 */
6322 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
6323 		hci_scan_disable_sync(hdev);
6324 		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
6325 	}
6326 
6327 	/* Update random address, but set require_privacy to false so
6328 	 * that we never connect with a non-resolvable address.
6329 	 */
6330 	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6331 					     &own_addr_type);
6332 	if (err)
6333 		goto done;
6334 
6335 	if (use_ext_conn(hdev)) {
6336 		err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
6337 		goto done;
6338 	}
6339 
6340 	memset(&cp, 0, sizeof(cp));
6341 
6342 	cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
6343 	cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
6344 
6345 	bacpy(&cp.peer_addr, &conn->dst);
6346 	cp.peer_addr_type = conn->dst_type;
6347 	cp.own_address_type = own_addr_type;
6348 	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
6349 	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
6350 	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
6351 	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
6352 	cp.min_ce_len = cpu_to_le16(0x0000);
6353 	cp.max_ce_len = cpu_to_le16(0x0000);
6354 
6355 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261:
6356 	 *
6357 	 * If this event is unmasked and the HCI_LE_Connection_Complete event
6358 	 * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is
6359 	 * sent when a new connection has been created.
6360 	 */
6361 	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
6362 				       sizeof(cp), &cp,
6363 				       use_enhanced_conn_complete(hdev) ?
6364 				       HCI_EV_LE_ENHANCED_CONN_COMPLETE :
6365 				       HCI_EV_LE_CONN_COMPLETE,
6366 				       conn->conn_timeout, NULL);
6367 
6368 done:
6369 	if (err == -ETIMEDOUT)
6370 		hci_le_connect_cancel_sync(hdev, conn, 0x00);
6371 
6372 	/* Re-enable advertising after the connection attempt is finished. */
6373 	hci_resume_advertising_sync(hdev);
6374 	return err;
6375 }
6376 
6377 int hci_le_create_cis_sync(struct hci_dev *hdev)
6378 {
6379 	DEFINE_FLEX(struct hci_cp_le_create_cis, cmd, cis, num_cis, 0x1f);
6380 	size_t aux_num_cis = 0;
6381 	struct hci_conn *conn;
6382 	u8 cig = BT_ISO_QOS_CIG_UNSET;
6383 
6384 	/* The spec allows only one pending LE Create CIS command at a time. If
6385 	 * the command is pending now, don't do anything. We check for pending
6386 	 * connections after each CIS Established event.
6387 	 *
6388 	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
6389 	 * page 2566:
6390 	 *
6391 	 * If the Host issues this command before all the
6392 	 * HCI_LE_CIS_Established events from the previous use of the
6393 	 * command have been generated, the Controller shall return the
6394 	 * error code Command Disallowed (0x0C).
6395 	 *
6396 	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
6397 	 * page 2567:
6398 	 *
6399 	 * When the Controller receives the HCI_LE_Create_CIS command, the
6400 	 * Controller sends the HCI_Command_Status event to the Host. An
6401 	 * HCI_LE_CIS_Established event will be generated for each CIS when it
6402 	 * is established or if it is disconnected or considered lost before
6403 	 * being established; until all the events are generated, the command
6404 	 * remains pending.
6405 	 */
6406 
6407 	hci_dev_lock(hdev);
6408 
6409 	rcu_read_lock();
6410 
6411 	/* Wait until previous Create CIS has completed */
6412 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6413 		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
6414 			goto done;
6415 	}
6416 
6417 	/* Find CIG with all CIS ready */
6418 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6419 		struct hci_conn *link;
6420 
6421 		if (hci_conn_check_create_cis(conn))
6422 			continue;
6423 
6424 		cig = conn->iso_qos.ucast.cig;
6425 
6426 		list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) {
6427 			if (hci_conn_check_create_cis(link) > 0 &&
6428 			    link->iso_qos.ucast.cig == cig &&
6429 			    link->state != BT_CONNECTED) {
6430 				cig = BT_ISO_QOS_CIG_UNSET;
6431 				break;
6432 			}
6433 		}
6434 
6435 		if (cig != BT_ISO_QOS_CIG_UNSET)
6436 			break;
6437 	}
6438 
6439 	if (cig == BT_ISO_QOS_CIG_UNSET)
6440 		goto done;
6441 
6442 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6443 		struct hci_cis *cis = &cmd->cis[aux_num_cis];
6444 
6445 		if (hci_conn_check_create_cis(conn) ||
6446 		    conn->iso_qos.ucast.cig != cig)
6447 			continue;
6448 
6449 		set_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6450 		cis->acl_handle = cpu_to_le16(conn->parent->handle);
6451 		cis->cis_handle = cpu_to_le16(conn->handle);
6452 		aux_num_cis++;
6453 
6454 		if (aux_num_cis >= cmd->num_cis)
6455 			break;
6456 	}
6457 	cmd->num_cis = aux_num_cis;
6458 
6459 done:
6460 	rcu_read_unlock();
6461 
6462 	hci_dev_unlock(hdev);
6463 
6464 	if (!aux_num_cis)
6465 		return 0;
6466 
6467 	/* Wait for HCI_LE_CIS_Established */
6468 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS,
6469 					struct_size(cmd, cis, cmd->num_cis),
6470 					cmd, HCI_EVT_LE_CIS_ESTABLISHED,
6471 					conn->conn_timeout, NULL);
6472 }
6473 
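/* Illustrative sketch, not part of the upstream file: the struct_size()
 * call above sizes the variable-length command as the fixed header plus
 * one struct hci_cis handle pair per CIS. Hypothetical equivalent:
 */
static size_t __maybe_unused hci_le_create_cis_plen(u8 num_cis)
{
	return sizeof(struct hci_cp_le_create_cis) +
	       num_cis * sizeof(struct hci_cis);
}
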
6474 int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle)
6475 {
6476 	struct hci_cp_le_remove_cig cp;
6477 
6478 	memset(&cp, 0, sizeof(cp));
6479 	cp.cig_id = handle;
6480 
6481 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp),
6482 				     &cp, HCI_CMD_TIMEOUT);
6483 }
6484 
6485 int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle)
6486 {
6487 	struct hci_cp_le_big_term_sync cp;
6488 
6489 	memset(&cp, 0, sizeof(cp));
6490 	cp.handle = handle;
6491 
6492 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC,
6493 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6494 }
6495 
6496 int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle)
6497 {
6498 	struct hci_cp_le_pa_term_sync cp;
6499 
6500 	memset(&cp, 0, sizeof(cp));
6501 	cp.handle = cpu_to_le16(handle);
6502 
6503 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC,
6504 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6505 }
6506 
6507 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
6508 			   bool use_rpa, struct adv_info *adv_instance,
6509 			   u8 *own_addr_type, bdaddr_t *rand_addr)
6510 {
6511 	int err;
6512 
6513 	bacpy(rand_addr, BDADDR_ANY);
6514 
6515 	/* If privacy is enabled use a resolvable private address. If the
6516 	 * current RPA has expired then generate a new one.
6517 	 */
6518 	if (use_rpa) {
6519 		/* If the controller supports LL Privacy, use own address type
6520 		 * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED)
6521 		 */
6522 		if (use_ll_privacy(hdev))
6523 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
6524 		else
6525 			*own_addr_type = ADDR_LE_DEV_RANDOM;
6526 
6527 		if (adv_instance) {
6528 			if (adv_rpa_valid(adv_instance))
6529 				return 0;
6530 		} else {
6531 			if (rpa_valid(hdev))
6532 				return 0;
6533 		}
6534 
6535 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
6536 		if (err < 0) {
6537 			bt_dev_err(hdev, "failed to generate new RPA");
6538 			return err;
6539 		}
6540 
6541 		bacpy(rand_addr, &hdev->rpa);
6542 
6543 		return 0;
6544 	}
6545 
6546 	/* In case of required privacy without resolvable private address,
6547 	 * use a non-resolvable private address. This is useful for
6548 	 * non-connectable advertising.
6549 	 */
6550 	if (require_privacy) {
6551 		bdaddr_t nrpa;
6552 
6553 		while (true) {
6554 			/* The non-resolvable private address is generated
6555 			 * from six random bytes with the two most significant
6556 			 * bits cleared.
6557 			 */
6558 			get_random_bytes(&nrpa, 6);
6559 			nrpa.b[5] &= 0x3f;
6560 
6561 			/* The non-resolvable private address shall not be
6562 			 * equal to the public address.
6563 			 */
6564 			if (bacmp(&hdev->bdaddr, &nrpa))
6565 				break;
6566 		}
6567 
6568 		*own_addr_type = ADDR_LE_DEV_RANDOM;
6569 		bacpy(rand_addr, &nrpa);
6570 
6571 		return 0;
6572 	}
6573 
6574 	/* No privacy so use a public address. */
6575 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
6576 
6577 	return 0;
6578 }
6579 
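/* Illustrative sketch, not part of the upstream file: a non-resolvable
 * private address has the two most significant bits of its most
 * significant byte cleared, mirroring the "nrpa.b[5] &= 0x3f" generation
 * above. Hypothetical check:
 */
static bool __maybe_unused hci_is_nrpa(const bdaddr_t *addr)
{
	return (addr->b[5] & 0xc0) == 0x00;
}
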
6580 static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
6581 {
6582 	u8 instance = PTR_UINT(data);
6583 
6584 	return hci_update_adv_data_sync(hdev, instance);
6585 }
6586 
6587 int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
6588 {
6589 	return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
6590 				  UINT_PTR(instance), NULL);
6591 }
6592 
6593 static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data)
6594 {
6595 	struct hci_conn *conn = data;
6596 	struct inquiry_entry *ie;
6597 	struct hci_cp_create_conn cp;
6598 	int err;
6599 
6600 	if (!hci_conn_valid(hdev, conn))
6601 		return -ECANCELED;
6602 
6603 	/* Many controllers disallow HCI Create Connection while an HCI
6604 	 * Inquiry is in progress, so we cancel the Inquiry first before
6605 	 * issuing HCI Create Connection. This may cause the MGMT discovering
6606 	 * state to become false without user space's request, but that is
6607 	 * okay since the MGMT Discovery APIs do not promise that discovery
6608 	 * should run forever. Instead, user space monitors the MGMT
6609 	 * discovering status and may request discovery again when it is false.
6610 	 */
6611 	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
6612 		err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0,
6613 					    NULL, HCI_CMD_TIMEOUT);
6614 		if (err)
6615 			bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
6616 	}
6617 
6618 	conn->state = BT_CONNECT;
6619 	conn->out = true;
6620 	conn->role = HCI_ROLE_MASTER;
6621 
6622 	conn->attempt++;
6623 
6624 	conn->link_policy = hdev->link_policy;
6625 
6626 	memset(&cp, 0, sizeof(cp));
6627 	bacpy(&cp.bdaddr, &conn->dst);
6628 	cp.pscan_rep_mode = 0x02;
6629 
6630 	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
6631 	if (ie) {
6632 		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
6633 			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
6634 			cp.pscan_mode     = ie->data.pscan_mode;
6635 			cp.clock_offset   = ie->data.clock_offset |
6636 					    cpu_to_le16(0x8000);
6637 		}
6638 
6639 		memcpy(conn->dev_class, ie->data.dev_class, 3);
6640 	}
6641 
6642 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
6643 	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
6644 		cp.role_switch = 0x01;
6645 	else
6646 		cp.role_switch = 0x00;
6647 
6648 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN,
6649 					sizeof(cp), &cp,
6650 					HCI_EV_CONN_COMPLETE,
6651 					conn->conn_timeout, NULL);
6652 }
6653 
6654 int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn)
6655 {
6656 	return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn,
6657 				       NULL);
6658 }
6659 
6660 static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
6661 {
6662 	struct hci_conn *conn = data;
6663 
6664 	bt_dev_dbg(hdev, "err %d", err);
6665 
6666 	if (err == -ECANCELED)
6667 		return;
6668 
6669 	hci_dev_lock(hdev);
6670 
6671 	if (!hci_conn_valid(hdev, conn))
6672 		goto done;
6673 
6674 	if (!err) {
6675 		hci_connect_le_scan_cleanup(conn, 0x00);
6676 		goto done;
6677 	}
6678 
6679 	/* Check if connection is still pending */
6680 	if (conn != hci_lookup_le_connect(hdev))
6681 		goto done;
6682 
6683 	/* Flush to make sure we send create conn cancel command if needed */
6684 	flush_delayed_work(&conn->le_conn_timeout);
6685 	hci_conn_failed(conn, bt_status(err));
6686 
6687 done:
6688 	hci_dev_unlock(hdev);
6689 }
6690 
6691 int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn)
6692 {
6693 	return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
6694 				       create_le_conn_complete);
6695 }
6696 
6697 int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
6698 {
6699 	if (conn->state != BT_OPEN)
6700 		return -EINVAL;
6701 
6702 	switch (conn->type) {
6703 	case ACL_LINK:
6704 		return !hci_cmd_sync_dequeue_once(hdev,
6705 						  hci_acl_create_conn_sync,
6706 						  conn, NULL);
6707 	case LE_LINK:
6708 		return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync,
6709 						  conn, create_le_conn_complete);
6710 	}
6711 
6712 	return -ENOENT;
6713 }
6714